id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
12801495 | <reponame>chenrb/bk-sops<gh_stars>0
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific lan
"""
from .template_manager import TemplateManager
class TemplateImporter:
    """Bulk-imports pipeline templates, creating new ones or overriding
    existing ones through a TemplateManager."""

    def __init__(self, template_model_cls):
        # Django model class the manager operates on (e.g. a TaskTemplate model).
        self.template_model_cls = template_model_cls

    def import_template(self, operator: str, template_data: list) -> list:
        """Create or override one template per entry of `template_data`.

        :param operator: username recorded as creator (create) or editor (update).
        :param template_data: list of dicts with keys `override_template_id`,
            `name`, `pipeline_tree`, `description`.
        :return: list of per-template results from TemplateManager.create /
            TemplateManager.update, in input order.  (The previous `-> dict`
            annotation was wrong: a list is returned.)
        """
        manager = TemplateManager(template_model_cls=self.template_model_cls)
        import_result = []
        for td in template_data:
            override_template_id = td["override_template_id"]
            name = td["name"]
            pipeline_tree = td["pipeline_tree"]
            description = td["description"]
            if not override_template_id:
                # No target id -> create a brand-new template.
                import_result.append(
                    manager.create(
                        name=name,
                        creator=operator,
                        pipeline_tree=pipeline_tree,
                        template_kwargs={},
                        description=description,
                    )
                )
            else:
                # Override mode: update the referenced template in place.
                # NOTE(review): .get() raises DoesNotExist on a stale id —
                # presumably validated upstream; confirm against callers.
                template = self.template_model_cls.objects.get(id=override_template_id)
                import_result.append(
                    manager.update(
                        template=template,
                        editor=operator,
                        name=name,
                        pipeline_tree=pipeline_tree,
                        description=description,
                    )
                )
        return import_result
| StarcoderdataPython |
5019714 | <reponame>marcosptf/cpython-2.0.1
#! /usr/bin/env python
"""Test script for the imageop module. This has the side
effect of partially testing the imgfile module as well.
<NAME>
"""
from test_support import verbose, unlink
import imageop, uu
def main(use_rgbimg=1):
    """Exercise every imageop conversion on a decoded test RGB image.

    Python 2 test script: decodes testrgb.uue into test.rgb, loads it via
    rgbimg (default) or the SGI-only imgfile module, chains each imageop
    transform, then removes the temporary file.
    """
    # Create binary test files
    uu.decode(get_qualified_path('testrgb.uue'), 'test.rgb')
    if use_rgbimg:
        image, width, height = getrgbimage('test.rgb')
    else:
        image, width, height = getimage('test.rgb')
    # Return the selected part of image, which should by width by height
    # in size and consist of pixels of psize bytes.
    if verbose:
        print 'crop'
    newimage = imageop.crop (image, 4, width, height, 0, 0, 1, 1)
    # Return image scaled to size newwidth by newheight. No interpolation
    # is done, scaling is done by simple-minded pixel duplication or removal.
    # Therefore, computer-generated images or dithered images will
    # not look nice after scaling.
    if verbose:
        print 'scale'
    scaleimage = imageop.scale(image, 4, width, height, 1, 1)
    # Run a vertical low-pass filter over an image. It does so by computing
    # each destination pixel as the average of two vertically-aligned source
    # pixels. The main use of this routine is to forestall excessive flicker
    # if the image two vertically-aligned source pixels, hence the name.
    if verbose:
        print 'tovideo'
    videoimage = imageop.tovideo (image, 4, width, height)
    # Convert an rgb image to an 8 bit rgb
    if verbose:
        print 'rgb2rgb8'
    greyimage = imageop.rgb2rgb8(image, width, height)
    # Convert an 8 bit rgb image to a 24 bit rgb image
    if verbose:
        print 'rgb82rgb'
    image = imageop.rgb82rgb(greyimage, width, height)
    # Convert an rgb image to an 8 bit greyscale image
    if verbose:
        print 'rgb2grey'
    greyimage = imageop.rgb2grey(image, width, height)
    # Convert an 8 bit greyscale image to a 24 bit rgb image
    if verbose:
        print 'grey2rgb'
    image = imageop.grey2rgb(greyimage, width, height)
    # Convert a 8-bit deep greyscale image to a 1-bit deep image by
    # thresholding all the pixels. The resulting image is tightly packed
    # and is probably only useful as an argument to mono2grey.
    if verbose:
        print 'grey2mono'
    monoimage = imageop.grey2mono (greyimage, width, height, 0)
    # monoimage, width, height = getimage('monotest.rgb')
    # Convert a 1-bit monochrome image to an 8 bit greyscale or color image.
    # All pixels that are zero-valued on input get value p0 on output and
    # all one-value input pixels get value p1 on output. To convert a
    # monochrome black-and-white image to greyscale pass the values 0 and
    # 255 respectively.
    if verbose:
        print 'mono2grey'
    greyimage = imageop.mono2grey (monoimage, width, height, 0, 255)
    # Convert an 8-bit greyscale image to a 1-bit monochrome image using a
    # (simple-minded) dithering algorithm.
    if verbose:
        print 'dither2mono'
    monoimage = imageop.dither2mono (greyimage, width, height)
    # Convert an 8-bit greyscale image to a 4-bit greyscale image without
    # dithering.
    if verbose:
        print 'grey2grey4'
    grey4image = imageop.grey2grey4 (greyimage, width, height)
    # Convert an 8-bit greyscale image to a 2-bit greyscale image without
    # dithering.
    if verbose:
        print 'grey2grey2'
    grey2image = imageop.grey2grey2 (greyimage, width, height)
    # Convert an 8-bit greyscale image to a 2-bit greyscale image with
    # dithering. As for dither2mono, the dithering algorithm is currently
    # very simple.
    if verbose:
        print 'dither2grey2'
    grey2image = imageop.dither2grey2 (greyimage, width, height)
    # Convert a 4-bit greyscale image to an 8-bit greyscale image.
    if verbose:
        print 'grey42grey'
    greyimage = imageop.grey42grey (grey4image, width, height)
    # Convert a 2-bit greyscale image to an 8-bit greyscale image.
    if verbose:
        print 'grey22grey'
    image = imageop.grey22grey (grey2image, width, height)
    # Cleanup
    unlink('test.rgb')
def getrgbimage(name):
    """return a tuple consisting of image (in 'imgfile' format but
    using rgbimg instead) width and height"""
    import rgbimg
    try:
        sizes = rgbimg.sizeofimage(name)
    except rgbimg.error:
        # Not found relative to cwd: retry with a path resolved against
        # the test directory / sys.path.
        name = get_qualified_path(name)
        sizes = rgbimg.sizeofimage(name)
    if verbose:
        print 'rgbimg opening test image: %s, sizes: %s' % (name, str(sizes))
    image = rgbimg.longimagedata(name)
    # (pixel data, width, height)
    return (image, sizes[0], sizes[1])
def getimage(name):
    """return a tuple consisting of
    image (in 'imgfile' format) width and height
    """
    # imgfile is an SGI-only extension module; mirrors getrgbimage above.
    import imgfile
    try:
        sizes = imgfile.getsizes(name)
    except imgfile.error:
        # Retry with a qualified path when the bare name is not found.
        name = get_qualified_path(name)
        sizes = imgfile.getsizes(name)
    if verbose:
        print 'imgfile opening test image: %s, sizes: %s' % (name, str(sizes))
    image = imgfile.read(name)
    # (pixel data, width, height)
    return (image, sizes[0], sizes[1])
def get_qualified_path(name):
    """Return a more qualified path to `name`.

    Searches the directory containing this script first (when __file__ is
    defined), then every sys.path entry; falls back to `name` unchanged.
    """
    import sys
    import os
    search_dirs = list(sys.path)
    try:
        # Prefer the script's own directory over sys.path entries.
        search_dirs.insert(0, os.path.dirname(__file__))
    except NameError:
        pass
    for directory in search_dirs:
        candidate = os.path.join(directory, name)
        if os.path.exists(candidate):
            return candidate
    return name
# rgbimg (unlike imgfile) is portable to platforms other than SGI.
# So we prefer to use it.
# NOTE: runs on import/execution — no __main__ guard, matching the old
# Python 2 test_support test-module style.
main(use_rgbimg=1)
| StarcoderdataPython |
82669 | # Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
# examples/Python/Basic/solution.py
import numpy as np
import os
import open3d as o3d
import sys
results_file = ""
ply_path = ""
# Exactly two CLI args: <ply_path> <results_file>; otherwise fall back to
# the bundled TestData mesh and results path.  (`len(sys.argv) == 3` is
# the original `> 2 and < 4` condition, simplified.)
if len(sys.argv) == 3:
    ply_path = sys.argv[1]
    results_file = sys.argv[2]
else:
    pwd = os.path.dirname(os.path.realpath(__file__))
    data_dir = os.path.join(pwd, os.pardir, os.pardir, os.pardir, "examples", "TestData")
    ply_path = os.path.join(data_dir, "test_mesh.ply")
    results_file = os.path.join(data_dir, "results.txt")
mesh = o3d.io.read_triangle_mesh(ply_path)
# One output line per connected component of identically colored vertices.
result = np.array(mesh.identically_colored_connected_components())
# `with` guarantees the file is closed even if a write fails (the previous
# open()/close() pair leaked the handle on exceptions).
with open(results_file, "w") as resultFile:
    for component in result:
        resultFile.write(' '.join(map(str, component)))
        resultFile.write('\n')
| StarcoderdataPython |
5060545 | <reponame>murawaki/comp-typology
#!/bin/env python
# -*- coding: utf-8 -*-
# simple parser of NEXUS annotated trees
import sys
import os
import re
def label_clades(node):
    """Assign a `clade` label to every node under `node`; return {label: node}.

    A clade label is the sorted, ':'-joined list of named descendants.
    The root (no `parent` attribute) is labelled "ROOT"; named nodes
    (including leaves) use their own name and propagate it upward.
    """
    clade_dict = {}

    def _label_clades_main(node):
        # Collect labels from children bottom-up.
        label_list = []
        for cnode in node.children:
            label_list += _label_clades_main(cnode)
        label_list.sort()
        node.clade = ":".join(label_list)
        # if hasattr(node, "left"):
        #     label_list = _label_clades_main(node.left) + _label_clades_main(node.right)
        #     label_list.sort()
        #     node.clade = ":".join(label_list)
        if not hasattr(node, "parent"):  # root
            node.clade = "ROOT"
            label_list = [node.clade]
        elif hasattr(node, "name"):
            # named nodes including leaves
            node.clade = node.name
            label_list = [node.clade]
        # NOTE(review): original indentation was lost in extraction; this
        # registers *every* node (keyed by its clade string) — confirm it
        # was not meant to register only ROOT/named nodes.
        clade_dict[node.clade] = node
        return label_list

    _label_clades_main(node)
    return clade_dict
class Node(object):
    """Tree node: integer `_id` plus a `children` list.

    `parent`, `name`, `branch`, `annotation` and `clade` are attached
    dynamically by TreeParser / label_clades.
    """

    def __init__(self, _id):
        self._id = _id
        self.children = []


class TreeParser(object):
    """Simple parser for NEXUS/Newick-style tree strings with annotations.

    `parse()` returns a list of `Node` roots, one per ';'-terminated tree.
    Taxa names go to `name`, ':<number>' branch lengths to `branch`, and
    '[k=v,...]' annotations to `annotation`.
    """

    # token type tags
    START = 1       # '('
    END = 2         # ')'
    EOS = 3         # ';' — end of one tree
    NODE = 4        # taxa label
    COMMA = 5       # sibling separator
    ANNOTATION = 6  # [key=value,...]
    BRANCH = 7      # ':<number>'

    taxa_re = re.compile(r"(?:(?P<uq>[A-Za-z0-9_\-\.\[\]]+)|(?:\'(?P<q>(?:\\\'|[^\'])+)\'))")
    # BUGFIX: the fractional part is now optional so integer branch
    # lengths like ':1' parse; the old pattern (\d+(?:\.\d+)) required a
    # '.' and made the `assert match is not None` below fail on them.
    # (Scientific notation, e.g. 1e-5, is still unsupported.)
    branch_re = re.compile(r"(\d+(?:\.\d+)?)")

    def __init__(self, dat):
        self.dat = dat

    def parse(self):
        """Tokenize and parse `self.dat`; return the list of tree roots."""
        tokens = self._tokenize(self.dat)
        return self._parse(tokens)

    def _tokenize(self, data):
        """Turn the raw string into a flat token list."""
        # Skip any tree-level preamble before the first '(' and strip the
        # final character (a trailing newline in file input, keeping ';').
        tree_data = data[data.find('('):-1]
        idx = 0
        tokens = []
        while idx < len(tree_data):
            if tree_data[idx] in ("\n",):
                idx += 1
            elif tree_data[idx] == '(':
                tokens.append(self.START)
                idx += 1
            elif tree_data[idx] == ')':
                tokens.append(self.END)
                idx += 1
            elif tree_data[idx] == ',':
                tokens.append(self.COMMA)
                idx += 1
            elif tree_data[idx] == ';':
                tokens.append(self.EOS)
                idx += 1
            elif tree_data[idx] == '[':
                # annotation block [k=v,k=v,...]
                # NOTE(review): splitting on ',' breaks for values that
                # themselves contain commas (e.g. BEAST ranges {a,b}).
                idx2 = tree_data.find(']', idx + 1)
                rawstr = tree_data[idx + 1:idx2]
                annotation = {}
                for kv in rawstr.split(','):
                    k, v = kv.split("=", 1)
                    annotation[k] = v
                obj = {
                    'type': self.ANNOTATION,
                    'annotation': annotation,
                }
                idx = idx2 + 1
                tokens.append(obj)
            elif tree_data[idx] == ':':
                # branch length; search() is unanchored, but for
                # well-formed input the number starts right after ':'.
                match = self.branch_re.search(tree_data, idx + 1)
                assert(match is not None)
                obj = {
                    'type': self.BRANCH,
                    'branch': float(tree_data[match.start():match.end()]),
                }
                idx = match.end()
                tokens.append(obj)
            else:
                match = self.taxa_re.search(tree_data, idx)
                assert(match is not None)
                taxa = match.group('uq') or match.group('q')
                obj = {
                    'type': self.NODE,
                    'taxa': taxa,
                }
                idx = match.end()
                tokens.append(obj)
        return tokens

    def _parse(self, tokens):
        """Build Node trees from the token stream; returns list of roots."""
        count = 0
        root = Node(_id=count)
        count += 1
        node = root
        rv = []
        for token in tokens:
            if token == self.START:
                # Open a subtree: first child of the current node.
                node2 = Node(_id=count)
                count += 1
                assert(len(node.children) == 0)
                node.children.append(node2)
                node2.parent = node
                node = node2
            elif token == self.END:
                if hasattr(node, "parent"):
                    node = node.parent
                else:
                    node = None
            elif token == self.EOS:
                # One tree finished; start a fresh root.
                rv.append(root)
                root = Node(_id=count)
                count += 1
                node = root
            elif token == self.COMMA:
                # Sibling of the current node under the same parent.
                node2 = Node(_id=count)
                count += 1
                assert(len(node.parent.children) > 0)
                node.parent.children.append(node2)
                node2.parent = node.parent
                node = node2
            elif token['type'] == self.ANNOTATION:
                node.annotation = token['annotation']
            elif token['type'] == self.BRANCH:
                node.branch = token['branch']
            elif token['type'] == self.NODE:
                node.name = token['taxa']
        return rv
if __name__ == "__main__":
    # CLI: parse a NEXUS tree file (argv[1]) and pickle the resulting
    # tree list to argv[2].  Python 2 script: uses cPickle; text-mode
    # "w" is safe here because protocol-0 pickles are ASCII.
    import codecs
    f = codecs.getreader("utf-8")(open(sys.argv[1]))
    tp = TreeParser(f.read())
    trees = tp.parse()
    import cPickle as pickle
    with open(sys.argv[2], "w") as f:
        pickle.dump(trees, f)
| StarcoderdataPython |
1953254 | <reponame>parshakova/-GAMs<filename>r_plambda_pitheta_full.py<gh_stars>1-10
import argparse
import time
from datetime import datetime
import os
import sqlite3
import random
from random import shuffle
import math
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import pandas as pd
from fuzzysearch import find_near_matches
from tensorboardX import SummaryWriter
parser = argparse.ArgumentParser(description='PyTorch LSTM Language Model')
parser.add_argument('--epochs', type=int, default=100, help='maximum number of epochs')
parser.add_argument('--ds_size', type=int, default=1000, help='training set size')
parser.add_argument('--distill_size', type=int, default=20000, help='training set size')
parser.add_argument('--motif', type=int, default=2, help='=1= short motif, =4= long motif')
parser.add_argument('--nmotifs', type=int, default=1, help='number of motifs that define the process')
parser.add_argument('--mtype', type=str, default='m', help='m, mam, m1m2, mult')
parser.add_argument('--n', type=int, default=30, help='string size')
parser.add_argument('--p', type=float, default=0.5, help='probability of flipping a coin')
parser.add_argument('--print_softm', type=str, default='', help='train or print')
parser.add_argument('--job', type=int, default=0, help='slurm job id')
#parser.add_argument('--feat', type=str, default='111', help='features for motifs with -.- separator; 0 or 1 at i-th position adds 0 to motif')
#parser.add_argument('--feat', type=str, default='1101000', help='features for motifs with -.- separator; (motif, supermotif, submotif, 1st bit==0, 10101, 1001001, 00110011)')
parser.add_argument('--feat', type=str, default='1001111', help='features for motifs with -.- separator; (motif, supermotif, submotif__2, 1st bit==0, 10101_len_m, 1001001_le_m_2, 00110011_len_m__2)')
parser.add_argument('--train', type=str, default='rs', help='=rs= rejection sampling, =snis_mix= snis mixture, =snis_r= snis r')
parser.add_argument('--restore', type=str, default='', help='checkpoint to restore model from')
parser.add_argument('--theta_fixed', action='store_false', help='train theta with lambda (log-linear model) or only lambda')
parser.add_argument('--test_run', action='store_true', help='if False - testing run, do not store accuracies')
parser.add_argument('--train2', type=str, default='distill', help='=distill=, =pg=, =dpg=, =cyclic_1=, =cyclic_r=')
parser.add_argument('--optim', type=str, default='adam', help='=adam=, =manual_lr=')
parser.add_argument('--debug_opt', type=str, default='no_motif', help='=no_motif=, =fix_length=')
parser.add_argument('--logdir', type=str, default='/tmp-network/user/tparshak')
parser.add_argument('--wandb', action='store_true')
parser.add_argument('--tensorboard', action='store_true')
parser.add_argument('--expect_len', type=float, default=30, help='expected length of strings in PFSA')
# hype parameters
parser.add_argument('--rl_lr', type=float, default=0.01, help='reinforcement learning learning rate')
parser.add_argument('--rl_scale_iter', type=float, default=100, help='reinforcement learning scaled number of iterations in one epoch')
parser.add_argument('--rl_target_kl', type=float, default=0.01, help='early stopping in ppo')
parser.add_argument('--rl_clip_param', type=float, default=0.2, help='in ppo')
parser.add_argument('--rl_value_loss_coeff', type=float, default=0.2, help='coefficient for critic loss')
parser.add_argument('--rl_seed', type=int, default=-999, help='for fair comparison')
parser.add_argument('--rl_patience', type=int, default=10, help='early stopping')
parser.add_argument('--rl_mini_batch', type=int, default=500, help='in rl setting')
parser.add_argument('--rl_plan_depth', type=int, default=1, help='plannign in AC D-PG')
"""
train2 combinations:
[dpg || pg || ppo] + [crit, wn]
[dpg || ac_dpg] + [stable_q]
[ppo_fl] + [crit]
[dpg] + [stable_q_fix]
"""
args = parser.parse_args()
if 'M' or 'v' in args.feat:
args.max_len = 100
else:
args.max_len = args.n*5
torch.set_printoptions(precision=15)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if args.rl_seed != -999:
np.random.seed(args.rl_seed)
torch.manual_seed(args.rl_seed)
torch.cuda.manual_seed(args.rl_seed)
random.seed(args.rl_seed)
torch.cuda.manual_seed_all(args.rl_seed)
# W&B configuration
if args.wandb:
import wandb
wandb.init(project=args.train2, name=str(args.job))
wandb.config.update(args)
args.nmotifs = 1
s = "\nParameters:\n"
for k in sorted(args.__dict__):
s += "{} = {} \n".format(k.lower(), args.__dict__[k])
print(s)
# input vocabulary size
ntoken = 5
batch_size = 500
nhid = 200
# one hot input vector embeddings size
ninp = 3
nlayers = 2
dropout = 0.2 # prob to be zeroed
loss_scale = 1
log_interval = 10
clip = 0.25
nsamples = 10
start_symbol = torch.tensor([[3]*10000]).cuda()
PAD = 4
timestamp = datetime.now().strftime("%mm%dd_%H%M_") + str(args.job)
print(timestamp)
if args.wandb:
wandb.log({'tstamp':timestamp})
# motif: ratio = 1: 1:50, 2: 1:100, 3: 1:500, 4: 1:1000
# choose 2,4,5,6,7
if args.mtype == 'm':
if args.motif == 1:
all_motifs = {10:'1111111', 30:'1000101111', 50:'10001010001'}
power_motifs = {30:21927961, 50:21571947468791}
elif args.motif == 2:
all_motifs = {30:'10001010001', 50:'100010100010'}
power_motifs = {30:10355564, 50:10547846544409}
elif args.motif == 3:
all_motifs = {30:'1000101000101', 50:'10011000111111', 100:'0111010000011101'}
power_motifs = {30:2334480, 50:2541261794559}
elif args.motif == 4:
all_motifs = {30:'10001011111000', 50:'100110001111111'}
power_motifs = {30:1113640, 50:1236662229247}
elif args.motif == 5:
all_motifs = {30:'01011101101'}
elif args.motif == 6:
all_motifs = {30:'001001100111'}
elif args.motif == 7:
all_motifs = {30:'1011100111001'}
elif args.mtype == 'mam':
if args.motif == 2:
all_motifs = {30:'100010100011.100010100011'}
power_motifs = {30:11787265}
elif args.motif == 3:
all_motifs = {30:'10001011111000.10001011111000'}
power_motifs = {30:3064058}
elif args.motif == 4:
all_motifs = {30:'1000101111100011.1000101111100011'}
power_motifs = {30:786542}
elif args.motif == 5:
all_motifs = {30:'01011101101.01011101101'}
elif args.motif == 6:
all_motifs = {30:'001001100111.001001100111'}
elif args.motif == 7:
all_motifs = {30:'1011100111001.1011100111001'}
elif args.mtype == 'mult':
if args.motif == 3:
all_motifs = {30:'multipl_3'}
elif args.motif == 17:
all_motifs = {30:'multipl_17'}
#wandb.config.update({'motif':all_motifs[args.n]})
entp_motifs_tm = {10:{'m.1111111':2.995732273553991/11}, 30:{'mult.multipl_3':19.62365305094772/31, 'mult.multipl_17':17.889051994558614/31, 'mam.100010100011':16.282530254126048/31, 'mam.1000101111100011':13.57540128031525/31,
'm.10001010001':16.15303451776991/31,'m.10001011111000':13.923144487457433/31,
'mam.10001011111000':14.935250784153713/31, 'm.01011101101':16.1633538708637/31,
'm.001001100111':15.420728378322668/31,'m.1011100111001':14.6736907/31, 'mam.01011101101':16.950563779/31,
'mam.001001100111':16.2827152768/31, 'mam.1011100111001':15.61062622/31, 'm.1000101000101':14.66329972621143/31}, 100:{'m.0111010000011101':62.665668876452344/101}}
z_motifs = {10:{'m.1111111':0.01953125}, 30:{'mult.multipl_3':0.3333333343343343, 'mult.multipl_17':0.05882352952952953, 'mam.100010100011':0.0046360, 'mam.1000101111100011':0.00022888,
'm.10001010001':0.00964437,'m.10001011111000':0.0010371580,
'mam.10001011111000':0.001037158, 'm.01011101101':0.0097444,
'm.001001100111':0.004637, 'm.1011100111001':0.002196863, 'mam.01011101101':0.00974440,
'mam.001001100111':0.004637, 'mam.1011100111001':0.002196863, 'm.1000101000101':0.0021741539239883423}, 100:{'m.0111010000011101':0.0012952530732785747}}
entp_motifs = {}
for ni, m in all_motifs.items():
if ni in entp_motifs_tm and ni == args.n:
entp_motifs[ni] = entp_motifs_tm[ni][args.mtype+'.'+m.split('.')[0]]
# get data
def get_batch(source_data, batch):
    """Slice one minibatch: inputs drop the last step, targets the first.

    Returns (data [seq_len-1, batch_size], target flattened to 1-D).
    Uses the module-level `batch_size`.
    """
    cols = slice(batch, batch + batch_size)
    data = source_data[:-1, cols]
    target = source_data[1:, cols].contiguous().view(-1)
    return data, target
def get_batch_fsz(source_data, batch):
    """Like get_batch, but the target keeps its [seq_len, batch] shape."""
    cols = slice(batch, batch + batch_size)
    return source_data[:-1, cols], source_data[1:, cols].contiguous()
def load_data_mult(n, sz, motif, ds_type):
    """Load the 'multiples-of-k' dataset and pad variable-length strings.

    Reads up to `sz` lines from <logdir>/data/multipl_<motif>/<ds_type>.txt,
    pads each to the longest line with <EOS>=2 then PAD=4, prepends <BOS>=3,
    and returns a GPU LongTensor of shape [max_len+2, batch-aligned count].

    Side effect: overwrites `args.n` (and the local `n`) with the longest
    line length found in the file.
    """
    ds = []
    # input: <bos> binary string <eos>
    # 3 {0,1}^n 2
    data_file = os.path.join(os.path.join(args.logdir,'data'), 'multipl_%s'%(args.motif),"%s.txt"%ds_type)
    max_len = 0
    with open(data_file, "r") as file:
        for line in file:
            #assert motif in line
            ds += [line.strip()]
            max_len = max(max_len, len(line.strip()))
            #print(line.strip())
            if len(ds)>=sz:
                break
    n = max_len
    args.n = max_len
    # Flatten all strings into one space-separated token stream:
    # digits, then '2' (<EOS>) and PAD tokens up to max_len.
    original = ''
    for l in ds:
        original += ' '+ ''.join(c+' ' for c in l).strip()
        original += ' 2 '+ ''.join(str(PAD)+' ' for _ in range(max_len-len(l))).strip()
    original = original.strip()
    print(len(original), max_len)
    # Each padded string is max_len+1 tokens long.
    n += 1
    original = np.fromstring(original, dtype=int, sep=' ')
    original = original.reshape((original.shape[0]//n, n)).transpose()
    # Leftover validation loop (assert commented out); `res` is unused.
    for i in range(original.shape[1]):
        res = ''.join([str(original[j,i]) for j in range(original.shape[0])])
        #assert flag
    # Prepend the <BOS>=3 row.
    dataset = (np.ones((n+1, original.shape[1]))).astype(int)
    dataset[1:] = original
    dataset[0] = dataset[0]*3
    #dataset[-1] = dataset[-1]*2
    print(dataset.shape, batch_size)
    assert dataset.shape[1] >= sz
    # Trim to a whole number of batches.
    ds = dataset[:, :batch_size*int(1.0*dataset.shape[1]/batch_size)]
    return torch.from_numpy(ds).cuda()
def load_data_motif(n, sz, motif, ds_type):
    """Load fixed-length PFSA strings with the given motif.

    Concatenates lines from <logdir>/data/pfsa_<n>_<motif>/<ds_type>.txt
    until sz*n characters are collected, reshapes into columns of length
    n, and wraps each with <BOS>=3 / <EOS>=2.  Returns a GPU LongTensor
    of shape [n+2, batch-aligned count].
    """
    ds = ""
    # input: <bos> binary string <eos>
    # 3 {0,1}^n 2
    if args.nmotifs == 1:
        data_file = os.path.join(os.path.join(args.logdir,'data'), 'pfsa_%d_%s'%(n, motif),"%s.txt"%ds_type)
    else:
        # Two-motif datasets were generated with length n-1.
        data_file = os.path.join(os.path.join(args.logdir,'data'), 'pfsa_%d_%s'%(n-1, motif),"%s.txt"%ds_type)
    with open(data_file, "r") as file:
        for line in file:
            #assert motif in line
            ds += line.strip()
            #print(line.strip())
            if len(ds)>=sz*n:
                break
    # Space-separate characters so np.fromstring can parse them.
    original = ''.join(c+' ' for c in ds[:sz*n]).strip()
    original = np.fromstring(original, dtype=int, sep=' ')
    original = original.reshape((original.shape[0]//n, n)).transpose()
    # Leftover validation loop (assert commented out); `res` is unused.
    for i in range(original.shape[1]):
        res = ''.join([str(original[j,i]) for j in range(original.shape[0])])
        #assert flag
    # Wrap with <BOS>=3 on top and <EOS>=2 at the bottom.
    dataset = (np.ones((n+2, original.shape[1]))).astype(int)
    dataset[1:-1] = original
    dataset[0] = dataset[0]*3
    dataset[-1] = dataset[-1]*2
    print(dataset.shape, batch_size)
    assert dataset.shape[1] >= sz
    # Trim to a whole number of batches.
    ds = dataset[:, :batch_size*int(1.0*dataset.shape[1]/batch_size)]
    return torch.from_numpy(ds).cuda()
# ------------------------------------------------------------------------
# ------------ classes: RNN, GAMs, WhiteNoise with filter ----------------
def repackage_hidden(h):
    """Detach each hidden-state tensor from its autograd history."""
    return tuple(Variable(state.data) for state in h)
def init(module, weight_init, bias_init, gain=1):
    """Initialize a module's weight and bias in place; return the module.

    `weight_init` is called as weight_init(tensor, gain=gain); `bias_init`
    receives the bias tensor only.
    """
    bias_init(module.bias.data)
    weight_init(module.weight.data, gain=gain)
    return module
# some part of the language model architecture from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModel/
class RNNModel(nn.Module):
    """LSTM language model over the binary-sequence vocabulary.

    Input embeddings are frozen one-hot vectors; the decoder maps hidden
    states to `ninp` output logits (<BOS> is excluded from the output
    vocabulary).  When `policy` is True and `args.train2` uses a critic
    ('crit' / 'ac_dpg'), an extra MLP head estimates Z(s) per step
    (log Z(s) when `policy_log` is True).
    """

    def __init__(self, ntoken, ninp, nhid, nlayers, dropout=0.5, policy=False, policy_log=False):
        super(RNNModel, self).__init__()
        self.drop = nn.Dropout(dropout)
        self.encoder = nn.Embedding(ntoken, ninp)
        # Fixed one-hot rows for tokens: 0 1 <EOS> <BOS> PAD
        # (<BOS>/PAD map to the zero vector).
        one_hot_vecs = np.array([[1,0,0], [0,1,0], [0,0,1], [0,0,0], [0,0,0]])
        self.encoder.weight.data.copy_(torch.from_numpy(one_hot_vecs))
        self.freeze_layer(self.encoder)
        self.rnn = nn.LSTM(ninp, nhid, nlayers, dropout=dropout)
        # <bos> is not in the output vocabulary
        self.decoder = nn.Linear(nhid, ninp)
        self.policy = policy
        if policy and ('crit' in args.train2 or 'ac_dpg' in args.train2):
            init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
                constant_(x, 0))
            # spit out values of Z(s) leaves
            if 'ac_dpg_a' in args.train2:
                zn_out = ninp
            else:
                zn_out = 1
            if policy_log:
                # critic outputs log_Z(s) (unbounded)
                self.critic = nn.Sequential(init_(nn.Linear(nhid, nhid)), nn.Tanh(), init_(nn.Linear(nhid, zn_out)))
            else:
                # critic outputs Z(s); ReLU keeps it non-negative
                self.critic = nn.Sequential(init_(nn.Linear(nhid, nhid)), nn.Tanh(), init_(nn.Linear(nhid, zn_out)), nn.ReLU())
        self.init_weights()
        self.nhid = nhid
        self.nlayers = nlayers

    def freeze_layer(self, layer):
        # Exclude a layer's parameters from optimization.
        for param in layer.parameters():
            param.requires_grad = False

    def init_weights(self):
        initrange = 0.1
        self.decoder.bias.data.fill_(0)
        self.decoder.weight.data.uniform_(-initrange, initrange)

    def forward(self, input, hidden, len_inp, mask, critic=False):
        """Run the LSTM and return masked logits.

        NOTE(review): `len_inp` is unused here (unlike GAMModel.forward,
        which packs sequences) — kept for interface compatibility.
        Returns (decoded, hidden) or (decoded, hidden, est_z) when the
        critic head is active and `critic` is requested.
        """
        emb = self.drop(self.encoder(input))
        output, hidden = self.rnn(emb, hidden)
        output = self.drop(output) # [seq_len ,batch, nhid]
        # [seq_len*batch, ntok]
        decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))
        decoded = torch.mul(decoded.view(output.size(0), output.size(1), decoded.size(1)), mask)
        if self.policy and ('crit' in args.train2 or 'ac_dpg' in args.train2) and critic:
            est_z = self.critic(output.view(output.size(0)*output.size(1), output.size(2)))
            est_z = torch.mul(est_z.view(output.size(0), output.size(1), est_z.size(1)), mask)
            return decoded, hidden, est_z
        else:
            return decoded, hidden

    def init_hidden(self, bsz):
        # Zero-initialized (h, c) pair matching the model's device/dtype.
        weight = next(self.parameters()).data
        return (Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()),
                Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()))
class White_noise_filter(nn.Module):
    # biased white noise with filter for strings
    # of length!=n and not containing the motif
    """Scores sequences under independent per-token probabilities, and
    assigns -inf log-probability to sequences that fail the length or
    motif-feature filter.  Stateless; mimics the RNN model interface."""

    def __init__(self, probs, feat, motifs):
        super(White_noise_filter, self).__init__()
        self.drop = nn.Dropout(dropout)
        self.feat = feat
        self.motifs = motifs
        self.encoder = nn.Embedding(ntoken, 1)
        # Lookup table of per-token probabilities; <BOS>/PAD get 1.0 so
        # they do not change the product.
        one_hot_vecs = np.array([[pi] for pi in probs+[1, 1]])
        self.encoder.weight.data.copy_(torch.from_numpy(one_hot_vecs))
        # probs for: 0 1 <EOS> <BOS> PAD
        self.probs = torch.tensor(probs).cuda()
        self.freeze_layer(self.encoder)

    def freeze_layer(self, layer):
        for param in layer.parameters():
            param.requires_grad = False

    def init_hidden(self, bsz):
        # No recurrent state; returned for interface compatibility.
        return (None, None)

    def forward(self, input, hidden, len_tar, mask):
        """Return per-sequence log-probabilities [batch, 1] with the
        filter applied; second/third outputs mirror the GAM interface."""
        # [seq x batch x 1]
        probs = self.encoder(input)
        # 1 = no motif
        x_feat = get_features(input, self.motifs, self.feat)[:,0]
        if 'no_motif' in args.debug_opt:
            x_feat = x_feat*0
        log_lin = 0
        #len_tar, _,_,_ = get_length_mask(input)
        # 1 = length different from n
        if 'i' in args.feat:
            # soft variant: only lengths deviating by >=10 are filtered
            len_feat = (torch.abs(len_tar-(args.n+1))>=10).float()
        else:
            x_feat += (len_tar!=(args.n+1)).float()
        infs = -torch.ones(probs.size(1)).cuda()*float('Inf')
        if 'rew1' in args.debug_opt:
            logits = torch.zeros(probs.size(1)).cuda()
        else:
            # log-prob of the whole sequence under the iid process
            logits = torch.log(probs).sum(0).squeeze()
        if 'i' in args.feat:
            # Partial-credit filtering: near-motif matches (feature 0.5)
            # or wrong-length strings pay a log(0.5) penalty.
            log_05 = np.log(0.5)
            logits = torch.where(((x_feat==0) | (x_feat==0.5)) & (len_feat==0), logits, infs)
            logits = torch.where(((x_feat==0.5) & (len_feat==0))|((x_feat==0) & (len_tar!=(args.n+1))), logits+log_05, logits)
            if np.random.rand()<0.001:
                # occasional debug dump
                print('X', input[:,:5], 'feat', x_feat[:5], logits[:5])
        else:
            x_feat = torch.clamp(x_feat, min=0, max=1)
            # if all features are on - use logits, else prob is 0
            logits = torch.where(x_feat==0, logits, infs)
        #print(logits[:10].data.cpu().numpy(), len_tar[:10], (len_tar!=(args.n+1))[:10], args.n+1)
        # [batch]
        return logits.unsqueeze(1), None, log_lin
class GAMModel(nn.Module):
    """Global-local autoregressive model: frozen(-able) LSTM base plus a
    log-linear layer `lin_lambda` over the oracle motif features.

    With args.theta_fixed the LSTM/decoder are frozen and only the
    log-linear lambda weights train (pure energy-based correction).
    """

    def __init__(self, ntoken, ninp, nhid, nlayers, feat, motifs, dropout=0.5):
        super(GAMModel, self).__init__()
        self.drop = nn.Dropout(dropout)
        self.feat = feat
        self.motifs = motifs
        self.encoder = nn.Embedding(ntoken, ninp)
        # Fixed one-hot rows for tokens: 0 1 <eos> <bos> PAD
        one_hot_vecs = np.array([[1,0,0], [0,1,0], [0,0,1], [0,0,0], [0,0,0]])
        self.encoder.weight.data.copy_(torch.from_numpy(one_hot_vecs))
        self.freeze_layer(self.encoder)
        self.rnn = nn.LSTM(ninp, nhid, nlayers, dropout=dropout)
        # <bos> is not in the output vocabulary
        self.decoder = nn.Linear(nhid, ninp)
        if args.theta_fixed:
            # train only lambda: freeze the autoregressive base
            self.freeze_layer(self.decoder)
            self.freeze_layer(self.rnn)
        # NOTE(review): duplicate assignment — self.motifs is set above.
        self.motifs = motifs
        # one lambda weight per active feature flag
        nfeat = sum([sum([int(e!='0') for e in el]) for el in feat])
        self.lin_lambda = nn.Linear(nfeat, 1)
        # lambda starts at exactly zero; bias is fixed at zero.
        self.lin_lambda.bias.data = self.lin_lambda.bias.data * 0
        self.lin_lambda.bias.requires_grad = False
        self.lin_lambda.weight.data = self.lin_lambda.weight.data * 0
        self.init_weights()
        self.nhid = nhid
        self.nlayers = nlayers

    def freeze_layer(self, layer):
        for param in layer.parameters():
            param.requires_grad = False

    def init_weights(self):
        initrange = 0.1
        self.decoder.bias.data.fill_(0)
        self.decoder.weight.data.uniform_(-initrange, initrange)

    def forward(self, input, hidden, len_inp, mask):
        """Return (masked logits, hidden, lambda·features).

        `len_inp` must be sorted descending for pack_padded_sequence.
        """
        emb = self.encoder(input)
        emb_pack = torch.nn.utils.rnn.pack_padded_sequence(emb, len_inp, batch_first=False)
        out_pack, hidden = self.rnn(emb_pack, hidden)
        output, _ = torch.nn.utils.rnn.pad_packed_sequence(out_pack, batch_first=False)
        output = torch.mul(output, mask)
        # [seq_len x batch x nhid]
        output = self.drop(output)
        # [ seq_len*batch x ntok]
        decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))
        # log-linear potential from the oracle features
        x_feat = get_features(input, self.motifs, self.feat)
        log_lin = self.lin_lambda(x_feat)
        decoded = torch.mul(decoded.view(output.size(0), output.size(1), decoded.size(1)), mask)
        return decoded, hidden, log_lin

    def init_hidden(self, bsz):
        weight = next(self.parameters()).data
        return (Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()),
                Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()))
def oracle_features(s, motifs, feat):
    # s: seq_len x 1
    # output: [ nfeat ]
    # (motif, supermotif, submotif__2, 1st bit==0, 10101_len_m, 1001001_le_m_2, 00110011_len_m__2)
    """Compute binary/fractional oracle features for one digit string.

    Feature convention: 0 = feature present / satisfied, 1 = absent.
    `feat[i][j]` selects the feature type per position ('1', 'e', 's',
    'l', 'M', 'v', 'm', 'i'); '0' positions contribute nothing.
    """
    out = []
    # index of the first content character (skips <BOS> when present)
    idx = min(1, len(s)-1)
    nfeat = sum([sum([int(e!='0') for e in el]) for el in feat])
    i = 0
    for j in range(len(feat[i])):
        if feat[i][j] == '1':
            # correlated features
            if j < len(args.feat)-4:
                if args.nmotifs == 1:
                    if j == len(args.feat)-7:
                        # motif
                        if args.mtype == 'm':
                            out += [1 - int(motifs[i] in s)]
                        elif args.mtype == 'mult':
                            # divisibility feature: strip <BOS>=3 and
                            # everything from <EOS>=2 on, then test the
                            # binary value modulo args.motif.
                            if s[0] == '3':
                                digits = s[1:]
                            else:
                                digits = s
                            end_idx = digits.find('2')
                            if end_idx != -1:
                                digits = digits[:end_idx]
                            if digits:
                                out += [1-int(int('0b'+digits,2)%args.motif == 0 and int('0b'+digits,2)!=0)]
                            else:
                                out += [1]
                    elif j == len(args.feat)-6:
                        # supermotif
                        motif_j = motifs[i] + '0'*1
                        out += [1 - int(motif_j in s)]
                    elif j == len(args.feat)-5:
                        # submotif
                        motif_j = motifs[i][:len(motifs[i])//2]
                        out += [1 - int(motif_j in s)]
                elif args.nmotifs == 2:
                    # NOTE(review): `j in [j == len(args.feat)-8, ...]`
                    # builds a list of *booleans*; this looks like a bug
                    # (intended `j in [len(args.feat)-8, ...]`) — confirm
                    # before relying on the two-motif path.
                    if j in [j == len(args.feat)-8, j == len(args.feat)-6]:
                        # motif
                        out += [1 - int(motifs[max(0, j-1)] in s)]
                    elif j in [j == len(args.feat)-7, j == len(args.feat)-5]:
                        # submotif
                        motif_j = motifs[max(0, j-2)][:len(motifs[max(0, j-2)])//2]
                        out += [1 - int(motif_j in s)]
            else:
                # distractive features
                if j == len(args.feat)-4:
                    # first bit
                    out += [1 - int(s[idx]=='1')]
                # distractor
                elif j == len(args.feat)-3:
                    pref = '10101'
                    motif_j = (pref*args.n)[:len(motifs[i])]
                    out += [1 - int(motif_j in s)]
                elif j == len(args.feat)-2:
                    pref = '1001001'
                    motif_j = (pref*args.n)[:len(motifs[i])+2]
                    out += [1 - int(motif_j in s)]
                elif j == len(args.feat)-1:
                    pref = '00110011'
                    motif_j = (pref*args.n)[:len(motifs[i])//2]
                    out += [1 - int(motif_j in s)]
        elif feat[i][j] == 'e':
            # edit distance
            out += [get_edit_frc(s, motifs[i])]
        elif feat[i][j] == 's':
            # longest-common-substring fraction
            out += [get_longestsubstr_frc(s, motifs[i])]
        elif feat[i][j] == 'l':
            # length deviates from n+1 by 3 or more
            out += [int(np.abs(len(s)-args.n-1)>=3)]
        elif feat[i][j] == 'M':
            # normalized length
            out += [(len(s)*1.0)/args.max_len]
        elif feat[i][j] == 'v':
            # normalized squared length
            out += [(1.0*len(s)**2)/(args.max_len**2)]
        elif feat[i][j] == 'm':
            # combined substring + edit-distance score
            out += [get_longestsubstr_frc(s, motifs[i]) + get_edit_frc(s, motifs[i])]
        elif feat[i][j] == 'i':
            # ternary feature: exact motif match (0), close match (0.5),
            # far (1), based on the longest common substring length.
            val = get_longestsubstr(s, motifs[i])
            if np.abs(val-len(motifs[i]))==0:
                out += [0]
            elif np.abs(val-len(motifs[i]))<=3:
                out += [0.5]
            else:
                out += [1]
            if np.random.rand()<0.00001:
                # rare debug dump
                print('X', s, 'val', val, np.abs(val-len(motifs[i])), out)
    return out
def get_longestsubstr(s, motif):
    """Return the length of the longest common substring of `s` and `motif`.

    Standard O(len(s)*len(motif)) DP: e[j, i] holds the length of the
    common suffix of motif[:j] and s[:i]; the answer is the maximum entry.
    (Removed a dead `e_ij = []` local from the inner loop; the running
    max only needs updating on the match branch, where e[j, i] > 0.)
    """
    n, m = len(s)+1, len(motif)+1
    e = np.zeros((m,n))
    max_lss = 0
    for j in range(1,m):
        for i in range(1,n):
            if s[i-1]!=motif[j-1]:
                e[j,i]=0
            else:
                e[j,i] = 1 + e[j-1,i-1]
                max_lss = max(max_lss, e[j,i])
    return max_lss
def get_longestsubstr_frc(s, motif):
    """Fraction of `motif` NOT covered by its longest common substring with `s`
    (0.0 = motif occurs in full, 1.0 = no character in common)."""
    longest = get_longestsubstr(s, motif)
    return 1 - (1. * longest) / len(motif)
def get_edit_frc(s, motif):
    """Approximate-substring edit distance of `motif` within `s`,
    normalized by len(motif) (0.0 = exact occurrence, at most 1.0).

    Row 0 of the DP table stays 0 for i > 0 (free prefix of `s`) and the
    minimum is taken over the last row (free suffix), so this is the
    minimum edit distance of `motif` to any substring of `s`.
    """
    def edit_distance(subs, motif):
        n, m = len(subs)+1, len(motif)+1
        #assert m<=n
        e = np.zeros((m,n))
        e[0,0] = 0
        # deleting a motif prefix costs j; e[0, i>0] stays 0 (free prefix)
        for j in range(1,m):
            e[j,0]= j
        for j in range(1,m):
            for i in range(1,n):
                e_ij = []
                if j>0:
                    e_ij += [e[j-1,i]+1]
                if i>0:
                    e_ij += [e[j,i-1]+1]
                if j>0 and i>0:
                    e_ij += [e[j-1, i-1]+ int(subs[i-1]!=motif[j-1])]
                if e_ij:
                    e[j,i] = min(e_ij)
        # free suffix: best alignment ending anywhere in `subs`
        return 1.0*min(e[-1,:])
    ed_dist = edit_distance(s, motif)
    edit_frac = ed_dist/len(motif)
    # min over the last row is bounded by e[-1, 0] == len(motif)
    assert edit_frac<=1
    return edit_frac
def get_edit_frc1(s, motif):
    """Brute-force variant of get_edit_frc: full edit distance between
    `motif` and every substring s[i:i+j], minimized, then normalized.

    O(len(s)^2) DP runs — far slower than get_edit_frc; appears unused
    in the visible code (oracle_features calls get_edit_frc).
    """
    def edit_distance(subs, motif):
        n, m = len(subs)+1, len(motif)+1
        #assert m<=n
        e = np.ones((m,n))*m
        e[0,0] = 0
        # full boundary costs on both axes (no free prefix here)
        for j in range(1,m):
            e[j,0]=j
        for i in range(1,n):
            e[0,i]=i
        for j in range(1,m):
            for i in range(1,n):
                e_ij = []
                if j>0:
                    e_ij += [e[j-1,i]+1]
                if i>0:
                    e_ij += [e[j,i-1]+1]
                if j>0 and i>0:
                    e_ij += [e[j-1, i-1]+ int(subs[i-1]!=motif[j-1])]
                if e_ij:
                    e[j,i] = min(e_ij)
        return 1.0*min(e[-1,:])
    ed_dist = len(motif)
    # NOTE(review): s[i:i+j] with j ranging over [i, len(s)) gives
    # windows of length j, not end index j — confirm intended coverage.
    for i in range(len(s)):
        for j in range(i, len(s)):
            ed_dist = min(ed_dist, edit_distance(s[i:i+j], motif))#
    edit_frac = ed_dist/len(motif)
    assert edit_frac<=1
    return edit_frac
def get_features(var, motifs, feat):
    # returns the results of identifying oracle features in the input binary sequence
    # 0 = feature exists
    # var: [ seq_len x batch ] tensor of token indices
    # output: [batch x nfeat] float tensor on the GPU
    def var_to_str(a):
        # Render each batch column as a string of digits (one char per token).
        a = a.data.cpu().numpy()
        b = []
        for i in range(a.shape[1]):
            b += [''.join([str(el) for el in a[:,i]])]
        return b
    x = var_to_str(var)
    out = []
    for b in x:
        # oracle_features (defined above) maps one string to its feature vector
        out += [oracle_features(b, motifs, feat)]
    return torch.tensor(out).cuda().float()
def argmax_quadratic(left, right, a, b):
    '''
    Find the argmax of $ax^2 + bx$ on the interval [left,right].

    Bug fixed: the original fell back to ``np.argmax`` over the two endpoint
    *values*, returning the index 0 or 1 instead of the endpoint itself.
    That was coincidentally correct only for the interval [0, 1] (the only
    call in this file); for any other interval it was wrong.  Ties go to
    *left*, matching np.argmax's first-occurrence behaviour.
    '''
    def _value(x):
        return a * x ** 2 + b * x

    if a < 0:
        # Concave parabola: the unconstrained maximum sits at the vertex.
        global_argmax = -b / (2 * a)
        if left < global_argmax and global_argmax < right:
            return global_argmax
    # Convex case, or concave with the vertex outside the interval:
    # the maximum is attained at one of the endpoints.
    return left if _value(left) >= _value(right) else right
# -----------------------------------------------
# -------------------- utils --------------------
def to_one_hot(y, n_dims=None):
    """ Take an integer vector (tensor of variable) and convert it to 1-hot matrix. """
    # Result: [numel(y) x n_dims] float tensor on the GPU.  When n_dims is
    # not given, the module-level vocabulary size ``ninp`` is used.
    y_tensor = y.data if isinstance(y, Variable) else y
    y_tensor = y_tensor.type(torch.LongTensor).view(-1, 1)
    n_dims = n_dims if n_dims is not None else ninp
    y_one_hot = torch.zeros(y_tensor.size(0), n_dims).scatter_(1, y_tensor, 1).cuda()
    # Re-wrap in Variable only if the input was one (legacy torch API).
    return Variable(y_one_hot) if isinstance(y, Variable) else y_one_hot
def get_log_r(r_output, ce_target, mask_tar, ce_criterion):
    # get logits from the AMs output layer for a specific target sequence
    # r_output: [seq_len x batch x ninp] -> log_r_seq: [seq_len x batch x 1]
    # ce_target: [seq_len x batch]; indices
    # Returns the per-step log-probability of ce_target under r, with PAD
    # positions zeroed.  NOTE: ce_criterion is unused in this body --
    # log-softmax + one-hot gather is applied directly.
    # mask PAD symbol to keep short output vocabulary
    ce_target = torch.mul(ce_target.float(), mask_tar[:,:,0]).long()
    # [(n+1) x batch x 1]
    r_output = torch.nn.functional.log_softmax(r_output, dim=2)
    # gather the log-prob of the target token at every position
    log_r_seq = torch.sum(r_output.view(-1, ninp) * to_one_hot(ce_target.view(-1)), dim = 1)
    log_r_seq = torch.mul(log_r_seq.view(mask_tar.size()), mask_tar)
    # [seq_len x batch x 1]
    return log_r_seq
def get_lr(optimizer):
    """Return the learning rate of the optimizer's first parameter group
    (None when there are no parameter groups)."""
    groups = optimizer.param_groups
    if groups:
        return groups[0]['lr']
    return None
def init_rnn_from_proposal(model_r, policy_log, policy):
    # copy model_r to model_q
    # Builds a fresh RNNModel with the requested policy flags and copies
    # every parameter whose name exists in both state dicts from the
    # proposal model_r; parameters unique to model_q keep their fresh init.
    model_q = RNNModel(ntoken, ninp, nhid, nlayers, dropout, policy=policy, policy_log=policy_log)
    model_q.cuda()
    pretrained_dict = model_r.state_dict()
    model_dict = model_q.state_dict()
    # 1. keep only the keys that the new model also has
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
    # 2. overwrite entries in the existing state dict
    model_dict.update(pretrained_dict)
    # 3. load the new state dict
    model_q.load_state_dict(model_dict)
    return model_q
def cat_variable_length(a, b):
    """Concatenate two [seq_len x batch] index tensors along the batch
    dimension, right-padding the shorter one with the module-level PAD
    symbol so both have the same number of time steps.

    Bug fixed: the padding created for *b* was moved to CUDA based on
    ``a.is_cuda`` (copy-paste error); it now follows each tensor's own
    device.
    """
    seq_len = max(a.size(0), b.size(0))

    def _pad_to(t, target_len):
        # Append PAD rows until t has target_len time steps.
        if t.size(0) < target_len:
            padding = torch.ones((target_len - t.size(0), t.size(1))).long() * PAD
            if t.is_cuda:
                padding = padding.cuda()
            t = torch.cat((t, padding), dim=0)
        return t

    return torch.cat((_pad_to(a, seq_len), _pad_to(b, seq_len)), dim=1)
def isfinite(x):
    """
    Element-wise finiteness test for a tensor: True where x is neither
    NaN nor +/-inf.  (Modern torch offers torch.isnan / torch.isfinite.)
    url: https://gist.github.com/wassname/df8bc03e60f81ff081e1895aabe1f519
    """
    # x == x filters out NaN (NaN != NaN); (x + 1) != x filters out +/-inf.
    finite_mask = (x == x) & ((x + 1) != x)
    return finite_mask
def sample_from_rnn(model):
    # Monte-Carlo diagnostic: draw 5000 sequences from the model and report
    # (a) the fraction that contain a motif (or, for mtype=='mult', that
    # encode a non-zero binary multiple of args.motif) and (b) the average
    # sampled length.
    n = args.n
    motifs = all_motifs[args.n].split('.')
    batch_size = 5000
    x, log_pi, inp, len_inp, action, mask_tar = sample_data_inp_targ_vary(model,
        batch_size, max_len=500)
    avg_len = np.round(len_inp.float().mean().data.cpu().numpy(),decimals=1)
    print('avg len ', avg_len)
    x = x.data.cpu().numpy()
    count = 0
    for i in range(x.shape[1]):
        res = ''.join([str(x[j,i]) for j in range(x.shape[0])])
        curr_count = 0
        if args.mtype == 'mult':
            # token 3 is BOS (see sample_lm_vary): strip it before parsing
            if res[0] == '3':
                digits=res[1:]
            else:
                digits=res
            # token 2 is EOS: keep only the digits before it
            end_idx = digits.find('2')
            if end_idx != -1:
                digits = digits[:end_idx]
            if digits:
                curr_count += int(int('0b'+digits,2)%args.motif == 0 and int('0b'+digits,2)!=0)
        else:
            for motif in motifs:
                if motif in res:
                    curr_count += 1
        # count each sequence at most once, however many motifs it contains
        count += min(1, curr_count)
    print('%d motifs in total %d' % (count, x.shape[1]))
    motif_freq = (1.0*count)/x.shape[1]
    return motif_freq, avg_len
def logsumexp(x, dim=None):
    """Numerically stable log(sum(exp(x))) for torch tensors.

    Subtracts the max before exponentiating to avoid overflow.  Bug fixed:
    the dim=None branch called ``numpy.log`` (this module imports numpy as
    ``np``, so that name would not resolve) on a torch tensor; it now uses
    ``torch.log`` consistently.  The duplicated max computations are merged.

    dim=None reduces over all elements (0-dim result); otherwise reduces
    along *dim*, which is removed from the result (matching ``x.max(dim)``).
    """
    if dim is None:
        xmax = x.max()
        return xmax + torch.log(torch.exp(x - xmax).sum())
    # keepdim=True variant broadcasts for the subtraction; the plain max
    # gives the reduced shape expected in the result.
    xmax_keep, _ = x.max(dim, keepdim=True)
    xmax_flat, _ = x.max(dim)
    return xmax_flat + torch.log(torch.exp(x - xmax_keep).sum(dim))
# -----------------------------------------------
# -------------- sampling from LM ---------------
def sample_lm_vary(model, batch_size_i, max_len=None, critic=False):
    # sample strings of varying length
    # output: [ seq_len x batch ]
    # Ancestral sampling: feed the previous symbol back in, one step at a
    # time, stopping a sequence once it has emitted EOS and breaking out of
    # the loop when every sequence has terminated.
    model.eval()
    # [ 1 x batch ]
    # <pad> idx = 4
    if not max_len:
        max_len=args.n*2+1
    EOS = 2; BOS = 3
    # all entries start as the SAME BOS tensor; each is rebound by torch.cat
    # below, so the aliasing is harmless
    out = [(torch.ones(1)*BOS).cuda().long()]*batch_size_i # contains sequences of variable lenght
    symb = start_symbol[:,:batch_size_i]
    hidden = model.init_hidden(batch_size_i)
    len_inp = torch.ones((batch_size_i), dtype=torch.int64).cuda()
    mask = torch.ones((1, batch_size_i, 1)).cuda()
    all_logits = torch.ones((0, batch_size_i, ninp)).cuda()
    if critic:
        all_z = torch.zeros((0, batch_size_i, 1)).cuda()
    for i in range(max_len):
        # [1 x batch x ntok]
        if critic:
            logits, hidden, est_z = model(symb, hidden, len_inp, mask, critic=critic)
        else:
            logits, hidden = model(symb, hidden, len_inp, mask)[:2]
        probs = softm(logits)
        cat_dist = torch.distributions.Categorical(probs=probs)
        # [ 1 x batch ]
        symb = cat_dist.sample()
        flag = False
        for b in range(batch_size_i):
            # if the sequence has not terminated yet
            if i==0 or (i>0 and out[b][-1] != EOS):
                out[b] = torch.cat((out[b], symb[:1,b]), dim=0)
                flag = True
        if not flag:
            break
        # TODO: instead of cat write into predefined array
        all_logits = torch.cat((all_logits, logits), dim=0)
        if critic:
            all_z = torch.cat((all_z, est_z), dim=0)
    out = torch.nn.utils.rnn.pad_sequence(out, batch_first=False, padding_value=PAD)
    model.train()
    # <bos> 010010101 <eos>
    if critic:
        return out, all_logits, all_z
    else:
        return out, all_logits
def sample_lm_vary_new(model, batch_size_i, max_len=None, critic=False):
    # sample strings of varying length
    # out: [ seq_len+1 x batch ]
    # all_logits: [ seq_len x batch x ninp ]
    # all_z: [ seq_len x batch x 1]
    # Vectorised rewrite of sample_lm_vary: always samples max_len steps into
    # preallocated tensors, then post-processes each column -- everything
    # after the first EOS is overwritten with PAD (and its logits zeroed),
    # and the outputs are truncated to the longest surviving sequence.
    model.eval()
    if not max_len:
        max_len=args.n*2+1
    EOS = 2; BOS = 3
    symb = start_symbol[:,:batch_size_i]
    hidden = model.init_hidden(batch_size_i)
    len_inp = torch.ones((batch_size_i), dtype=torch.int64).cuda()
    mask = torch.ones((1, batch_size_i, 1)).cuda()
    all_logits = torch.ones((max_len, batch_size_i, ninp)).cuda()
    out = torch.ones((max_len+1, batch_size_i)).cuda().long()
    out[0,:] = out[0,:]*BOS
    if critic:
        all_z = torch.zeros((max_len, batch_size_i, 1)).cuda()
    for i in range(max_len):
        # [1 x batch x ntok]
        if critic:
            logits, hidden, est_z = model(symb, hidden, len_inp, mask, critic=critic)
        else:
            logits, hidden = model(symb, hidden, len_inp, mask)[:2]
        probs = softm(logits)
        cat_dist = torch.distributions.Categorical(probs=probs)
        # [ 1 x batch ]
        symb = cat_dist.sample()
        out[i+1:i+2] = symb[:1]
        all_logits[i:i+1] = logits
        if critic:
            all_z[i:i+1] = est_z
    max_seq_len = 0
    for b in range(batch_size_i):
        ends = (out[:,b] == EOS).nonzero()
        if ends.size(0) == 0:
            continue # string does not contain EOS
        idx = ends[0,0]
        if idx == max_len:
            max_seq_len = max_len
        out[idx+1:,b] = out[idx+1:,b]*0 + PAD
        all_logits[idx:,b] = all_logits[idx:,b]*0
        if critic:
            all_z[idx:,b] = all_z[idx:,b]*0
        max_seq_len = max(max_seq_len, idx)
    out = out[:max_seq_len+1]
    all_logits = all_logits[:max_seq_len]
    if critic:
        all_z = all_z[:max_seq_len]
    model.train()
    # <bos> 010010101 <eos>
    if critic:
        return out, all_logits, all_z
    else:
        return out, all_logits
def sample_lm_vary_hid(model, batch_size_i, max_len=None, critic=False):
    # output: [ seq_len x batch ]
    # Same ancestral sampling as sample_lm_vary (EOS token == 2, BOS == 3)
    # but additionally records the LSTM hidden and cell states at every
    # step: hids / c_hids grow to [nlayers x seq_len*batch x nhid] on CPU.
    model.eval()
    # [ 1 x batch ]
    # <pad> idx = 4
    if not max_len:
        max_len=args.n*2+1
    out = [(torch.ones(1)*3).cuda().long()]*batch_size_i # contains sequences of variable lenght
    symb = start_symbol[:,:batch_size_i]
    hidden = model.init_hidden(batch_size_i)
    len_inp = torch.ones((batch_size_i), dtype=torch.int64)
    mask = torch.ones((1, batch_size_i, 1)).cuda()
    all_logits = torch.ones((0, batch_size_i, ninp)).cuda()
    hids = torch.zeros(model.nlayers, 0, model.nhid)
    c_hids = torch.zeros(model.nlayers, 0, model.nhid)
    if critic:
        all_z = torch.zeros((0, batch_size_i, 1)).cuda()
    for i in range(max_len):
        # [1 x batch x ntok]
        if critic:
            logits, hidden, est_z = model(symb, hidden, len_inp, mask, critic=critic)
        else:
            logits, hidden = model(symb, hidden, len_inp, mask)[:2]
        probs = softm(logits)
        cat_dist = torch.distributions.Categorical(probs=probs)
        # [ 1 x batch ]
        symb = cat_dist.sample()
        flag = False
        for b in range(batch_size_i):
            # only extend sequences that have not emitted EOS (token 2) yet
            if i==0 or (i>0 and out[b][-1] != 2):
                out[b] = torch.cat((out[b], symb[:1,b]), dim=0)
                flag = True
        if not flag:
            break
        # detach so the recorded states do not keep the graph alive
        hids = torch.cat((hids, hidden[0].cpu()), dim=1).detach()
        c_hids = torch.cat((c_hids, hidden[1].cpu()), dim=1).detach()
        all_logits = torch.cat((all_logits, logits), dim=0)
        if critic:
            all_z = torch.cat((all_z, est_z), dim=0)
    out = torch.nn.utils.rnn.pad_sequence(out, batch_first=False, padding_value=PAD)
    # <bos> 010010101 <eos>
    model.train()
    if critic:
        return out, all_logits, hids, c_hids, all_z
    else:
        return out, all_logits, hids, c_hids
def sample_wn(model, batch_size_i, max_len=None):
    # sample strings of varying length from white noise model
    # output: [ seq_len x batch ]
    # The white-noise model has a fixed per-token distribution model.probs,
    # so the same probs/logits are reused at every time step; termination
    # handling (EOS token == 2, BOS == 3) mirrors sample_lm_vary.
    # [ 1 x batch ]
    # <pad> idx = 4
    if not max_len:
        max_len=args.n*2+1
    out = [(torch.ones(1)*3).cuda().long()]*batch_size_i # contains sequences of variable lenght
    all_logits = torch.ones((0, batch_size_i, ninp)).cuda()
    # [1 x batch x ntok]
    probs = model.probs.repeat(1, batch_size_i).view(1, -1, ninp)
    logits = torch.log(probs)
    for i in range(max_len):
        cat_dist = torch.distributions.Categorical(probs=probs)
        # [ 1 x batch ]
        symb = cat_dist.sample()
        flag = False
        for b in range(batch_size_i):
            # if the sequence has not terminated yet
            if i==0 or (i>0 and out[b][-1] != 2):
                out[b] = torch.cat((out[b], symb[:1,b]), dim=0)
                flag = True
        if not flag:
            break
        all_logits = torch.cat((all_logits, logits), dim=0)
    out = torch.nn.utils.rnn.pad_sequence(out, batch_first=False, padding_value=PAD)
    x = out; log_pi = all_logits
    len_inp, mask_tar, inp, targets = get_length_mask(out)
    # <bos> 010010101 <eos>
    return x, log_pi, inp, len_inp, targets, mask_tar
# shared softmax over the vocabulary dimension of [seq_len x batch x ntok] logits
softm = nn.Softmax(dim=2)
def sample_data_inp_targ_vary_hid(model, batch_size_i, max_len=None, critic=False):
    # padded variable lengths sequences
    # step by step
    # [ 1 x seq_len*batch ]
    # Wraps sample_lm_vary_hid: sorts the batch by descending length (as
    # required for packed RNN input), then splits into inputs x[:-1] and
    # shifted targets x[1:] with a non-PAD mask; the recorded hidden/cell
    # states are permuted consistently with the batch reordering.
    if not max_len:
        max_len = args.n*2+1
    # x: [seq_len x batch]
    # log_pi: [seq_len x batch x ninp]
    # hids: [nlayers x batch*seq_len x nhid]
    # est_z: [(n+1) x batch x 1]
    if critic:
        x, log_pi, hids, c_hids, est_z = sample_lm_vary_hid(model, batch_size_i, max_len, critic=critic)
    else:
        x, log_pi, hids, c_hids = sample_lm_vary_hid(model, batch_size_i, max_len)
    len_inp = (x!= PAD).sum(0)
    len_inp, perm_idx = len_inp.sort(0, descending=True)
    len_inp = len_inp - 1
    x = x[:, perm_idx]
    inp = x[:-1,:]
    log_pi = log_pi[:,perm_idx]
    # reshape to expose the batch axis, permute it, then flatten back
    hids = hids.view(model.nlayers, inp.size(0), inp.size(1), model.nhid)[:, :, perm_idx]
    hids = hids.view(model.nlayers, inp.size(0)*inp.size(1), model.nhid)
    c_hids = c_hids.view(model.nlayers, inp.size(0), inp.size(1), model.nhid)[:, :, perm_idx]
    c_hids = c_hids.view(model.nlayers, inp.size(0)*inp.size(1), model.nhid)
    if critic:
        est_z = est_z[:,perm_idx]
    # [(n+1) x batch]
    targets = x[1:,:]
    mask_tar = (targets != PAD).unsqueeze(2).float().cuda()
    len_tar = (targets != PAD).sum(0)
    if critic:
        return x, log_pi, inp, len_inp, targets, mask_tar, hids, c_hids, est_z
    else:
        return x, log_pi, inp, len_inp, targets, mask_tar, hids, c_hids
def sample_data_inp_targ_vary(model, batch_size_i, max_len=None, critic=False): ###TODO
    # padded variable lengths sequences
    # [ seq_len x batch ]
    # Wraps sample_lm_vary: sorts columns by descending length (for packed
    # RNN input), then splits into inputs x[:-1] and shifted targets x[1:]
    # together with a float mask over non-PAD target positions.
    if not max_len:
        max_len = args.n*2+1
    if critic:
        # est_z: [(n+1) x batch x 1]
        x, log_pi, est_z = sample_lm_vary(model, batch_size_i, max_len, critic=critic)
    else:
        x, log_pi = sample_lm_vary(model, batch_size_i, max_len)
    len_inp = (x!= PAD).sum(0)
    len_inp, perm_idx = len_inp.sort(0, descending=True)
    len_inp = len_inp - 1
    x = x[:, perm_idx]
    inp = x[:-1,:]
    log_pi = log_pi[:,perm_idx]
    if critic:
        est_z = est_z[:,perm_idx]
    # [(n+1) x batch]
    targets = x[1:,:]
    mask_tar = (targets != PAD).unsqueeze(2).float().cuda()
    len_tar = (targets != PAD).sum(0)
    if critic:
        return x, log_pi, inp, len_inp, targets, mask_tar, est_z
    else:
        return x, log_pi, inp, len_inp, targets, mask_tar
def get_length_mask(x):
    # x: [seq_len x batch] padded index tensor.
    # Sorts columns by descending non-PAD length (packed-RNN convention),
    # splits into inputs x[:-1] and shifted targets x[1:], and builds a
    # float mask over non-PAD target positions.
    # Returns (target lengths, mask, inputs, contiguous targets).
    len_inp = (x!= PAD).sum(0)
    len_inp, perm_idx = len_inp.sort(0, descending=True)
    len_inp = len_inp - 1
    x = x[:, perm_idx]
    inp = x[:-1,:]
    # [(n+1) x batch]
    targets = x[1:,:]
    mask_tar = (targets != PAD).unsqueeze(2).float().cuda()
    len_tar = (targets != PAD).sum(0)
    return len_tar, mask_tar, inp, targets.contiguous()
# -----------------------------------------------
# -------------- RS and SNIS --------------------
def upper_bound(params):
    """Rejection-sampling bound beta in linear space: exp(log_upper_bound(params))."""
    log_beta = log_upper_bound(params)
    return torch.exp(log_beta)
def log_upper_bound(params):
    # for rejection sampling
    # Q(x) = beta*r(x) >= exp(lambda*phi(x))*r(x) = P_lambda(x), all x
    # beta >= exp(lambda*phi(x)), all x
    # linear for all features except x and x**2 feats
    # params: [1 x nfeat] lambda weights.  Walks the feature string
    # args.feat; the adjacent 'M' (length) and 'v' (length^2) coefficients
    # are bounded jointly by maximising the quadratic on [0, 1]; every
    # other coefficient contributes only when positive (assumes the
    # corresponding features lie in [0, 1] -- TODO confirm).
    out = torch.zeros((1)).cuda()
    i = 0
    while i < params.size(1):
        # for the length feature - combine two coefficients
        if 'M' == args.feat[i] and 'v' == args.feat[i+1]:
            a = params[0,i+1]
            b = params[0,i]
            x = argmax_quadratic(0,1, a, b)
            y = a*x**2 + b*x
            out = torch.cat((out, y.unsqueeze(0)), dim=0)
            i += 2
        else:
            assert args.feat[i] not in ['M', 'v']
            curr = params[:1,i]
            if curr > 0:
                out = torch.cat((out, curr), dim=0)
            i += 1
    return out.sum(0)
def rejection_sampling(model, ce_criterion, motifs, feat, ro_stats):
    # q(x)=r(x), Q(x)=q(x)*beta>=P_lambda(x)=r(x)*loglin for any x
    # sample from LM: x ~ q(x)
    # accept with probability ro = P_lambda(x)/Q(x)
    # Keeps drawing batches until ``nsamples`` feature vectors of accepted
    # samples are collected; returns (mean feature vector, updated
    # [acceptance_rate, total_samples, accepted] statistics).
    nfeat = sum([sum([int(e!='0') for e in el]) for el in feat])
    # row 0 is a dummy placeholder; it is sliced off before averaging
    samples = torch.ones((1, nfeat)).cuda()
    batch_size_i = 2*batch_size
    acceptance_rate, total_samples, accepted = ro_stats
    while samples.size(0) <= nsamples:
        x, log_pi, inp, len_inp, targets, mask_tar = sample_data_inp_targ_vary(model, batch_size_i)
        hidden = model.init_hidden(batch_size_i)
        # log_lin [ batch x 1 ]
        r_output, _, log_lin = model(inp, hidden, len_inp, mask_tar) # outpt [seq_len ,batch, ntok]
        # [ batch x 1 ]
        #log_r = get_log_r(r_output, targets, log_lin, mask_tar, ce_criterion)
        #P_lambda = torch.exp(log_r + log_lin)
        # upper bound: P_lambda <= q(x)*exp(max(lambda * feat))
        log_beta = log_upper_bound(model.lin_lambda.weight)
        ro = torch.exp(log_lin - log_beta)[:,0].cpu()
        # running average of the acceptance probability over all proposals
        acceptance_rate = (total_samples*acceptance_rate + ro.sum())/(total_samples+ro.size(0))
        indicator = torch.rand((ro.size(0))) <= ro
        total_samples += ro.size(0)
        accepted = accepted + indicator.sum().float()
        all_feats = get_features(x, motifs, feat)
        for i in range(indicator.size(0)):
            if indicator[i]:
                feat_x = all_feats[i:i+1]
                samples = torch.cat((samples, feat_x), dim=0)
                #accpt_samples = torch.cat((accpt_samples, x[:,i:i+1]), dim=1)
    # samples [ nsamples x nfeat ]
    return samples[1:nsamples+1,:].mean(0), [acceptance_rate, total_samples, accepted]
# keep samples for fixed theta
def get_samples_rs(model, x, inp, len_inp, mask_tar, acceptance_rate, total_samples, motifs, feat, accepted, batch_size_i, samples):
    # One rejection-sampling pass over an already-drawn batch x (theta
    # fixed): computes acceptance probabilities from the log-linear term
    # only, appends the feature vectors of accepted samples to ``samples``
    # and updates the running acceptance statistics.
    all_feats = get_features(x, motifs, feat)
    log_lin = model.lin_lambda(all_feats)
    # [ batch x 1 ]
    #log_r = get_log_r(r_output, targets, log_lin, mask_tar, ce_criterion)
    #P_lambda = torch.exp(log_r + log_lin)
    # upper bound: P_lambda <= q(x)*exp(max(lambda * feat))
    log_beta = log_upper_bound(model.lin_lambda.weight)
    ro = torch.exp(log_lin - log_beta)[:,0].cpu()
    acceptance_rate = (total_samples*acceptance_rate + ro.sum())/(total_samples+ro.size(0))
    indicator = torch.rand((ro.size(0))) <= ro
    total_samples += ro.size(0)
    accepted = accepted + indicator.sum().float()
    for i in range(indicator.size(0)):
        if indicator[i]:
            feat_x = all_feats[i:i+1]
            samples = torch.cat((samples, feat_x), dim=0)
            #accpt_samples = torch.cat((accpt_samples, x[:,i:i+1]), dim=1)
    return samples, acceptance_rate, total_samples, accepted
def cyclic_rejection_sampling(model, ce_criterion, motifs, feat, ro_stats, am_samples):
    # q(x)=r(x), Q(x)>=P_lambda(x) for any x
    # sample from LM: x ~ q(x)
    # accept with probability ro = P_lambda(x)/Q(x)
    # Variant of rejection_sampling that first re-screens a cache of
    # previously drawn samples (am_samples) before drawing fresh batches;
    # the cache is extended with every new batch and returned to the caller.
    nfeat = sum([sum([int(e!='0') for e in el]) for el in feat])
    samples = torch.ones((1, nfeat)).cuda()
    batch_size_i = 2*batch_size
    #accpt_samples = torch.ones((args.n+2, 1)).cuda().long()
    acceptance_rate, total_samples, accepted = ro_stats
    len_inp, mask_tar, inp, targets = get_length_mask(am_samples)
    if am_samples.size(1) != 0:
        samples, acceptance_rate, total_samples, accepted = get_samples_rs(model, am_samples, inp,
        len_inp, mask_tar, acceptance_rate, total_samples, motifs, feat, accepted, batch_size_i, samples)
    while samples.size(0) <= nsamples:
        x, log_pi, inp, len_inp, targets, mask_tar = sample_data_inp_targ_vary(model, batch_size_i)
        samples, acceptance_rate, total_samples, accepted = get_samples_rs(model, x, inp,
        len_inp, mask_tar, acceptance_rate, total_samples, motifs, feat, accepted, batch_size_i, samples)
        am_samples = cat_variable_length(am_samples, x)
    # samples [ nsamples x nfeat ]
    return samples[1:nsamples+1,:].mean(0), [acceptance_rate, total_samples, accepted], am_samples
def sample_data_inp_targ_snis(model, batch_size_i, source_data):
    # padded variable lengths sequences
    # [ seq_len x batch ]
    # SNIS proposal draw.  For 'snis_mix' each batch element comes from the
    # LM with prob 0.5 and from the empirical distribution D with prob 0.5
    # (r_or_d marks which); for 'snis_r' everything comes from the LM.
    x_r, _ = sample_lm_vary(model, batch_size_i)
    if args.train == 'snis_mix':
        d = source_data.size(1)
        # [batch x |D|] empirical distribution
        probs = torch.ones((batch_size_i, d))*(1.0/d)
        cat_dist = torch.distributions.Categorical(probs=probs)
        # [ batch ]
        d_idx = cat_dist.sample()
        # [ seq_len x batch x 1 ]
        x_d = source_data[:, d_idx]
        r_or_d = (torch.rand((batch_size_i))>0.5).cuda()
        # right-pad whichever source is shorter so torch.where can mix them
        seq_len = max(x_r.size()[0], x_d.size()[0])
        if x_r.size()[0] < seq_len:
            x_r = torch.cat((x_r, torch.ones((seq_len-x_r.size()[0], batch_size_i)).long().cuda()*PAD), dim=0)
        if x_d.size()[0] < seq_len:
            x_d = torch.cat((x_d, torch.ones((seq_len-x_d.size()[0], batch_size_i)).long().cuda()*PAD), dim=0)
        x = torch.where(r_or_d, x_r, x_d)
    elif args.train == 'snis_r':
        r_or_d = (torch.ones((batch_size_i))).cuda()
        x = x_r
    len_inp = (x!= PAD).sum(0)
    len_inp, perm_idx = len_inp.sort(0, descending=True)
    # adjust due to the fact of sampling in mixture
    max_len = torch.max(len_inp)
    x = x[:max_len,:]
    len_inp = len_inp - 1
    x = x[:, perm_idx]
    inp = x[:-1,:]
    # [(n+1) x batch]
    targets = x[1:,:]
    mask_tar = (targets != PAD).unsqueeze(2).float().cuda()
    len_tar = (targets != PAD).sum(0)
    return x, inp, len_inp, targets, mask_tar, r_or_d
def snis(model, ce_criterion, motifs, feat, source_data, hash_source, total_feat, total_w):
    # q(x)=0.5r(x) + 0.5D(x), D(x) empirical distribution
    # sample from LM: x ~ q(x)
    # weighted expectation w.r.t w_i = P_lambda(x_i)/q(x_i)
    # Self-normalised importance sampling of the model feature moments.
    # total_feat / total_w accumulate samples and weights from previous
    # calls so the estimate improves over iterations.
    nfeat = sum([sum([int(e!='0') for e in el]) for el in feat])
    batch_size_i = 2*batch_size
    x, inp, len_inp, targets, mask_tar, r_or_d = sample_data_inp_targ_snis(model, batch_size_i, source_data)
    # [batch x nfeat]
    all_feats = get_features(x, motifs, feat)
    hidden = model.init_hidden(batch_size_i)
    # log_lin [ batch x 1 ]
    r_output, _, log_lin = model(inp, hidden, len_inp, mask_tar) # outpt [seq_len ,batch, ntok]
    # [ batch x 1 ]
    log_r = get_log_r(r_output, targets, mask_tar, ce_criterion).sum(0)
    # P_lambda = torch.exp(log_r + log_lin)
    d = source_data.size(1)
    probs = (torch.ones((batch_size_i))*(1.0/d)).cuda()
    if args.train == 'snis_mix':
        # samples that happen to also appear in D are re-labelled as D-draws
        # (hash_source maps hash(x) -> list of strings to resolve collisions)
        for b in range(x.size(1)):
            if r_or_d[b] == 0: continue
            x_i = ''.join([str(el) for el in x[:,b].cpu().numpy()])
            if hash(x_i) in hash_source and x_i in hash_source[hash(x_i)]:
                r_or_d[b] = 0
        q = 0.5*torch.exp(log_r) + 0.5*torch.where(r_or_d, torch.zeros(probs.size()).cuda(), probs).unsqueeze(1)
        w = torch.exp(log_r + log_lin - torch.log(q))
        if total_feat[:,:].size(0) != 0:
            all_feats = torch.cat((total_feat, all_feats), dim=0)
            w = torch.cat((total_w, w), dim=0)
    elif args.train == 'snis_r':
        # q = torch.exp(log_r)
        if total_feat[:,:].size(0) != 0:
            all_feats = torch.cat((total_feat, all_feats), dim=0)
        log_lin = model.lin_lambda(all_feats)
        # log_r + log_lin - log_r
        w = torch.exp(log_lin)
    mean_feats = torch.mul(all_feats, w).sum(0)/w.sum()
    # samples [ nsamples x nfeat ]
    return mean_feats.detach(), w, all_feats
# ------------------------------------------------
# -------------- get cross-entropy ---------------
def evaluate(model, criterion, source_data):
    # Mean per-sequence token cross-entropy of the plain LM over the whole
    # dataset (PAD positions masked out).
    # NOTE(review): the model is left in eval mode on return -- callers
    # appear to rely on train() being re-set elsewhere; confirm.
    model.eval()
    total_loss = 0
    batches_id = list(range(0, source_data.size(1), batch_size))
    for i, batch in enumerate(batches_id):
        len_tar, mask_tar, data, target = get_length_mask(source_data[:,batch:batch+batch_size])
        batch_size_i = mask_tar.size()[1]
        # zero targets at PAD positions to keep the short output vocabulary
        target = torch.mul(target.float(), mask_tar[:,:,0]).long()
        hidden = model.init_hidden(batch_size_i)
        output, hidden = model(data, hidden, 0, mask_tar)
        output_flat = output.view(-1, ninp)
        # [(n+1) x batch x 1]
        loss = criterion(output.view(-1, ninp), target.view(-1)).view(mask_tar.size())
        # mask PAD, average over time per sequence, then over the batch
        loss = torch.div(torch.mul(loss, mask_tar).sum(0).squeeze(), len_tar.float()).mean()
        total_loss += loss.data.float()
    return total_loss / len(batches_id)
def evaluate_ce_pl_ds(model, ce_criterion, source_data, z_estim=None):
    # evaluate cross entropy on the whole dataset
    # Cross-entropy of the normalised hybrid model
    # p_lambda(x) = r(x) * exp(lambda . phi(x)) / Z against the empirical
    # distribution of source_data.  Z is estimated by importance sampling
    # when not supplied.
    model.eval()
    total_loss = 0
    # uniform empirical weight 1/|D| per sequence
    likelih = torch.tensor([[1.0/source_data.size(1)]]*batch_size).cuda()
    batches_id = list(range(0, source_data.size(1), batch_size))
    if not z_estim:
        z_estim = estimate_partition_mc(model, ce_criterion)
    for i, batch in enumerate(batches_id):
        len_tar, mask_tar, data, target = get_length_mask(source_data[:,batch:batch+batch_size])
        batch_size_i = mask_tar.size()[1]
        hidden = model.init_hidden(batch_size_i)
        r_output, hidden, log_lin = model(data, hidden, len_tar, mask_tar)
        # [ batch x 1 ]
        log_r = get_log_r(r_output, target, mask_tar, ce_criterion).sum(0)
        P_lambda = torch.exp(log_r + log_lin)
        p_lambda = P_lambda/z_estim
        ce_loss = (-1*torch.mul(likelih, torch.log(p_lambda))).sum()
        total_loss += ce_loss.data.float()
    return total_loss
def evaluate_ce_r(model, ce_criterion, source_data):
    # Mean per-sequence token cross-entropy of the autoregressive component
    # r of a GAM/P_lambda model (the log-linear output log_lin is ignored).
    model.eval()
    total_loss = 0
    batches_id = list(range(0, source_data.size(1), batch_size))
    for i, batch in enumerate(batches_id):
        len_tar, mask_tar, data, targets = get_length_mask(source_data[:,batch:batch+batch_size])
        batch_size_i = data.size()[1]
        hidden = model.init_hidden(batch_size_i)
        r_output, hidden, log_lin = model(data, hidden, len_tar, mask_tar)
        output_flat = r_output.view(-1, ninp)
        # zero targets at PAD positions to keep the short output vocabulary
        targets = torch.mul(targets.float(), mask_tar[:,:,0]).long()
        curr_loss = torch.mul(ce_criterion(output_flat, targets.view(-1)).view(mask_tar.size()), mask_tar).squeeze().sum(0)
        curr_loss = torch.div(curr_loss, len_tar.float()).mean()
        total_loss += curr_loss.data.float()
    return total_loss / len(batches_id)
def estimate_partition_mc(model, ce_criterion):
    # using importance sampling
    # Z_lambda = E_{x~q(.)}[P_lambda(x)/q(x)] = E_{x~q(.)}[exp(lambda^T feat(x))]
    # sample from q(x) = r(x), use IS to compute expectation w.r.t. p_lambda distribution
    # compute expectation using MC samples
    # Averages N independent batch estimates of size batch_size_i
    # (~1M samples total).
    batch_size_i = 6500
    N = 160
    z_samples = 0
    for _ in range(N):
        if 'wn' in args.train2:
            x, log_pi, inp, len_inp, target, mask_tar = sample_wn(model, batch_size_i)
        else:
            x, log_pi, inp, len_inp, target, mask_tar = sample_data_inp_targ_vary(model, batch_size_i)
        hidden = model.init_hidden(batch_size_i)
        r_output, _, log_lin = model(inp, hidden, len_inp, mask_tar) # outpt [seq_len ,batch, ntok]
        # [ batch x 1 ]
        if 'wn' in args.train2:
            # for filtered white noise
            z_samples += (torch.exp(r_output)!=0).float().mean()
        else:
            z_samples += torch.exp(log_lin).mean()
    return z_samples/N
# ------------------------------------------------
# -------------- train LM using CE ---------------
def single_update_r(model, data, optimizer, lr, criterion):
    """One gradient step of the proposal LM on a [seq_len x batch] batch.

    Computes the per-sequence mean token cross-entropy (PAD positions
    masked out), back-propagates, clips gradients and steps the optimizer.
    Returns the scalar loss.  NOTE: ``lr`` is accepted for interface
    compatibility but unused here -- the optimizer's own rate applies.

    Fix: ``torch.nn.utils.clip_grad_norm`` is deprecated since torch 0.4
    (this file already uses 0.4+ APIs such as ``torch.tensor``); the
    in-place ``clip_grad_norm_`` is used instead.
    """
    model.train()
    len_tar, mask_tar, data, target = get_length_mask(data)
    # zero targets at PAD positions to keep the short output vocabulary
    target = torch.mul(target.float(), mask_tar[:, :, 0]).long()
    batch_size_i = mask_tar.size()[1]
    hidden = model.init_hidden(batch_size_i)
    model.zero_grad()
    optimizer.zero_grad()
    output, hidden = model(data, hidden, 0, mask_tar)  # output: [seq_len x batch x ntok]
    loss = criterion(output.view(-1, ninp), target.view(-1)).view(mask_tar.size())
    # mask PAD, average over time per sequence, then over the batch
    loss = torch.div(torch.mul(loss, mask_tar).sum(0).squeeze(), len_tar.float()).mean()
    loss.backward()
    # to prevent the exploding gradient problem
    torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
    optimizer.step()
    return loss
def train_r(model, criterion, epoch, source_data, lr, optimizer):
    """Train the proposal LM for one epoch with cross-entropy on D.

    source_data: [seq_len x n_sequences] padded index tensor.  Dataset
    columns are shuffled each epoch and consumed in slices of the global
    ``batch_size``; progress is printed every ``log_interval`` batches.

    Bug fixed: the progress print passed 8 values into 7 format slots, so
    every column after "lr" displayed the wrong quantity (ms/batch showed
    the optimizer lr, loss showed the elapsed time, ppl showed the loss,
    and the perplexity was silently dropped).
    """
    total_loss = 0.
    start_time = time.time()
    #criterion2 = nn.CrossEntropyLoss()
    print(batch_size)
    batches_id = list(range(0, source_data.size(1), batch_size))
    shuffle(batches_id)
    # shuffle dataset columns so batch composition changes every epoch
    all_idx = list(range(source_data.size(1)))
    shuffle(all_idx)
    source_data = source_data[:, all_idx]
    for i, batch in enumerate(batches_id):
        loss = single_update_r(model, source_data[:, batch:batch + batch_size], optimizer, lr, criterion)
        total_loss += loss.data.float()
        if i % log_interval == 0 and i > 0:
            cur_loss = total_loss / log_interval
            elapsed = time.time() - start_time
            print('| iter {:3d} | {:5d}/{:5d} batches | lr {:02.6f} | ms/batch {:5.2f} | '
                  'loss {:5.2f} | ppl {:8.5f}'.format(
                      epoch, i, len(batches_id), get_lr(optimizer),
                      elapsed * 1000 / log_interval, cur_loss, math.exp(min(cur_loss, 20))))
            total_loss = 0
            start_time = time.time()
############################# TRAINING-1 #############################
# get proposal r using D
# obtain P_lambda
def training_1():
    # Stage 1 of the pipeline:
    #   (a) fit the autoregressive proposal r on dataset D by cross-entropy
    #       with early stopping on validation loss, then
    #   (b) unless args.train2 requests a cyclic / white-noise variant, fit
    #       the log-linear weights lambda of
    #       P_lambda(x) = r(x) * exp(lambda . phi(x)) by moment matching,
    #       with early stopping on the L1 gap to the data feature moments.
    # Returns r, P_lambda and bookkeeping (losses, writer, optimizer, data).
    Epoch_start_time = time.time()
    global batch_size
    n = args.n
    # ----------------------------------------------- train r on original dataset D ------------------------------------------
    batch_size = min(args.ds_size, 500)
    lr = 0.001
    log_dir = os.path.join(args.logdir,'pg_methods/runs/chpt_%s'%(timestamp))
    os.mkdir(log_dir)
    train_size = size = args.ds_size
    feat = [args.feat]
    motifs = all_motifs[args.n].split('.')
    print('motif %s'%motifs[0])
    # (seq_len, nbatch)
    test_size = 5000
    valid_size = min(max(batch_size, int(0.25*train_size)), 2000)
    if args.mtype == 'mult':
        train_data_D = train_data = load_data_mult(n, train_size, all_motifs[args.n], 'train')
        valid_data_V = valid_data = load_data_mult(n, valid_size, all_motifs[args.n], 'valid')
        test_data = load_data_mult(n, test_size, all_motifs[args.n], 'test')
    else:
        train_data_D = train_data = load_data_motif(n, train_size, all_motifs[args.n], 'train')
        valid_data_V = valid_data = load_data_motif(n, valid_size, all_motifs[args.n], 'valid')
        test_data = load_data_motif(n, test_size, all_motifs[args.n], 'test')
    # empirical feature moments of D -- the moment-matching target below
    train_feat = get_features(train_data, motifs, feat).mean(0)
    print('orig train ds feat = ', train_feat.cpu().numpy())
    print('orig valid ds feat = ', get_features(valid_data,motifs, feat).mean(0).cpu().numpy())
    print('orig test ds feat = ', get_features(test_data,motifs, feat).mean(0).cpu().numpy())
    if args.wandb:
        wandb.log({'true_data_feats': train_feat.cpu().numpy(), 'test_data_feats': get_features(test_data,motifs, feat).mean(0).cpu().numpy()})
    best_val_loss = None
    counter = 0
    patience = 8
    writer = SummaryWriter(log_dir=log_dir)
    model_r = RNNModel(ntoken, ninp, nhid, nlayers, dropout)
    model_r.cuda()
    criterion = nn.CrossEntropyLoss(reduction='none')
    optimizer_r = optim.Adam(model_r.parameters(), lr=lr)
    for epoch in range(1, 100):
        epoch_start_time = time.time()
        train_r(model_r, criterion, epoch, train_data, lr, optimizer_r)
        val_loss = evaluate(model_r, criterion, valid_data)
        print('-' * 89)
        print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {} |'.format(epoch, (time.time() - epoch_start_time),
            val_loss))
        if args.wandb:
            wandb.log({'epoch': epoch, 'r_valid_ce': val_loss})
        print('-' * 89)
        # checkpoint on improvement; stop after `patience` stale epochs
        if not best_val_loss or val_loss < best_val_loss:
            with open(os.path.join(log_dir,'chpt_%s_r.pt'%(timestamp)), 'wb') as f:
                torch.save(model_r, f)
            best_val_loss = val_loss
            counter = 0
        else:
            # Anneal the learning rate if no improvement has been seen in the validation dataset.
            if best_val_loss:
                counter += 1
            if counter >= patience:
                break
    del model_r
    best_val_loss_r = best_val_loss
    # reload the best checkpoint found during early stopping
    model_r = torch.load(os.path.join(log_dir,'chpt_%s_r.pt'%(timestamp)))
    test_loss = evaluate(model_r, criterion, test_data)
    if args.wandb:
        wandb.log({'r_test_ce': test_loss})
    if args.tensorboard:
        writer.add_scalar('data/r_test_ce', test_loss, 0)
    train_loss = evaluate(model_r, criterion, train_data)
    print('-' * 89)
    print('| end of epoch {:3d} | time: {:5.2f}s | test loss {} | '
            'test ppl {}'.format(epoch, (time.time() - epoch_start_time),
        test_loss, math.exp(min(test_loss, 20))))
    print('-' * 89)
    entp = entp_motifs[n]
    print('\nTheoretical entp = {} for n = {:2d} \n'.format(entp, n))
    if args.wandb:
        wandb.log({'theor_ent': entp})
    test_ce_r = test_loss.float().cpu().numpy().item(0)
    epochs = args.epochs
    # ------------------------------------- get P_lambda -----------------------------------------
    if not 'cyclic' in args.train2 and not 'wn' in args.train2:
        lr0 = lr = 10.#0.001
        train_size = size = args.ds_size
        print('motifs ', motifs)
        best_val_loss = None
        counter = 0
        model_plambda = GAMModel(ntoken, ninp, nhid, nlayers, feat, motifs, dropout)
        model_plambda.cuda()
        ce_criterion = nn.CrossEntropyLoss(reduction='none')
        start_time = time.time()
        optimizer_pl = optim.Adam(model_plambda.parameters(), lr=lr)
        # hash table of training strings, used by SNIS-mix to detect samples
        # that coincide with dataset elements
        hash_train = {}
        if args.train == 'snis_mix':
            for b in range(train_data.size(1)):
                x_i = ''.join([str(el) for el in train_data[:,b].cpu().numpy()])
                if hash(x_i) in hash_train:
                    hash_train[hash(x_i)] += [x_i]
                else:
                    hash_train[hash(x_i)] = [x_i]
        if args.theta_fixed:
            # initialise P_lambda's autoregressive part from the trained r
            # (only matching-shape parameters are copied)
            model_dict = model_plambda.state_dict()
            pretrained_model = model_r
            pretrained_dict = {k: v for k, v in pretrained_model.state_dict().items()
                               if (k in model_dict) and (v.size() == model_dict[k].size())}
            model_dict.update(pretrained_dict)
            model_plambda.load_state_dict(model_dict)
        test_loss_r = evaluate_ce_r(model_plambda, ce_criterion, test_data)
        train_loss_r = evaluate_ce_r(model_plambda, ce_criterion, train_data)
        print("test_r {}, train_r {}".
              format(test_loss_r.data.float(), train_loss_r.data.float(),))
        print('\nTheoretical entp = {:5.4f} for n = {:2d} \n'.format(entp, n))
        acceptance_rate = torch.zeros((1)).cuda()
        total_samples = 0
        accepted = 0
        ro_stats = [acceptance_rate, total_samples, accepted]
        am_samples = torch.zeros((args.n+2,0)).long().cuda()
        for epoch in range(1, epochs+1):
            epoch_start_time = time.time()
            if args.theta_fixed:
                ro_stats, mean_feat, am_samples = cyclic_train_lambda(model_plambda, ce_criterion, epoch,
                    train_data_D, lr, motifs, feat, ro_stats, optimizer_pl, train_feat, writer, hash_train, am_samples)
            #val_loss = evaluate_ce_pl_ds(model_plambda, ce_criterion, valid_data)/(n+1)
            # early stopping w.r.t. L1 loss between the moments of the P_lambda and D
            l1_feat = torch.abs(train_feat - mean_feat).sum()
            print('-' * 89)
            print('| end of epoch {:3d} | time: {:5.2f}s | l1_feat {} '.
                  format(epoch, (time.time() - epoch_start_time), l1_feat))
            print('lambda', model_plambda.lin_lambda.weight.data.squeeze().cpu().numpy(), model_plambda.lin_lambda.bias.data.squeeze().cpu().numpy())
            if args.wandb:
                wandb.log({'epoch': epoch, 'plambda_l1_feat': l1_feat})
            if args.train == 'rs':
                acceptance_rate, total_samples, accepted = ro_stats
                print('ro', acceptance_rate.data.cpu().numpy(), 'rate', (accepted)/total_samples)
                #writer.add_scalar('data/accpt_rate', (accepted)/total_samples, epoch)
            print('mean_feat', mean_feat.data.squeeze().cpu().numpy())
            print('-' * 89)
            # 1/t learning-rate decay
            lr = lr0/(1 + epoch)
            if not best_val_loss or l1_feat < best_val_loss:
                with open(os.path.join(log_dir, 'chpt_%s_pl.pt'%(timestamp)), 'wb') as f:
                    torch.save(model_plambda, f)
                best_val_loss = l1_feat #val_loss
                counter = 0
            else:
                if best_val_loss:
                    counter += 1
                if counter >= patience:
                    break
        del model_plambda
        model_plambda = torch.load(os.path.join(log_dir, 'chpt_%s_pl.pt'%(timestamp)))
        print('-' * 89)
        plambda_time = time.time() - start_time
        # hybrid model
        z_estim = estimate_partition_mc(model_plambda, ce_criterion)
        test_loss = evaluate_ce_pl_ds(model_plambda, ce_criterion, test_data, z_estim)/(n+1)
        train_loss = evaluate_ce_pl_ds(model_plambda, ce_criterion, train_data, z_estim)/(n+1)
        # autoregressive part of the P_lambda
        test_loss_r = evaluate_ce_r(model_plambda, ce_criterion, test_data)
        train_loss_r = evaluate_ce_r(model_plambda, ce_criterion, train_data)
        if args.wandb:
            wandb.log({'plambda_test_ce': test_loss, 'plambda_z_estim': z_estim, 'lambda': model_plambda.lin_lambda.weight.data.squeeze().cpu().numpy()})
        if args.tensorboard:
            writer.add_scalar('data/plambda_test_ce', test_loss, 0)
            writer.add_scalar('data/plambda_z_estim', z_estim, 0)
            writer.add_histogram('data/lambda', model_plambda.lin_lambda.weight.data.squeeze().cpu().numpy(), 0)
        print('\nTheoretical entp = {:5.4f} for n = {:2d} \n'.format(entp, n))
        print("test_gams {}, train_gams {}, n {}, ds_size {}, motif {}".
              format(test_loss.data.float(), train_loss.data.float(), n, size, motifs))
        print("test_r {}, train_r {}".
              format(test_loss_r.data.float(), train_loss_r.data.float()))
        print('-' * 89)
        print('lambda', model_plambda.lin_lambda.weight.data.squeeze().cpu().numpy())
        #del model
        os.remove(os.path.join(log_dir, 'chpt_%s_pl.pt'%(timestamp)))
        test_ce_pl,lambd = [test_loss.data.float().cpu().numpy().item(0),
            str(list(model_plambda.lin_lambda.weight.data.squeeze(1).cpu().numpy()))]
    else:
        # otherwise get the true P_lambda(x) = wn(x)*F(x)
        model_plambda, lambd, test_ce_pl = [None]*3
        if 'wn' in args.train2:
            if 'bwn' in args.train2:
                probs = [0.59, 0.4, 0.01]
            else:
                probs = [(1-1./args.n)/2, (1-1./args.n)/2, 1./args.n]
            model_plambda = White_noise_filter(probs, feat, motifs)
            model_plambda.cuda()
    theor_ent,tstamp = [entp, timestamp]
    all_data = [train_feat, train_data, valid_data, test_data]
    return model_r, model_plambda, test_ce_r, test_ce_pl,theor_ent,tstamp,lambd, Epoch_start_time, writer, optimizer_r, all_data
def cyclic_train_lambda(model, ce_criterion, epoch, source_data, lr, motifs, feat, ro_stats, optimizer, target_feat, writer, hash_source, am_samples):
    """One epoch of lambda-weight updates for the GAM model P_lambda.

    Shuffles the training columns, then for every mini-batch estimates the
    model's expected feature vector (by rejection sampling or SNIS, per
    ``args.train``) and moves ``model.lin_lambda.weight`` along the moment
    gap ``target_feat - mean_feat`` (moment matching).

    Args:
        model: GAM model exposing a ``lin_lambda`` linear layer over features.
        ce_criterion: per-token CrossEntropyLoss (reduction='none').
        epoch: current epoch number (logging only).
        source_data: LongTensor [seq_len x n_samples] of training sequences.
        lr: step size for the manual lambda update.
        motifs, feat: feature specification consumed by ``get_features``.
        ro_stats: running [acceptance_rate, total_samples, accepted] triple.
        optimizer: unused here (lambda is updated manually); kept for API parity.
        target_feat: empirical feature means of the training data.
        writer: tensorboard writer (unused here; kept for API parity).
        hash_source: hash table of training sequences (used by SNIS variants).
        am_samples: cache of previously drawn samples for rejection sampling.

    Returns:
        (ro_stats, mean feature vector averaged over batches, am_samples).
    """
    model.train()
    total_loss = 0.
    start_time = time.time()
    # Shuffle the mini-batch start offsets, then shuffle the sample columns
    # once.  (The original shuffled the columns twice in a row — redundant.)
    batches_id = list(range(0, source_data.size(1), batch_size))
    shuffle(batches_id)
    all_idx = list(range(source_data.size(1)))
    shuffle(all_idx)
    source_data = source_data[:,all_idx]
    # Number of active (non-'0') feature positions across all feature specs.
    nfeat = sum([sum([int(e!='0') for e in el]) for el in feat])
    all_mean_feat = torch.zeros(nfeat).cuda()
    total_feat = torch.zeros(0, nfeat).cuda()
    total_w = torch.zeros(0, 1).cuda()
    for i, batch in enumerate(batches_id):
        len_tar, mask_tar, data, target = get_length_mask(source_data[:,batch:batch+batch_size])
        batch_size_i = data.size()[1]
        hidden = model.init_hidden(batch_size_i)
        model.zero_grad()
        # r_output: [seq_len, batch, ntok]; log_lin: lambda . phi(x)
        r_output, hidden, log_lin = model(data, hidden, len_tar, mask_tar)
        if args.train == 'rs':
            mean_feat, ro_stats, am_samples = cyclic_rejection_sampling(model, ce_criterion, motifs, feat,ro_stats, am_samples)
            am_samples = am_samples[:,-50000:]  # bound the sample cache
        elif 'snis' in args.train:
            # [batch x nfeat] self-normalized importance-sampling estimate;
            # keep only the most recent history to bound memory.
            mean_feat, total_w, total_feat = snis(model, ce_criterion, motifs, feat, source_data, hash_source, total_feat, total_w)
            total_feat = total_feat[-10000:,:]
            total_w = total_w[-10000:,:]
        all_mean_feat += mean_feat
        # Moment-matching direction: empirical minus model expectation.
        grads = (target_feat - mean_feat).unsqueeze(0)
        # NOTE(review): clip_grad_norm_ clips the .grad of the tensors it is
        # given; `grads` itself has no .grad, so this call is likely a no-op
        # — confirm intent.
        torch.nn.utils.clip_grad_norm_(grads, clip)
        model.lin_lambda.weight.grad = -grads
        # Manual SGD step on the lambda weights only (ascent on the gap).
        for n, p in model.named_parameters():
            if 'lin_lambda.weight' in n:
                p.data.add_(-lr, p.grad.data)
        # [batch x 1] unnormalized log P_lambda(x) = log r(x) + lambda.phi(x)
        log_r = get_log_r(r_output, target, mask_tar, ce_criterion).sum(0)
        P_lambda = torch.exp(log_r + log_lin)
        total_loss += P_lambda.mean().data.float()
        if i % log_interval == 0:# and i > 0:
            cur_loss = total_loss / (log_interval)
            elapsed = time.time() - start_time
            print('grads', grads.data.cpu().numpy())
            print('mean_feat', mean_feat.data.cpu().numpy(),'taget_feat', target_feat.data.cpu().numpy())
            print('lambda', model.lin_lambda.weight.data.squeeze().cpu().numpy(), model.lin_lambda.bias.data.squeeze().cpu().numpy())
            print('| iter {:3d} | {:5d}/{:5d} batches | lr {} | ms/batch {:5.10f} | '
                  'P_lambda {} '.format(epoch, i, len(batches_id), lr,
                                        elapsed * 1000 / log_interval, cur_loss))
            total_loss = 0
            start_time = time.time()
    return ro_stats, all_mean_feat/len(batches_id), am_samples
def cyclic_distill_rejection_sampling(model, ce_criterion, motifs, feat, ro_stats, ds_size):
    """Distill ``ds_size`` sequences from P_lambda by rejection sampling.

    Proposal q(x) = r(x), the model's autoregressive part.  A draw x is
    accepted with probability ro = P_lambda(x)/Q(x), where
    Q(x) = q(x) * exp(max(lambda . phi)) upper-bounds P_lambda for any x.

    Args:
        model: GAM model providing the proposal r(x) and ``lin_lambda``.
        ce_criterion: per-token CrossEntropyLoss; unused directly here but
            kept for signature parity with callers.
        motifs, feat: feature specification consumed by ``get_features``.
        ro_stats: running [acceptance_rate, total_samples, accepted].
        ds_size: number of samples to accept before returning.

    Returns:
        (samples LongTensor [seq_len x ds_size], updated ro_stats).
    """
    samples = [(torch.ones(1)*PAD).cuda().long()]*ds_size
    batch_size_i = 1024
    acceptance_rate, total_samples, accepted = ro_stats
    count = 0
    while count < ds_size:
        x, log_pi, inp, len_inp, target, mask_tar = sample_data_inp_targ_vary(model, batch_size_i)
        all_feats = get_features(x, motifs, feat)
        log_lin = model.lin_lambda(all_feats)
        # upper bound: P_lambda(x) <= q(x)*exp(max(lambda * feat))
        log_beta = log_upper_bound(model.lin_lambda.weight)
        ro = torch.exp(log_lin - log_beta)[:,0].cpu()
        # Running mean of the acceptance probability over all proposals seen.
        acceptance_rate = (total_samples*acceptance_rate + ro.sum())/(total_samples+ro.size(0))
        indicator = torch.rand((ro.size(0))) <= ro
        total_samples += ro.size(0)
        accepted = accepted + indicator.sum().float()
        for i in range(indicator.size(0)):
            if indicator[i]:
                if count >= ds_size:
                    break
                # Each entry keeps a leading PAD sentinel; stripped on return.
                samples[count] = torch.cat((samples[count], x[:,i]), dim=0)
                count += 1
            if count % 25 == 0:
                print('ro', acceptance_rate.data.cpu().numpy(), 'rate', (accepted)/total_samples)
    samples_cat = torch.nn.utils.rnn.pad_sequence(samples, batch_first=False, padding_value=PAD)
    # samples [seq_len x ds_size]; drop the sentinel PAD row.
    return samples_cat[1:,:ds_size], [acceptance_rate, total_samples, accepted]
############################# TRAINING-2 #############################
# -------------------------------------------------------------------
# ------------------------ Distillation ---------------------------------------
def r_plambda_distill_pitheta(model_plambda, model_r, tstamp, Epoch_start_time, writer, all_data):
    """Distill a dataset from P_lambda, then train a fresh pi_theta on it.

    Two stages: (1) rejection-sample ``args.distill_size`` training and
    ~20% as many validation sequences from the fitted GAM ``model_plambda``;
    (2) train a new RNN ``model_pitheta`` on the distilled data concatenated
    with the original dataset D, with early stopping on validation CE.

    Args:
        model_plambda: fitted GAM model to distill from.
        model_r: pretrained proposal LM (unused here; kept for API parity
            with the sibling training entry points).
        tstamp: timestamp string, returned unchanged in the result list.
        Epoch_start_time: wall-clock start used for the reported duration.
        writer: tensorboard SummaryWriter (only closed here).
        all_data: (train_feat, train_data_D, valid_data_V, test_data).

    Returns:
        [test_ce_pi, mfeat_pl, tstamp, Final_duration, train_l1_pl,
         model_pitheta]
    """
    n = args.n
    epochs = args.epochs
    log_dir = os.path.join(args.logdir,'pg_methods/runs/chpt_%s'%(timestamp))
    #train_feat, train_data, valid_data, test_data = all_data
    entp = entp_motifs[n]
    patience = 8
    motifs = all_motifs[args.n].split('.')
    train_feat, train_data_D, valid_data_V, test_data = all_data
    train_size = size = args.ds_size
    feat = [args.feat]
    print('motifs ', motifs)
    # --------------------------------------- distill from P_lambda -----------------------------------------------------
    # Local batch_size shadows the module-level one for this stage.
    batch_size = min(args.distill_size, 1024)
    train_size = size = args.distill_size
    # (seq_len, nbatch)
    valid_size = min(max(batch_size, int(0.2*train_size)), 2000)
    ce_criterion = nn.CrossEntropyLoss(reduction='none')
    # GAMModel
    print('lambda', model_plambda.lin_lambda.weight.data.squeeze().cpu().numpy(), model_plambda.lin_lambda.bias.data.squeeze().cpu().numpy())
    # Running rejection-sampling statistics, shared across both calls below.
    acceptance_rate = torch.zeros((1)).cuda()
    total_samples = 0
    accepted = 0
    ro_stats = [acceptance_rate, total_samples, accepted]
    train_data, ro_stats = cyclic_distill_rejection_sampling(model_plambda, ce_criterion, motifs, feat, ro_stats, size)
    valid_data, ro_stats = cyclic_distill_rejection_sampling(model_plambda, ce_criterion, motifs, feat, ro_stats, valid_size)
    print('train_data', train_data.size(), 'val', valid_data.size())
    print('-' * 89)
    acceptance_rate, total_samples, accepted = ro_stats
    print('ro', acceptance_rate.data.cpu().numpy(), 'rate', (accepted)/total_samples, 'num',
          total_samples, 'accpt', accepted)
    train_feat_pl = get_features(train_data, motifs, feat)
    valid_feat_pl = get_features(valid_data, motifs, feat)
    print('plambda train ds feat = ', train_feat_pl.mean(0).cpu().numpy())
    print('plambda valid ds feat = ', valid_feat_pl.mean(0).cpu().numpy())
    print('orig test ds feat = ', get_features(test_data, motifs, feat).mean(0).cpu().numpy())
    # Pooled mean feature vector of the distilled data, and its L1 gap to
    # the empirical training feature means.
    mfeat_pl = ((train_feat_pl.sum(0) + valid_feat_pl.sum(0))/(train_feat_pl.size(0)+valid_feat_pl.size(0))).cpu().numpy()
    train_l1_pl = np.absolute(train_feat.data.float().cpu().numpy() - mfeat_pl).item(0)
    # ----------------------------------------- train pi_theta on distilled dataset -----------------------------------------
    batch_size = min(args.distill_size, 500)
    lr = 0.001
    best_val_loss = None
    counter = 0
    model_pitheta = RNNModel(ntoken, ninp, nhid, nlayers, dropout)
    model_pitheta.cuda()
    criterion = nn.CrossEntropyLoss(reduction='none')
    optimizer_pi = optim.Adam(model_pitheta.parameters(), lr=lr)
    # expand D with the distilled dataset
    train_data = cat_variable_length(train_data, train_data_D)
    valid_data = cat_variable_length(valid_data, valid_data_V)
    print('distilled_D', train_data.size())
    print('distilled_V', valid_data.size())
    for epoch in range(1, epochs+1):
        epoch_start_time = time.time()
        train_r(model_pitheta, criterion, epoch, train_data, lr, optimizer_pi)
        val_loss = evaluate(model_pitheta, criterion, valid_data)
        print('-' * 89)
        print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {} | '
              'valid ppl {}'.format(epoch, (time.time() - epoch_start_time),
                                    val_loss, math.exp(min(val_loss, 20))))
        print('-' * 89)
        if args.wandb:
            wandb.log({'epoch': epoch, 'q_val_ce': val_loss})
        # Save the model if the validation loss is the best we've seen so far.
        if not best_val_loss or val_loss < best_val_loss:
            with open(os.path.join(log_dir,'chpt_%s_pi.pt'%(timestamp)), 'wb') as f:
                torch.save(model_pitheta, f)
            best_val_loss = val_loss
            counter = 0
        else:
            # Anneal the learning rate if no improvement has been seen in the validation dataset.
            if best_val_loss:
                counter += 1
            if counter >= patience:
                break
    Final_duration = (time.time() - Epoch_start_time)/3600.
    # Persist the expanded datasets for later inspection/reuse.
    np.save(os.path.join(log_dir,'train_n%d_f%s_m%d.npy'%(args.n, feat[0], args.motif)), train_data.cpu().numpy())
    np.save(os.path.join(log_dir,'valid_n%d_f%s_m%d.npy'%(args.n, feat[0], args.motif)), valid_data.cpu().numpy())
    # Reload the best checkpoint before the final evaluation.
    del model_pitheta
    model_pitheta = torch.load(os.path.join(log_dir,'chpt_%s_pi.pt'%(timestamp)))
    test_loss = evaluate(model_pitheta, criterion, test_data)
    train_loss = evaluate(model_pitheta, criterion, train_data)
    print('-' * 89)
    print('| end of epoch {:3d} | time: {:5.2f}s | test loss {} | '
          'test ppl {}'.format(epoch, (time.time() - epoch_start_time),
                               test_loss, math.exp(min(test_loss, 20))))
    print('-' * 89)
    test_ce_pi = test_loss.float().cpu().numpy().item(0)
    if args.wandb:
        wandb.log({'pitheta_test_ce': test_ce_pi, 'mfeat_pl_distill':mfeat_pl})
    mfeat_pl = str(list(mfeat_pl))
    writer.close()
    return [test_ce_pi,mfeat_pl,tstamp,Final_duration,train_l1_pl,model_pitheta]
def cyclic_r_plambda_pitheta(model_plambda, model_r, tstamp, Epoch_start_time, writer, optimizer_r, all_data):
    """Cyclic distillation: alternate P_lambda fitting and pi_theta updates.

    Each cycle: (1) warm-start a fresh GAM P_lambda from the current
    pi_theta and re-fit its lambda weights on the true dataset D;
    (2) distill a batch from P_lambda via rejection sampling; (3) update
    pi_theta on the distilled data ('cyclic_1': one gradient step per
    cycle, 'cyclic_r': full retrain on D + distilled).  Once enough data is
    distilled (or the acceptance rate stops improving, which freezes
    P_lambda), a fresh pi_theta is trained on the whole distilled dataset
    and evaluated.

    Args:
        model_plambda: incoming GAM model (replaced by a fresh GAMModel).
        model_r: pretrained proposal LM used to warm-start pi_theta.
        tstamp: timestamp string (rebound to the module-level `timestamp`).
        Epoch_start_time: wall-clock start for the reported total duration.
        writer: tensorboard SummaryWriter.
        optimizer_r: optimizer used for the cyclic pi_theta updates.
        all_data: (train_feat, train_data_D, valid_data_V, test_data_T).

    Returns:
        [test_ce_pi, mfeat_pl, tstamp, Final_duration, train_l1_pl,
         model_pitheta, lambd, test_ce_pl]
    """
    # cyclic mode for distillation
    n = args.n
    train_feat, train_data_D, valid_data_V, test_data_T = all_data
    entp = entp_motifs[n]
    epochs = args.epochs
    log_dir = os.path.join(args.logdir,'pg_methods/runs/chpt_%s'%(timestamp))
    # ------------------------------------- training cycle: P_lambda + pi_theta -----------------------------------------
    batch_size = 500
    batch_size = min(batch_size, args.ds_size)
    updates_per_epoch = int((1.0*args.distill_size)/batch_size)
    total_loss_cycl = 0.
    lr0 = 10.#0.001
    lr = 0.001
    n = args.n
    train_size = size = args.ds_size
    feat = [args.feat]
    motifs = all_motifs[args.n].split('.')
    print('motifs ', motifs)
    best_val_loss_pi = None
    counter_pi = 0
    Epochs = 50
    model_plambda = GAMModel(ntoken, ninp, nhid, nlayers, feat, motifs, dropout)
    model_plambda.cuda()
    ce_criterion = nn.CrossEntropyLoss(reduction='none')
    start_time = time.time()
    optimizer_pl = optim.Adam(model_plambda.parameters(), lr=lr)
    hash_train = {}
    if args.train == 'snis_mix':
        # BUGFIX: the original indexed the undefined name `train_data` here
        # (it is only created later by distillation); SNIS needs the hash of
        # the true training set D.
        for b in range(train_data_D.size(1)):
            x_i = ''.join([str(el) for el in train_data_D[:,b].cpu().numpy()])
            if hash(x_i) in hash_train:
                hash_train[hash(x_i)] += [x_i]
            else:
                hash_train[hash(x_i)] = [x_i]
    distilled_D = torch.zeros((args.n+2,0)).cuda().long()
    distilled_V = valid_data_V
    distilled_V_size = min(max(batch_size, int(0.25*args.distill_size)), 2000)
    model_pitheta = model_r
    patience = args.rl_patience
    patience_distl = 5
    patience_dist_pi = 15
    counter_dist= 0
    flag_pi_distill = False
    criterion = nn.CrossEntropyLoss(reduction='none')
    best_distill_acceptance_rate = None
    # BUGFIX: `Epoch` was previously first bound only by the final training
    # loop far below, so the 'cyclic_1' logging branch raised a NameError.
    Epoch = 0
    for b in range(updates_per_epoch):
        if not flag_pi_distill:
            # ------------------------------------- train P_lambda on the true dataset D -----------------------------------------
            lr_pl = lr0
            best_val_loss = None
            counter = 0
            # Warm-start P_lambda's autoregressive part from pi_theta, then
            # zero the lambda layer so only the log-linear part is re-fit.
            model_dict = model_plambda.state_dict()
            pretrained_dict = {k: v for k, v in model_pitheta.state_dict().items()
                               if (k in model_dict) and (v.size() == model_dict[k].size())}
            model_dict.update(pretrained_dict)
            model_plambda.load_state_dict(model_dict)
            model_plambda.lin_lambda.bias.data = model_plambda.lin_lambda.bias.data * 0
            model_plambda.lin_lambda.weight.data = model_plambda.lin_lambda.weight.data * 0
            test_loss_r = evaluate_ce_r(model_plambda, ce_criterion, test_data_T)
            train_loss_r = evaluate_ce_r(model_plambda, ce_criterion, train_data_D)
            print("test_r {}, train_r {}".
                  format(test_loss_r.data.float(), train_loss_r.data.float(),))
            print('\nTheoretical entp = {:5.4f} for n = {:2d} \n'.format(entp, n))
            acceptance_rate = torch.zeros((1)).cuda()
            total_samples = 0
            accepted = 0
            ro_stats = [acceptance_rate, total_samples, accepted]
            # keep samples from the fixed theta
            am_samples = torch.zeros((args.n+2,0)).long().cuda()
            for epoch in range(1, epochs+1):
                epoch_start_time = time.time()
                # NOTE(review): if args.theta_fixed is falsy, `mean_feat` /
                # updated `ro_stats` are never produced and the l1_feat line
                # below fails — this mode appears to assume theta_fixed.
                if args.theta_fixed:
                    ro_stats, mean_feat, am_samples = cyclic_train_lambda(model_plambda, ce_criterion, epoch,
                        train_data_D, lr_pl, motifs, feat, ro_stats, optimizer_pl, train_feat, writer, hash_train, am_samples)
                l1_feat = torch.abs(train_feat - mean_feat).sum()
                print('-' * 89)
                print('| end of epoch {:3d} | time: {:5.2f}s | l1_feat {} '.
                      format(epoch, (time.time() - epoch_start_time), l1_feat))
                print('lambda', model_plambda.lin_lambda.weight.data.squeeze().cpu().numpy(), model_plambda.lin_lambda.bias.data.squeeze().cpu().numpy())
                if args.train == 'rs':
                    acceptance_rate, total_samples, accepted = ro_stats
                    print('ro', acceptance_rate.data.cpu().numpy(), 'rate', (accepted)/total_samples)
                print('mean_feat', mean_feat.data.squeeze().cpu().numpy())
                print('-' * 89)
                lr_pl = lr0/(1 + epoch)
                # Early stopping on the feature L1 gap.
                if not best_val_loss or l1_feat < best_val_loss:
                    with open(os.path.join(log_dir, 'chpt_%s_pl_i.pt'%(timestamp)), 'wb') as f:
                        torch.save(model_plambda, f)
                    best_val_loss = l1_feat #val_loss
                    counter = 0
                else:
                    if best_val_loss:
                        counter += 1
                    if counter >= patience_distl:
                        break
            del model_plambda
            model_plambda = torch.load(os.path.join(log_dir, 'chpt_%s_pl_i.pt'%(timestamp)))
            writer.close()
            print('-' * 89)
            del am_samples
            print('\nTheoretical entp = {:5.4f} for n = {:2d} \n'.format(entp, n))
            print('-' * 89)
            print('lambda', model_plambda.lin_lambda.weight.data.squeeze().cpu().numpy())
            os.remove(os.path.join(log_dir, 'chpt_%s_pl_i.pt'%(timestamp)))
            test_ce_pl,theor_ent,tstamp,lambd = [999, entp, timestamp,
                str(list(model_plambda.lin_lambda.weight.data.squeeze().cpu().numpy()))]
        # --------------------------------------- distill batch from P_lambda -----------------------------------------------------
        if b == 0:
            with open(os.path.join(log_dir, 'chpt_%s_pl.pt'%(timestamp)), 'wb') as f:
                torch.save(model_plambda, f)
        train_size = size = batch_size
        # (seq_len, nbatch)
        valid_size = int((1.0*distilled_V_size)/((1.0*args.distill_size)/batch_size))
        print('orig test ds feat = ', get_features(test_data_T,motifs, feat).mean(0).cpu().numpy())
        ce_criterion = nn.CrossEntropyLoss(reduction='none')
        print('lambda', model_plambda.lin_lambda.weight.data.squeeze().cpu().numpy(), model_plambda.lin_lambda.bias.data.squeeze().cpu().numpy())
        acceptance_rate = torch.zeros((1)).cuda()
        total_samples = 0
        accepted = 0
        ro_stats = [acceptance_rate, total_samples, accepted]
        train_data, ro_stats = cyclic_distill_rejection_sampling(model_plambda, ce_criterion, motifs, feat, ro_stats, size)
        valid_data, ro_stats = cyclic_distill_rejection_sampling(model_plambda, ce_criterion, motifs, feat, ro_stats, valid_size)
        distilled_D = cat_variable_length(distilled_D, train_data)
        distilled_V = cat_variable_length(distilled_V, valid_data)
        print('-' * 89)
        acceptance_rate, total_samples, accepted = ro_stats
        print('ro', acceptance_rate.data.cpu().numpy(), 'rate', (accepted)/total_samples, 'num',
              total_samples, 'accpt', accepted)
        distill_acceptance_rate = acceptance_rate.data.cpu().numpy().item(0)
        print('-' * 89)
        print('| distill acceptance_rate {} '.format(distill_acceptance_rate))
        print('-' * 89)
        # --------------- cyclically update pi_theta and P_lambda until the desired acceptance rate is reached --------------------------
        if not flag_pi_distill:
            if not best_distill_acceptance_rate or distill_acceptance_rate > best_distill_acceptance_rate:
                with open(os.path.join(log_dir, 'chpt_%s_pl.pt'%(timestamp)), 'wb') as f:
                    torch.save(model_plambda, f)
                best_distill_acceptance_rate = distill_acceptance_rate
                counter_dist = 0
            else:
                if best_distill_acceptance_rate:
                    counter_dist += 1
                if counter_dist >= patience_dist_pi:
                    # Acceptance rate plateaued: freeze P_lambda at its best
                    # checkpoint and only distill from now on.
                    flag_pi_distill = True
                    model_plambda = torch.load(os.path.join(log_dir, 'chpt_%s_pl.pt'%(timestamp)))
        if args.train2 == 'cyclic_1':
            # ---------------------------------------- pi_theta update on one batch -------------------------------------
            ce_loss = single_update_r(model_pitheta, train_data, optimizer_r, lr, criterion)
            total_loss_cycl += ce_loss.data.float()
            if b % log_interval == 0 and b > 0:
                cur_loss = total_loss_cycl / log_interval
                elapsed = time.time() - start_time
                print('| iter {:3d} | {:5d}/{:5d} batches | lr {:02.6f} | ms/batch {:5.2f} | '
                      'loss {:5.2f} | ppl {:8.5f}'.format(
                          Epoch, b, updates_per_epoch, lr,
                          elapsed * 1000 / log_interval, cur_loss, math.exp(min(cur_loss, 20))))
                # BUGFIX: reset the running loss so cur_loss is a windowed
                # average rather than an ever-growing cumulative sum.
                total_loss_cycl = 0
        elif args.train2 == 'cyclic_r':
            # ---------------------------------------- retrain pi_theta -------------------------------------
            best_val_loss = None
            counter = 0
            print('retrain pi_theta on the D & distilled dataset')
            current_train_data = cat_variable_length(distilled_D, train_data_D)
            for epoch in range(1, epochs+1):
                epoch_start_time = time.time()
                train_r(model_pitheta, criterion, epoch, current_train_data, lr, optimizer_r)
                val_loss = evaluate(model_pitheta, criterion, distilled_V)
                print('-' * 89)
                print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {} | '
                      'valid ppl {}'.format(epoch, (time.time() - epoch_start_time),
                                            val_loss, math.exp(min(val_loss, 20))))
                print('-' * 89)
                if not best_val_loss or val_loss < best_val_loss:
                    with open(os.path.join(log_dir,'chpt_%s_pi.pt'%(timestamp)), 'wb') as f:
                        torch.save(model_pitheta, f)
                    best_val_loss = val_loss
                    counter = 0
                else:
                    # Anneal the learning rate if no improvement has been seen in the validation dataset.
                    if best_val_loss:
                        counter += 1
                    if counter >= patience_distl:
                        break
            model_pitheta = torch.load(os.path.join(log_dir,'chpt_%s_pi.pt'%(timestamp)))
        if distilled_D.size(1) >= args.distill_size:
            break
    print('distilled_D size', distilled_D.size())
    # Expand the distilled dataset with the true dataset D for final training.
    distilled_D = cat_variable_length(distilled_D, train_data_D)
    print('distilled_D', distilled_D.size())
    print('distilled_V', distilled_V.size())
    model_pitheta = RNNModel(ntoken, ninp, nhid, nlayers, dropout)
    model_pitheta.cuda()
    optimizer_pi = optim.Adam(model_pitheta.parameters(), lr=lr)
    # ----------------------------------------- train pi_theta on distilled ds -----------------------------------------
    for Epoch in range(Epochs):
        print('----- epoch %d ------'%Epoch)
        epoch_start_time = time.time()
        train_r(model_pitheta, criterion, Epoch, distilled_D, lr, optimizer_pi)
        val_loss = evaluate(model_pitheta, criterion, distilled_V)
        if args.wandb:
            wandb.log({'epoch': Epoch, 'q_val_ce': val_loss})
        print('-' * 89)
        print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {} | '
              'valid ppl {}'.format(Epoch, (time.time() - epoch_start_time),
                                    val_loss, math.exp(min(val_loss, 20))))
        print('-' * 89)
        # Save the model if the validation loss is the best we've seen so far.
        if not best_val_loss_pi or val_loss < best_val_loss_pi:
            with open(os.path.join(log_dir,'chpt_%s_pi.pt'%(timestamp)), 'wb') as f:
                torch.save(model_pitheta, f)
            best_val_loss_pi = val_loss
            counter_pi = 0
        else:
            # Anneal the learning rate if no improvement has been seen in the validation dataset.
            if best_val_loss_pi:
                counter_pi += 1
            if counter_pi >= patience:
                break
    # hybrid model
    z_estim = estimate_partition_mc(model_plambda, ce_criterion)
    test_ce_pl = (evaluate_ce_pl_ds(model_plambda, ce_criterion, test_data_T, z_estim)/(n+1)).data.float().cpu().numpy().item(0)
    Final_duration = (time.time() - Epoch_start_time)/3600.
    # Reload the best pi_theta checkpoint before the final evaluation.
    del model_pitheta
    model_pitheta = torch.load(os.path.join(log_dir,'chpt_%s_pi.pt'%(timestamp)))
    test_loss = evaluate(model_pitheta, criterion, test_data_T)
    print('-' * 89)
    print('| end of epoch {:3d} | time: {:5.2f}s | test loss {} | '
          'test ppl {}'.format(Epoch, (time.time() - epoch_start_time),
                               test_loss, math.exp(min(test_loss, 20))))
    print('-' * 89)
    test_ce_pi = test_loss.float().cpu().numpy().item(0)
    print('train ds feat (P_lambda) = ', get_features(distilled_D, motifs, feat).mean(0).cpu().numpy())
    print('valid ds feat (P_lambda) = ', get_features(distilled_V, motifs, feat).mean(0).cpu().numpy())
    print('test ds feat (original) = ', get_features(test_data_T,motifs, feat).mean(0).cpu().numpy())
    np.save(os.path.join(log_dir,'train_n%d_f%s_m%d.npy'%(args.n, feat[0], args.motif)), distilled_D.cpu().numpy())
    np.save(os.path.join(log_dir,'valid_n%d_f%s_m%d.npy'%(args.n, feat[0], args.motif)), distilled_V.cpu().numpy())
    train_feat_pl = get_features(distilled_D, motifs, feat)
    valid_feat_pl = get_features(distilled_V, motifs, feat)
    mfeat_pl = ((train_feat_pl.sum(0) + valid_feat_pl.sum(0))/(train_feat_pl.size(0)+valid_feat_pl.size(0))).cpu().numpy()
    train_l1_pl = np.absolute(train_feat.data.float().cpu().numpy() - mfeat_pl).item(0)
    if args.wandb:
        wandb.log({'pitheta_test_ce': test_ce_pi, 'mfeat_pl_distill':mfeat_pl})
    lambd = str(list(model_plambda.lin_lambda.weight.data.squeeze().cpu().numpy()))
    mfeat_pl = str(list(mfeat_pl))
    return [test_ce_pi,mfeat_pl,tstamp,Final_duration,train_l1_pl, model_pitheta, lambd, test_ce_pl]
# ------------------------------------------------------------------------
# ----------------------------- RL ---------------------------------------
def rl_pitheta(model_plambda, model_r, tstamp, Epoch_start_time, writer, all_data):
    """Train pi_theta to approximate P_lambda with policy-gradient methods.

    Dispatches on ``args.train2`` to one of the PG trainers (DPG/PG,
    critic variant, PPO / flat PPO, actor-critic DPG, optionally with a
    'stable_q' slowly-updated proposal), with early stopping on validation
    cross-entropy and best-checkpoint reloading before the final test.

    Args:
        model_plambda: target energy-based model P_lambda.
        model_r: pretrained proposal LM (used to initialize q / critic).
        tstamp: timestamp string, passed through to the result list.
        Epoch_start_time: wall-clock start for the reported duration.
        writer: tensorboard SummaryWriter (closed before returning).
        all_data: (train_feat, _, valid_data, test_data).

    Returns:
        [test_ce_pi, mfeat_pl, tstamp, Final_duration, 999, model_pitheta]

    Raises:
        ValueError: if ``args.train2`` mixes the incompatible 'stable_q'
            and 'crit' modes.
    """
    n = args.n
    train_feat, _, valid_data, test_data = all_data
    entp = entp_motifs[n]
    epochs = args.epochs
    patience = args.rl_patience
    log_dir =os.path.join(args.logdir,'pg_methods/runs/chpt_%s'%(timestamp))
    lr = args.rl_lr
    best_val_loss = None
    counter = 0
    feat = [args.feat]
    motifs = all_motifs[args.n].split('.')
    criterion = nn.CrossEntropyLoss(reduction='none')
    if args.wandb and not 'wn' in args.train2:
        # Log the feature means of a small dataset distilled from P_lambda,
        # as a reference point for what pi_theta should reach.
        size = 600
        acceptance_rate = torch.zeros((1)).cuda()
        total_samples = 0
        accepted = 0
        ro_stats = [acceptance_rate, total_samples, accepted]
        distilled_data, ro_stats = cyclic_distill_rejection_sampling(model_plambda, criterion, motifs, feat, ro_stats, size)
        print('distilled_data', distilled_data.size())
        print('-' * 89)
        acceptance_rate, total_samples, accepted = ro_stats
        print('ro', acceptance_rate.data.cpu().numpy(), 'rate', (accepted)/total_samples, 'num',
              total_samples, 'accpt', accepted)
        d_feat_pl = get_features(distilled_data, motifs, feat).mean(0).cpu().numpy()
        print('plambda train ds feat = ', d_feat_pl)
        wandb.log({'mfeat_pl_distill':d_feat_pl})
    print('train pi_theta in %s'%args.train2)
    # ----------------------------------------- train pi_theta using policy gradient -----------------------------------------
    policy, policy_log = False, False
    if 'crit' in args.train2 or 'ac_dpg' in args.train2:
        policy = True
        policy_log = True
    model_pitheta = RNNModel(ntoken, ninp, nhid, nlayers, dropout, policy=policy, policy_log=policy_log)
    model_pitheta.cuda()
    optimizer_pi = optim.Adam(model_pitheta.parameters(), lr=lr)
    if 'stable_q' in args.train2:
        # Separate proposal q initialized from the pretrained LM.
        model_q = init_rnn_from_proposal(model_r, policy_log, policy)
        q_val_ce = evaluate(model_q, criterion, valid_data)
        print('q_val_ce', q_val_ce)
        if args.wandb:
            wandb.log({'epoch': 0, 'q_val_ce': q_val_ce})
    else:
        model_q = None
    if 'ac_dpg' in args.train2:
        # use stable new network for critic
        model_crit = init_rnn_from_proposal(model_r, policy_log, policy)
        optimizer_w = optim.Adam(model_crit.parameters(), lr=lr)
        optimizers = [optimizer_pi, optimizer_w]
    if args.wandb:
        val_loss = evaluate(model_pitheta, criterion, valid_data)
        wandb.log({'epoch': 0, 'pitheta_valid_ce': val_loss})
    if 'stable_q' in args.train2 and 'crit' in args.train2:
        # BUGFIX: a bare `raise` with no active exception is itself a
        # RuntimeError; raise an explicit error carrying the message instead.
        raise ValueError("wrong train2 definition: %s"%args.train2)
    z_estim_mc = 1 #estimate_partition_mc(model_plambda, criterion)
    print('%s optimizer with lr=%f'%(args.optim, lr))
    batch_size = args.rl_mini_batch
    nbatches = list(range(0, int((1.0*args.distill_size*args.rl_scale_iter)/batch_size)*batch_size, batch_size))
    for epoch in range(1, epochs+1):
        epoch_start_time = time.time()
        if 'ac_dpg' in args.train2:
            model_pitheta, total_loss, model_q, model_crit = train_pi_theta_ac_dpg(model_pitheta, model_plambda, criterion, epoch, lr, motifs, feat, optimizers, writer, z_estim_mc, model_q, model_crit)
        elif 'ppo_fl' in args.train2:
            model_pitheta, total_loss, model_q = train_pi_theta_ppo_flat(model_pitheta, model_plambda, criterion, epoch, lr, motifs, feat, optimizer_pi, writer, z_estim_mc, model_q)
        elif 'ppo' in args.train2:
            model_pitheta, total_loss, model_q = train_pi_theta_ppo(model_pitheta, model_plambda, criterion, epoch, lr, motifs, feat, optimizer_pi, writer, z_estim_mc, model_q)
        else:
            model_pitheta, total_loss, model_q = train_pi_theta_pg(model_pitheta, model_plambda, criterion, epoch, lr, motifs, feat, optimizer_pi, writer, z_estim_mc, model_q, valid_data)
        val_loss = evaluate(model_pitheta, criterion, valid_data)
        print('-' * 89)
        print('| end of epoch {:3d} | time: {:5.2f}s | valid ce {} '.format(epoch, (time.time() - epoch_start_time),
                                                                            val_loss))
        print('-' * 89)
        if args.tensorboard:
            writer.add_scalar('pitheta/val_loss_ce', val_loss.mean().data.cpu().numpy(), epoch)
        if args.wandb:
            wandb.log({'epoch': epoch*len(nbatches), 'pitheta_valid_ce': val_loss})
        print('pi_theta validation score: {}'.format(val_loss))
        if 'stable_q' in args.train2:
            if args.wandb:
                wandb.log({'epoch': epoch*len(nbatches), 'q_val_ce': q_val_ce})
            # Refresh the proposal q from pi_theta whenever pi_theta
            # overtakes it on validation CE (unless 'stable_q_fix').
            if q_val_ce > val_loss and not 'stable_q_fix' in args.train2:
                model_q = RNNModel(ntoken, ninp, nhid, nlayers, dropout, policy=policy, policy_log=policy_log)
                model_q.cuda()
                model_q.load_state_dict(model_pitheta.state_dict())
                q_val_ce = val_loss
        # NOTE(review): `epochs%1 == 0` is always true — possibly a leftover
        # from a periodic-logging condition; confirm intended period.
        if epochs%1 == 0 and args.tensorboard:
            for name, param in model_pitheta.named_parameters():
                writer.add_histogram('pitheta/'+name, param.clone().cpu().data.numpy(), epochs)
        # Save the model if the validation loss is the best we've seen so far.
        if not best_val_loss or val_loss < best_val_loss:
            with open(os.path.join(log_dir,'chpt_%s_pi.pt'%(timestamp)), 'wb') as f:
                torch.save(model_pitheta, f)
            best_val_loss = val_loss
            counter = 0
        else:
            # Anneal the learning rate if no improvement has been seen in the validation dataset.
            if best_val_loss:
                counter += 1
            if counter >= patience:
                break
    Final_duration = (time.time() - Epoch_start_time)/3600.
    mfeat_pl = 'none'
    # Reload the best checkpoint before the final test evaluation.
    del model_pitheta
    model_pitheta = torch.load(os.path.join(log_dir,'chpt_%s_pi.pt'%(timestamp)))
    test_loss = evaluate(model_pitheta, criterion, test_data)
    print('-' * 89)
    print('| end of epoch {:3d} | time: {:5.2f}s | test loss {} | '
          'test ppl {}'.format(epoch, (time.time() - epoch_start_time),
                               test_loss, math.exp(min(test_loss, 20))))
    print('-' * 89)
    if args.tensorboard:
        writer.add_scalar('pitheta/test_loss_ce', test_loss.mean().data.cpu().numpy(), 0)
    test_ce_pi = test_loss.float().cpu().numpy().item(0)
    writer.close()
    if args.wandb:
        wandb.log({'pitheta_test_ce': test_ce_pi})
    return [test_ce_pi,mfeat_pl,tstamp,Final_duration,999, model_pitheta]
def train_pi_theta_pg(model_pitheta, model_plambda, ce_criterion, epoch, lr, motifs, feat, optimizer_pi, writer, z_estim_mc, model_q, valid_data):
# D-PG and PG modes
# in the critic mode: critic shares parameters with policy
model_pitheta.train()
total_loss = 0.
start_time = time.time()
batch_size = args.rl_mini_batch
nbatches = list(range(0, int((1.0*args.distill_size*args.rl_scale_iter)/batch_size)*batch_size, batch_size))
T_loss = 0
pl_hidden = model_plambda.init_hidden(batch_size)
acceptance_rate = 0
# sample x ~ pi_theta(.)
if 'fix_length' in args.debug_opt:
max_len = args.n+1
else:
max_len = args.max_len #5*args.n
# --------------------------- bebugging: inspect q ---------------------------------
if 'stable_q' in args.train2:
bs = 5000
x, log_pi, inp, len_inp, action, mask_tar = sample_data_inp_targ_vary(model_q, bs, max_len=max_len)
pl_hidden_q = model_plambda.init_hidden(bs)
r_output, _, log_lin = model_plambda(inp, pl_hidden_q, len_inp, mask_tar)
if not 'wn' in args.train2:
log_r = get_log_r(r_output, action, mask_tar, ce_criterion).sum(0) # [batch x 1]
else:
log_r = r_output
P_lambda = torch.exp(log_r + log_lin)
pass_filter = (P_lambda != 0).float().mean()
log_pi_all = get_log_r(log_pi, action, mask_tar, ce_criterion)
log_pi_a_q = log_pi_all.sum(0)
if 'dpg' in args.train2:
#assert P_lambda.size() == log_pi_a.size()
rewards = torch.exp(log_r + log_lin - log_pi_a_q)/z_estim_mc
elif 'pg' in args.train2:
rewards = P_lambda
if not 'wn' in args.train2:
rewards = rewards*(2**args.n)
rewards = rewards.detach()
batches_id = list(range(0, x.size(1)))
shuffle(batches_id)
print('inspect q')
if args.wandb:
wandb.log({'epoch': epoch*len(nbatches), 'q_avg_len': len_inp.float().mean().data.cpu().numpy(),
'q_pass_filter': pass_filter.data.cpu().numpy(),'q_a': wandb.Histogram(torch.exp(log_pi_a_q).data.cpu().numpy()) })
print('x', x[:,batches_id][:,:10].t().squeeze().data.cpu().numpy())
print('q_a', torch.exp(log_pi_a_q)[batches_id][:10].squeeze().data.cpu().numpy())
print('len_inp', len_inp[batches_id][:10].squeeze().data.cpu().numpy())
print('pass', (P_lambda != 0)[batches_id][:10].squeeze().data.cpu().numpy())
print('pass_filter', pass_filter.squeeze().data.cpu().numpy())
# -----------------------------------------------------------------------
for jj, i in enumerate(nbatches):
model_pitheta.zero_grad()
optimizer_pi.zero_grad()
if not 'stable_q' in args.train2:
model_q = model_pitheta
if 'crit' in args.train2:
x, log_pi, inp, len_inp, action, mask_tar, est_z = sample_data_inp_targ_vary(model_q, batch_size, max_len=max_len, critic=True)
# get values for the last action
# [seq_len x batch x 1] -> [batch x 1]
est_z = torch.sum(est_z.squeeze().t().contiguous() * to_one_hot(len_inp-1, n_dims=est_z.size(0)), dim = 1).unsqueeze(1)
if not 'stable_q' in args.train2:
hidden = model_q.init_hidden(inp.size(1))
log_pi, hidden, est_z = model_q(inp, hidden, 0, mask_tar, critic=True)
len_inp, mask_tar, inp, action = get_length_mask(x)
else:
x, log_pi, inp, len_inp, action, mask_tar = sample_data_inp_targ_vary(model_q, batch_size, max_len=max_len)
if not 'stable_q' in args.train2:
hidden = model_q.init_hidden(inp.size(1))
log_pi, hidden = model_q(inp, hidden, 0, mask_tar)
len_inp, mask_tar, inp, action = get_length_mask(x)
# get Plambda(x)
r_output, _, log_lin = model_plambda(inp, pl_hidden, len_inp, mask_tar)
if not 'wn' in args.train2:
log_r = get_log_r(r_output, action, mask_tar, ce_criterion).sum(0) # [batch x 1]
else:
log_r = r_output
P_lambda = torch.exp(log_r + log_lin)
# [(n+1) x batch x 1]
log_pi_all = get_log_r(log_pi, action, mask_tar, ce_criterion)
log_pi_a_q = log_pi_all.sum(0)
if 'dpg' in args.train2:
#assert P_lambda.size() == log_pi_a.size()
rewards = torch.exp(log_r + log_lin - log_pi_a_q)/z_estim_mc
elif 'pg' in args.train2:
rewards = P_lambda
rewards = rewards.detach()
returns = rewards.clone()
if 'crit' in args.train2:
assert rewards.size() == est_z.size()
value_loss = 0.5 * (rewards - est_z).pow(2).mean()
rewards = rewards - est_z.detach()
if 'stable_q' in args.train2:
hidden = model_pitheta.init_hidden(inp.size(1))
log_pi, hidden = model_pitheta(inp, hidden, len_inp, mask_tar) # outpt [seq_len ,batch, ntok]
log_pi_a = get_log_r(log_pi, action, mask_tar, ce_criterion).sum(0)
else:
log_pi_a = log_pi_a_q
acceptance_rate += (rewards!=0).float().mean().cpu().numpy().item(0)
tr_feats_pi = get_features(x, motifs, feat).mean(0).cpu().numpy()
J_theta = -1*torch.mul(log_pi_a, rewards)
loss = J_theta.mean()
if 'crit' in args.train2:
loss += args.rl_value_loss_coeff*value_loss
total_loss += loss.data.float()
T_loss += loss.data.float()
if jj % log_interval == 0 and jj > 0:
print('rewards', rewards.mean().cpu().numpy(), 'plambda', P_lambda.data.mean().cpu().numpy(), 'train ds feat = ', tr_feats_pi)
idx = random.randrange(x.size(1))
print(x[:,idx].cpu().numpy())
cur_loss = total_loss / (log_interval)
elapsed = time.time() - start_time
print('| epoch {:3d} | {:5d}/{:5d} batches | seq_len {} | acc_rate {:5.8f} | '
'pg loss {}'.format(
epoch, jj, len(nbatches),
np.round(len_inp.float().mean().data.cpu().numpy(),decimals=1),
acceptance_rate / log_interval , cur_loss))
acceptance_rate = 0
total_loss = 0
start_time = time.time()
model_q.train()
model_pitheta.train()
loss.backward()
if jj % log_interval == 0 and jj > 0:
if args.tensorboard:
writer.add_scalar('pitheta/rewards', rewards.mean().data.cpu().numpy(), epoch*(len(nbatches))+jj)
if 'crit' in args.train2:
writer.add_scalar('pitheta/critic', est_z.mean().data.cpu().numpy(), epoch*(len(nbatches))+jj)
writer.add_scalar('pitheta/P_lambda', P_lambda.mean().data.cpu().numpy(), epoch*(len(nbatches))+jj)
writer.add_scalar('pitheta/r(x)', torch.exp(log_r).mean().data.cpu().numpy(), epoch*(len(nbatches))+jj)
writer.add_scalar('pitheta/seq_len', len_inp.float().mean().data.cpu().numpy(), epoch*(len(nbatches))+jj)
writer.add_histogram('pitheta/tr_feats_sampled', tr_feats_pi, epoch*(len(nbatches))+jj)
for name, param in model_pitheta.named_parameters():
if name == 'encoder.weight': continue
writer.add_histogram('pitheta/grad_'+name, param.grad.data.cpu().numpy(), epoch*(len(nbatches))+jj)
if args.wandb:
if 'crit' in args.train2:
wandb.log({'epoch': epoch*len(nbatches)+jj, 'rewards': returns.mean().data.cpu().numpy(),
'pitheta_P_lambda': P_lambda.mean().data.cpu().numpy(), 'pitheta_r(x)': torch.exp(log_r).mean().data.cpu().numpy(),
'pitheta_seq_len': len_inp.float().mean().data.cpu().numpy(), 'pitheta_feats': tr_feats_pi, 'advantage':rewards.mean().data.cpu().numpy(), 'est_z':est_z.mean().data.cpu().numpy()})
else:
wandb.log({'epoch': epoch*len(nbatches)+jj, 'rewards': rewards.mean().data.cpu().numpy(),
'pitheta_P_lambda': P_lambda.mean().data.cpu().numpy(), 'pitheta_r(x)': torch.exp(log_r).mean().data.cpu().numpy(),
'pitheta_seq_len': len_inp.float().mean().data.cpu().numpy(), 'pitheta_feats': tr_feats_pi})
# to prevent the exploding gradient problem
torch.nn.utils.clip_grad_norm(model_pitheta.parameters(), clip)
if args.optim == 'manual_lr':
for n, p in model_pitheta.named_parameters():
if n == 'encoder.weight': continue
p.data.add_(-lr, p.grad.data)
else:
optimizer_pi.step()
return model_pitheta, T_loss/len(nbatches), model_q
def ppo_update_one_epoch(model_pitheta, trajectories, ce_criterion, optimizer_pi, lr, epoch, writer):
    """Run one PPO optimization epoch over a buffer of whole-sequence trajectories.

    Shuffles the buffer, then for each minibatch recomputes sequence
    log-probabilities under the current policy and optimizes the clipped PPO
    surrogate objective; when a critic is used ('crit' in args.train2) a value
    loss is added.

    Args:
        model_pitheta: policy network, optimized in place.
        trajectories: dict with 'x' (sequences, [seq_len x N]),
            'logpi_k_a' (log pi_theta_k of each sequence at collection time,
            [N x 1]), 'adv' (advantages, [N x 1]) and, with a critic,
            'r' (rewards, [N x 1]).
        ce_criterion: per-token cross-entropy used by get_log_r.
        optimizer_pi: optimizer for the policy parameters.
        lr: learning rate for the 'manual_lr' update path.
        epoch: inner PPO epoch index (logging only).
        writer: tensorboard SummaryWriter (used when args.tensorboard).

    Returns:
        (model_pitheta, mean approximate KL, mean entropy estimate,
        mean loss) averaged over the processed minibatches.
    """
    batch_size = args.rl_mini_batch
    clip_param = args.rl_clip_param
    source_data = trajectories['x']
    batches_id = list(range(0, source_data.size(1), batch_size))
    shuffle(batches_id)
    # Shuffle the buffer once per epoch; every per-sample tensor must be
    # permuted with the SAME index list so minibatch entries stay aligned.
    all_idx = list(range(source_data.size(1)))
    shuffle(all_idx)
    trajectories['x'] = trajectories['x'][:,all_idx]
    trajectories['logpi_k_a'] = trajectories['logpi_k_a'][all_idx]
    if 'crit' in args.train2:
        trajectories['r'] = trajectories['r'][all_idx]
    # BUG FIX: 'adv' is filled unconditionally at collection time
    # (train_pi_theta_ppo), but was previously re-indexed only inside the
    # critic branch above -- without a critic the advantages ended up
    # misaligned with 'x' and 'logpi_k_a' after the shuffle. Permute it
    # unconditionally.
    trajectories['adv'] = trajectories['adv'][all_idx]
    T_loss = 0
    total_loss = 0
    approx_ent = 0
    approx_kl = 0
    for i, batch in enumerate(batches_id):
        x = trajectories['x'][:, batch:batch+batch_size]
        len_tar, mask_tar, data, action = get_length_mask(x)
        # Zero out actions at padded positions.
        action = torch.mul(action.float(), mask_tar[:,:,0]).long()
        hidden = model_pitheta.init_hidden(x.size(1))
        model_pitheta.zero_grad()
        optimizer_pi.zero_grad()
        if 'crit' in args.train2:
            pi_output, hidden, est_z = model_pitheta(data, hidden, 0, mask_tar, critic=True)
        else:
            pi_output, hidden = model_pitheta(data, hidden, 0, mask_tar) # outpt [seq_len ,batch, ntok]
        # Sequence log-prob under current policy: [(n+1) x batch x 1] -> [batch x 1]
        log_pi = get_log_r(pi_output, action, mask_tar, ce_criterion).sum(0)
        logpi_k_a = trajectories['logpi_k_a'][batch:batch+batch_size]
        adv_targ = trajectories['adv'][batch:batch+batch_size]
        # PPO clipped surrogate objective
        ratio = torch.exp(log_pi - logpi_k_a)
        surr1 = ratio * adv_targ
        surr2 = torch.clamp(ratio, 1.0 - clip_param, 1.0 + clip_param) * adv_targ
        loss = J_theta = -torch.min(surr1, surr2).mean()
        # Diagnostics: approximate KL(pi_k || pi) and an entropy estimate.
        approx_kl += (- log_pi + logpi_k_a).mean().data.cpu().numpy().item(0)
        approx_ent += (- log_pi).mean().data.cpu().numpy().item(0)
        if 'crit' in args.train2:
            rewards = trajectories['r'][batch:batch+batch_size]
            # NOTE(review): no args.rl_value_loss_coeff scaling here, unlike
            # the D-PG paths in this file -- confirm this is intentional.
            value_loss = 0.5 * (rewards - est_z).pow(2).mean()
            loss += value_loss
        idx = random.randrange(x.size(1))
        total_loss += loss.data.float()
        T_loss += loss.data.float()
        if i % log_interval == 0 and i > 0:
            cur_loss = total_loss / (log_interval)
            print('| iter {:3d} | {:5d}/{:5d} batches | seq_len {:3.1f} | '
                    'pi_ppo loss {} | kl {} | ent {}'.format(
                epoch, i, len(batches_id),
                np.round(len_tar.float().mean().data.cpu().numpy(),decimals=1),
                cur_loss, approx_kl/(i+1), approx_ent/(i+1)))
            print(x[:,idx].cpu().numpy())
            total_loss = 0
        loss.backward()
        if i % log_interval == 0 and args.tensorboard:
            for name, param in model_pitheta.named_parameters():
                if name == 'encoder.weight': continue
                writer.add_histogram('pitheta/grad_'+name, param.grad.data.cpu().numpy(), epoch*len(batches_id)+i)
        # to prevent the exploding gradient problem
        torch.nn.utils.clip_grad_norm(model_pitheta.parameters(), clip)
        if args.optim == 'manual_lr':
            # Plain SGD step, skipping the (presumably tied/frozen) embedding.
            for n, p in model_pitheta.named_parameters():
                if n == 'encoder.weight': continue
                p.data.add_(-lr, p.grad.data)
        else:
            optimizer_pi.step()
    return model_pitheta, approx_kl/len(batches_id), approx_ent/len(batches_id), T_loss/len(batches_id)
def ppo_update_one_epoch_flat(model_pitheta, trajectories, ce_criterion, optimizer_pi, lr, epoch, writer):
    """Run one PPO optimization epoch over a "flat" (per-action) trajectory buffer.

    Unlike ppo_update_one_epoch, each buffer entry is a single step: a
    one-token input/target pair plus the LSTM hidden/cell state cached at
    collection time, so the clipped surrogate is computed per action rather
    than per whole sequence. The buffer lives on CPU and is moved to the GPU
    one minibatch at a time.

    Args:
        model_pitheta: policy network, optimized in place.
        trajectories: dict with per-step tensors 'inp'/'tar' ([1 x N]),
            'logpi_k_a' ([N x 1]), 'adv'/'r' ([N x 1], critic path) and cached
            states 'hid'/'c_hid' ([nlayers x N x nhid]).
        ce_criterion: per-token cross-entropy used by get_log_r.
        optimizer_pi: optimizer for the policy parameters.
        lr: learning rate for the 'manual_lr' update path.
        epoch: inner PPO epoch index (logging only).
        writer: tensorboard SummaryWriter (used when args.tensorboard).

    Returns:
        (model_pitheta, mean approximate KL, mean entropy estimate,
        mean loss) averaged over the processed minibatches.
    """
    batch_size = args.rl_mini_batch
    clip_param = args.rl_clip_param
    source_data = trajectories['inp']
    batches_id = list(range(0, source_data.size(1), batch_size))
    shuffle(batches_id)
    # Shuffle once per epoch; every per-step tensor shares the permutation.
    all_idx = list(range(source_data.size(1)))
    shuffle(all_idx)
    # NOTE(review): 'r' is only populated when 'crit' in args.train2, so this
    # assert presumes the flat variant always runs with a critic -- confirm.
    assert trajectories['tar'].size(1) == trajectories['logpi_k_a'].size(0) == trajectories['c_hid'].size(1) == trajectories['r'].size(0)
    trajectories['tar'] = trajectories['tar'][:,all_idx]
    trajectories['inp'] = trajectories['inp'][:,all_idx]
    trajectories['logpi_k_a'] = trajectories['logpi_k_a'][all_idx]
    trajectories['c_hid'] = trajectories['c_hid'][:,all_idx,:]
    trajectories['hid'] = trajectories['hid'][:,all_idx,:]
    if 'crit' in args.train2:
        trajectories['r'] = trajectories['r'][all_idx]
        trajectories['adv'] = trajectories['adv'][all_idx]
    T_loss = 0
    total_loss = 0
    approx_ent = 0
    approx_kl = 0
    for i, batch in enumerate(batches_id):
        data, action = trajectories['inp'][:,batch:batch+batch_size].cuda(), trajectories['tar'][:,batch:batch+batch_size].cuda()
        # [1 x batch x 1] mask of non-PAD targets
        mask_tar = (action != PAD).unsqueeze(2).float().cuda()
        action = torch.mul(action.float(), mask_tar[:,:,0]).long()
        model_pitheta.zero_grad()
        optimizer_pi.zero_grad()
        # Restore the recurrent state cached at collection time for this step.
        hidden = (trajectories['hid'][:, batch:batch+batch_size].contiguous().cuda(), trajectories['c_hid'][:, batch:batch+batch_size].contiguous().cuda())
        # outpt [ 1 x seq_len*batch x ntok]
        if 'crit' in args.train2:
            pi_output, hidden, est_z = model_pitheta(data, hidden, 0, mask_tar, critic=True)
        else:
            pi_output, hidden = model_pitheta(data, hidden, 0, mask_tar)
        log_pi = get_log_r(pi_output, action, mask_tar, ce_criterion)[0,:,:] # [1 x batch x 1]
        logpi_k_a = trajectories['logpi_k_a'][batch:batch+batch_size].cuda()
        adv_targ = trajectories['adv'][batch:batch+batch_size].cuda()
        # PPO clipped surrogate objective (per action)
        ratio = torch.exp(log_pi - logpi_k_a)
        surr1 = ratio * adv_targ
        surr2 = torch.clamp(ratio, 1.0 - clip_param, 1.0 + clip_param) * adv_targ
        loss = J_theta = -torch.min(surr1, surr2).mean()
        # Diagnostics: approximate KL(pi_k || pi) and an entropy estimate.
        approx_kl += (- log_pi + logpi_k_a).mean().data.cpu().numpy().item(0)
        approx_ent += (- log_pi).mean().data.cpu().numpy().item(0)
        if 'crit' in args.train2:
            rewards = trajectories['r'][batch:batch+batch_size].cuda()
            value_loss = 0.5 * (rewards - est_z).pow(2).mean()
            loss += value_loss
        # Advances the RNG exactly as before; kept for reproducibility even
        # though the sample print below is commented out.
        idx = random.randrange(data.size(1))
        total_loss += loss.data.float()
        T_loss += loss.data.float()
        # BUG FIX: 'i % log_interval*10' parsed as '(i % log_interval)*10',
        # which is equivalent to 'i % log_interval' (the *10 was a no-op).
        # Parenthesize so logging happens every log_interval*10 minibatches
        # as intended.
        if i % (log_interval * 10) == 0 and i > 0:
            cur_loss = total_loss / (log_interval)
            print('| iter {:3d} | {:5d}/{:5d} batches | '
                    'pi_ppo loss {} | kl {} '.format(
                epoch, i, len(batches_id),
                cur_loss, approx_kl/(i+1)))
            #print(data[:,idx].cpu().numpy())
            total_loss = 0
        loss.backward()
        if i % (log_interval * 10) == 0 and args.tensorboard:
            for name, param in model_pitheta.named_parameters():
                if name == 'encoder.weight': continue
                writer.add_histogram('pitheta/grad_'+name, param.grad.data.cpu().numpy(), epoch*len(batches_id)+i)
        # to prevent the exploding gradient problem
        torch.nn.utils.clip_grad_norm(model_pitheta.parameters(), clip)
        if args.optim == 'manual_lr':
            # Plain SGD step, skipping the (presumably tied/frozen) embedding.
            for n, p in model_pitheta.named_parameters():
                if n == 'encoder.weight': continue
                p.data.add_(-lr, p.grad.data)
        else:
            optimizer_pi.step()
    return model_pitheta, approx_kl/len(batches_id), approx_ent/len(batches_id), T_loss/len(batches_id)
# PPO for critic case:
# advantage = Q(s,a) - V(s)
def train_pi_theta_ppo(model_pitheta, model_plambda, ce_criterion, epoch, lr, motifs, feat, optimizer_pi, writer, z_estim_mc, model_q):
    """Collect whole-sequence trajectories from pi_theta and run inner PPO epochs.

    One outer RL step: (1) sample N batches of sequences from the current
    policy, score them under the energy model to get P_lambda(x), and build a
    trajectory buffer with rewards (and advantages, when a critic is used);
    (2) run up to `epochs` inner PPO epochs via ppo_update_one_epoch, stopping
    early when the approximate KL exceeds 1.5 * args.rl_target_kl.

    Args:
        model_pitheta: policy network, optimized in place.
        model_plambda: energy model defining the unnormalized target P_lambda.
        ce_criterion: per-token cross-entropy used by get_log_r.
        epoch: outer training epoch index (logging only).
        lr: learning rate forwarded to the inner update.
        motifs, feat: feature spec for get_features diagnostics.
        optimizer_pi: optimizer for the policy parameters.
        writer: tensorboard SummaryWriter (used when args.tensorboard).
        z_estim_mc: Monte-Carlo partition-function estimate (only used by the
            'dppo' reward variant).
        model_q: proposal model; passed through and returned unchanged.

    Returns:
        (model_pitheta, mean inner-epoch loss, model_q).
    """
    # collect trajectories using current pi_theta_k
    # find a new pi_theta that maximizes the PPO objective on these trajectories
    # use approximate KL for early stopping
    batch_size = 4000
    N = int((1.0*args.distill_size*args.rl_scale_iter)/batch_size)
    print('number of workers %d'%N)
    trajectories = {'r': torch.zeros((0,1)).cuda(),
                    'adv': torch.zeros((0,1)).cuda(),
                    'x': torch.zeros((args.n+2,0)).cuda().long(),
                    'logpi_k_a': torch.zeros((0,1)).cuda(),
                   }
    epochs = 40
    target_kl = args.rl_target_kl
    if 'fix_length' in args.debug_opt:
        max_len = args.n+1
    else:
        max_len = 5*args.n
    pl_hidden = model_plambda.init_hidden(batch_size)
    for wrkr in range(N):
        # sample x ~ pi_theta(.)
        if 'crit' in args.train2:
            x, log_pi, inp, len_inp, action, mask_tar, est_z = sample_data_inp_targ_vary(model_pitheta, batch_size, max_len=max_len, critic=True)
            # get values for the last action
            # [seq_len x batch x 1] -> [batch x 1]
            est_z = torch.sum(est_z.squeeze().t().contiguous() * to_one_hot(len_inp-1, n_dims=est_z.size(0)), dim = 1).unsqueeze(1)
        else:
            x, log_pi, inp, len_inp, action, mask_tar = sample_data_inp_targ_vary(model_pitheta, batch_size, max_len=max_len)
        # get Plambda(x)
        r_output, _, log_lin = model_plambda(inp, pl_hidden, len_inp, mask_tar)
        if not 'wn' in args.train2:
            log_r = get_log_r(r_output, action, mask_tar, ce_criterion).sum(0) # [batch x 1]
        else:
            # 'wn' variant: the model emits the scalar log r(x) directly.
            log_r = r_output
        P_lambda = torch.exp(log_r + log_lin)
        # [(n+1) x batch x 1]
        log_pi_a = get_log_r(log_pi, action, mask_tar, ce_criterion)
        log_pi_a = log_pi_a.sum(0)
        if 'dppo' in args.train2:
            # Distributional reward: importance-corrected P_lambda / pi_theta,
            # normalized by the MC partition estimate.
            rewards = torch.exp(log_r + log_lin - log_pi_a)/z_estim_mc
        elif 'ppo' in args.train2:
            rewards = P_lambda
            if not 'wn' in args.train2:
                # Rescale to counter the exponentially small raw P_lambda
                # values -- presumably keeps gradients usable; confirm.
                rewards = rewards*(2**args.n)
        advantage = rewards.clone()
        if 'crit' in args.train2:
            assert rewards.size() == est_z.size()
            # Critic baseline: advantage = reward - V(s).
            advantage = rewards - est_z
        print('rewards', rewards.mean().data.cpu().numpy(), 'P_lambda', P_lambda.mean().data.cpu().numpy())
        idx = random.randrange(x.size(1))
        print(x[:,idx].cpu().numpy())
        if args.wandb:
            if 'crit' in args.train2:
                wandb.log({'epoch': epoch*N+wrkr, 'rewards': rewards.mean().data.cpu().numpy(),
                    'pitheta_P_lambda': P_lambda.mean().data.cpu().numpy(),
                    'pitheta_seq_len': len_inp.float().mean().data.cpu().numpy(), 'advantage':advantage.mean().data.cpu().numpy(), 'est_z':est_z.mean().data.cpu().numpy()})
            else:
                wandb.log({'epoch': epoch*N+wrkr, 'rewards': rewards.mean().data.cpu().numpy(),
                    'pitheta_P_lambda': P_lambda.mean().data.cpu().numpy(),
                    'pitheta_seq_len': len_inp.float().mean().data.cpu().numpy()})
        if args.tensorboard:
            writer.add_scalar('pitheta/rewards', rewards.mean().data.cpu().numpy(), epoch*N+wrkr)
            writer.add_scalar('pitheta/P_lambda', P_lambda.mean().data.cpu().numpy(), epoch*N+wrkr)
            writer.add_scalar('pitheta/seq_len', len_inp.float().mean().data.cpu().numpy(), epoch*N+wrkr)
        # Append this worker's batch to the buffer; detach so the inner PPO
        # epochs do not backprop into the collection graph.
        # [seq_len x batch]
        #trajectories['a'] = torch.cat(( trajectories['a'], action), dim=0)
        if 'crit' in args.train2:
            trajectories['r'] = torch.cat(( trajectories['r'], rewards), dim=0).detach()
        trajectories['x'] = cat_variable_length(trajectories['x'], x).detach()
        # [batch x 1]
        trajectories['adv'] = torch.cat(( trajectories['adv'], advantage), dim=0).detach()
        trajectories['logpi_k_a'] = torch.cat(( trajectories['logpi_k_a'], log_pi_a), dim=0).detach()
    tr_feats = get_features(trajectories['x'], motifs, feat).mean(0).cpu().numpy()
    print('train ds feat = ', tr_feats)
    model_pitheta.train()
    total_loss = 0.
    start_time = time.time()
    if args.tensorboard:
        writer.add_histogram('pitheta/tr_feats_sampled', tr_feats, epoch)
    T_loss = 0
    # Inner PPO epochs over the fixed buffer, with approximate-KL early stop.
    for e in range(epochs):
        model_pitheta, approx_kl, approx_ent, loss = ppo_update_one_epoch(model_pitheta, trajectories, ce_criterion, optimizer_pi, lr, e, writer)
        total_loss += loss.data.float()
        T_loss += loss.data.float()
        if e % log_interval == 0 and e > 0:
            cur_loss = total_loss / (log_interval)
            elapsed = time.time() - start_time
            print('| epoch {:3d} | {:5d}/{:5d} iters | kl {} | ent {:5.8f} | '
                    'pg loss {}'.format(
                epoch, e, epochs,
                approx_kl, approx_ent , cur_loss))
            total_loss = 0
            start_time = time.time()
        if args.wandb:
            wandb.log({'epoch': epoch*epochs + e, 'pitheta_ppo_loss': loss, 'pitheta_approx_kl': approx_kl})
        if args.tensorboard:
            writer.add_scalar('pitheta/approx_kl', approx_kl, epoch*epochs+e)
            writer.add_scalar('pitheta/ppo_loss',loss.mean().data.cpu().numpy(), epoch*epochs+e)
        if approx_kl > 1.5 * target_kl:
            print('Early stopping at step %d due to reaching max kl.'%e)
            break
    return model_pitheta, T_loss/epochs, model_q
def train_pi_theta_ppo_flat(model_pitheta, model_plambda, ce_criterion, epoch, lr, motifs, feat, optimizer_pi, writer, z_estim_mc, model_q):
    """Collect per-action ("flat") trajectories from pi_theta and run inner PPO epochs.

    Like train_pi_theta_ppo, but the buffer stores one entry per action: each
    (input token, target token) pair together with the LSTM hidden/cell state
    at that step, so ppo_update_one_epoch_flat can recompute per-action
    log-probs without replaying whole sequences. Trajectories are kept on CPU
    to save GPU memory.

    Args:
        model_pitheta: policy network, optimized in place.
        model_plambda: energy model defining the unnormalized target P_lambda.
        ce_criterion: per-token cross-entropy used by get_log_r.
        epoch: outer training epoch index (logging only).
        lr: learning rate forwarded to the inner update.
        motifs, feat: feature spec for get_features diagnostics.
        optimizer_pi: optimizer for the policy parameters.
        writer: tensorboard SummaryWriter (used when args.tensorboard).
        z_estim_mc: Monte-Carlo partition-function estimate (unused here;
            kept for signature parity with train_pi_theta_ppo).
        model_q: proposal model; passed through and returned unchanged.

    Returns:
        (model_pitheta, mean inner-epoch loss, model_q).
    """
    # collect trajectories using current pi_theta_k for every action individually
    # find a new pi_theta that maximizes the PPO objective on these trajectories
    # use approximate KL for early stopping
    batch_size = 4000
    N = 20
    trajectories = {'r': torch.zeros((0,1)),
                    'adv': torch.zeros((0,1)),
                    'inp': torch.zeros((1,0)).long(),
                    'tar': torch.zeros((1,0)).long(),
                    'logpi_k_a': torch.zeros((0,1)),
                    'hid': torch.zeros(model_pitheta.nlayers, 0, model_pitheta.nhid),
                    'c_hid': torch.zeros(model_pitheta.nlayers, 0, model_pitheta.nhid),
                   }
    epochs = 40
    target_kl = args.rl_target_kl
    all_x = torch.zeros((args.n+2,0)).cuda().long()
    if 'fix_length' in args.debug_opt:
        max_len = args.n+1
    else:
        max_len = 3*args.n
    pl_hidden = model_plambda.init_hidden(batch_size)
    for wrkr in range(N):
        # sample batch of x ~ pi_theta(.)
        if 'crit' in args.train2:
            x, log_pi, inp, len_inp, action, mask_tar, hids, c_hids, est_z = sample_data_inp_targ_vary_hid(model_pitheta, batch_size, max_len=max_len, critic=True)
            # est_z: [seq_len x batch x 1] -> flattened per-step values
            est_z = est_z.view(-1, 1)
        else:
            x, log_pi, inp, len_inp, action, mask_tar, hids, c_hids = sample_data_inp_targ_vary_hid(model_pitheta, batch_size, max_len=max_len)
        # get Plambda(x)
        r_output, _, log_lin = model_plambda(inp, pl_hidden, len_inp, mask_tar)
        if not 'wn' in args.train2:
            log_r = get_log_r(r_output, action, mask_tar, ce_criterion).sum(0) # [batch x 1]
        else:
            # 'wn' variant: the model emits the scalar log r(x) directly.
            log_r = r_output
        P_lambda = torch.exp(log_r + log_lin)
        # [(n+1) x batch x 1]
        #print('x', x.size(), 'log_pi', log_pi.size(), 'a', action.size())
        log_pi_a_all = get_log_r(log_pi, action, mask_tar, ce_criterion)
        log_pi_a = log_pi_a_all.sum(0)
        if 'dppo' in args.train2:
            if 'crit' in args.train2:
                # [batch x 1] -> [(n+1) x batch]
                log_P_lambda = (log_r + log_lin).repeat(1, inp.size(0)).t().contiguous()
                assert log_P_lambda.size() == log_pi_a_all.squeeze().size()
                # unbiased estimate for Z(s) = P_lambda(x)/pi_theta(x|s)
                log_pi_x_s = log_pi_a_all.squeeze().clone()
                # NOTE(review): this double loop computes suffix sums over
                # dim 0 in O(L^2) tensor ops; a reversed cumulative sum would
                # be equivalent and O(L). Left as-is to preserve behavior.
                for i in range(log_pi_x_s.size(0)):
                    for j in range(log_pi_x_s.size(1)):
                        log_pi_x_s[i,j] = log_pi_a_all[i:,j].sum()
                rewards = torch.exp(log_P_lambda - log_pi_x_s).view(-1, 1)
                advantage = rewards - est_z
                assert rewards.size() == est_z.size()
            else:
                rewards = torch.exp(log_r + log_lin - log_pi_a)
        elif 'ppo' in args.train2:
            if 'crit' in args.train2:
                # Broadcast the sequence reward to every step of the sequence.
                rewards = P_lambda.repeat(1, inp.size(0)).view(-1,1)
                advantage = rewards - est_z
            else:
                rewards = P_lambda
            if not 'wn' in args.train2:
                # Rescale to counter the exponentially small raw P_lambda
                # values -- presumably keeps gradients usable; confirm.
                rewards = rewards*(2**args.n)
        if not 'crit' in args.train2:
            # [batch x 1] -> [batch x (n+1)] -> [(n+1) * batch x 1]
            advantage = rewards = rewards.repeat(1, inp.size(0)).t().contiguous().view(-1,1)
        print('rewards', rewards.mean().data.cpu().numpy(), 'P_lambda', P_lambda.mean().data.cpu().numpy())
        idx = random.randrange(x.size(1))
        print(x[:,idx].cpu().numpy())
        # Flatten to per-action layout: [ batch*seq_len x 1]
        log_pi_a_all = log_pi_a_all.view(-1, 1)
        inp = inp.view(1, -1)
        action = action.view(1, -1)
        if args.wandb:
            if 'crit' in args.train2:
                wandb.log({'epoch': epoch*N+wrkr, 'rewards': rewards.mean().data.cpu().numpy(),
                    'pitheta_P_lambda': P_lambda.mean().data.cpu().numpy(),
                    'pitheta_seq_len': len_inp.float().mean().data.cpu().numpy(), 'advantage':advantage.mean().data.cpu().numpy(), 'est_z':est_z.mean().data.cpu().numpy()})
            else:
                wandb.log({'epoch': epoch*N+wrkr, 'rewards': rewards.mean().data.cpu().numpy(),
                    'pitheta_P_lambda': P_lambda.mean().data.cpu().numpy(),
                    'pitheta_seq_len': len_inp.float().mean().data.cpu().numpy()})
        if args.tensorboard:
            writer.add_scalar('pitheta/rewards', rewards.mean().data.cpu().numpy(), epoch*N+wrkr)
            writer.add_scalar('pitheta/P_lambda', P_lambda.mean().data.cpu().numpy(), epoch*N+wrkr)
            writer.add_scalar('pitheta/seq_len', len_inp.float().mean().data.cpu().numpy(), epoch*N+wrkr)
        all_x = cat_variable_length(all_x, x).detach()
        # due to the small GPU memory move trajectories to cpu
        trajectories['c_hid'] = torch.cat((trajectories['c_hid'], c_hids), dim=1).detach()
        trajectories['hid'] = torch.cat((trajectories['hid'], hids), dim=1).detach()
        trajectories['inp'] = torch.cat((trajectories['inp'], inp.cpu()), dim=1).detach()
        trajectories['tar'] = torch.cat((trajectories['tar'], action.cpu()), dim=1).detach()
        # [batch x 1]
        if 'crit' in args.train2:
            trajectories['r'] = torch.cat(( trajectories['r'], rewards.cpu()), dim=0).detach()
            trajectories['adv'] = torch.cat(( trajectories['adv'], advantage.cpu()), dim=0).detach()
        trajectories['logpi_k_a'] = torch.cat(( trajectories['logpi_k_a'], log_pi_a_all.cpu()), dim=0).detach()
    tr_feats_pi = get_features(all_x, motifs, feat).mean(0).cpu().numpy()
    print('train ds feat = ', tr_feats_pi)
    if args.wandb:
        wandb.log({'epoch': epoch, 'pitheta_feats': tr_feats_pi})
    if args.tensorboard:
        writer.add_histogram('pitheta/tr_feats_sampled', tr_feats_pi, epoch)
    model_pitheta.train()
    total_loss = 0.
    start_time = time.time()
    T_loss = 0
    # Inner PPO epochs over the fixed buffer, with approximate-KL early stop.
    for e in range(epochs):
        model_pitheta, approx_kl, approx_ent, loss = ppo_update_one_epoch_flat(model_pitheta, trajectories, ce_criterion, optimizer_pi, lr, e, writer)
        total_loss += loss.data.float()
        T_loss += loss.data.float()
        if args.wandb:
            wandb.log({'epoch': epoch*epochs + e, 'pitheta_ppo_loss': loss, 'pitheta_approx_kl': approx_kl})
        if args.tensorboard:
            writer.add_scalar('pitheta/approx_kl', approx_kl, epoch*epochs+e)
            writer.add_scalar('pitheta/ppo_loss',loss.mean().data.cpu().numpy(), epoch*epochs+e)
        if e % log_interval == 0 and e > 0:
            cur_loss = total_loss / (log_interval)
            elapsed = time.time() - start_time
            print('| epoch {:3d} | {:5d}/{:5d} iters | kl {} | ent {:5.8f} | '
                    'pg loss {}'.format(
                epoch, e, epochs,
                approx_kl, approx_ent , cur_loss))
            total_loss = 0
            start_time = time.time()
        if approx_kl > 1.5 * target_kl:
            print('Early stopping at step %d due to reaching max kl.'%e)
            break
    # Drop the buffer and release cached GPU memory before returning.
    trajectories = None
    torch.cuda.empty_cache()
    return model_pitheta, T_loss/epochs, model_q
def ac_dpg_update_one_epoch(model_pitheta, trajectories, ce_criterion, optimizers, lr, epoch, writer, model_crit, Epoch):
    """One actor-critic distributional-PG optimization epoch over flat trajectories.

    Recomputes per-action policy log-probs from cached hidden states and
    optimizes the policy with the precomputed advantages ('adv'); the critic
    is trained with a squared error between the stored rewards and its Z
    estimates. Policy and critic use separate optimizers. Gradient clipping
    and the parameter steps (in the non-manual path) are skipped while
    Epoch <= 1 -- presumably a critic warm-up phase; confirm intent.

    Args:
        model_pitheta: policy network, optimized in place.
        trajectories: dict with per-step tensors 'inp'/'tar' ([1 x N]),
            'adv'/'r' ([N x 1]) and cached policy/critic states
            'q_hid'/'q_c_hid'/'cr_hid'/'cr_c_hid' ([nlayers x N x nhid]).
        ce_criterion: per-token cross-entropy used by get_log_r.
        optimizers: [optimizer_pi, optimizer_w] for policy and critic.
        lr: learning rate for the 'manual_lr' update path.
        epoch: inner epoch index (logging only).
        writer: tensorboard SummaryWriter (used when args.tensorboard).
        model_crit: critic network, optimized in place.
        Epoch: outer epoch index gating clipping/updates.

    Returns:
        (model_pitheta, mean policy loss, mean value loss, model_crit).
    """
    model_pitheta.train()
    model_crit.train()
    batch_size = args.rl_mini_batch
    [optimizer_pi, optimizer_w] = optimizers
    source_data = trajectories['inp']
    batches_id = list(range(0, source_data.size(1), batch_size))
    shuffle(batches_id)
    # Shuffle once per epoch; all per-step tensors share the same permutation.
    all_idx = list(range(source_data.size(1)))
    shuffle(all_idx)
    assert trajectories['tar'].size(1) == trajectories['q_c_hid'].size(1) == trajectories['r'].size(0)
    trajectories['tar'] = trajectories['tar'][:,all_idx]
    trajectories['inp'] = trajectories['inp'][:,all_idx]
    trajectories['q_c_hid'] = trajectories['q_c_hid'][:,all_idx,:]
    trajectories['q_hid'] = trajectories['q_hid'][:,all_idx,:]
    trajectories['cr_c_hid'] = trajectories['cr_c_hid'][:,all_idx,:]
    trajectories['cr_hid'] = trajectories['cr_hid'][:,all_idx,:]
    trajectories['r'] = trajectories['r'][all_idx]
    trajectories['adv'] = trajectories['adv'][all_idx]
    T_loss, V_loss = 0, 0
    total_loss = 0
    for i, batch in enumerate(batches_id):
        data, action = trajectories['inp'][:,batch:batch+batch_size].cuda(), trajectories['tar'][:,batch:batch+batch_size].cuda()
        # [1 x batch x 1] mask of non-PAD targets
        mask_tar = (action != PAD).unsqueeze(2).float().cuda()
        action = torch.mul(action.float(), mask_tar[:,:,0]).long()
        model_pitheta.zero_grad()
        optimizer_pi.zero_grad()
        model_crit.zero_grad()
        optimizer_w.zero_grad()
        # Policy forward from the hidden state cached at collection time.
        hidden = (trajectories['q_hid'][:, batch:batch+batch_size].contiguous().cuda(), trajectories['q_c_hid'][:, batch:batch+batch_size].contiguous().cuda())
        # outpt: [ 1 x batch x ntok]
        pi_output, _, _ = model_pitheta(data, hidden, 0, mask_tar, critic=True)
        # Critic forward from its own cached hidden state.
        hidden = (trajectories['cr_hid'][:, batch:batch+batch_size].contiguous().cuda(), trajectories['cr_c_hid'][:, batch:batch+batch_size].contiguous().cuda())
        # est_z: [1 x batch x ntok]
        _, _, est_z = model_crit(data, hidden, 0, mask_tar, critic=True)
        log_pi = get_log_r(pi_output, action, mask_tar, ce_criterion)[0,:,:] # [batch x 1]
        adv_targ = trajectories['adv'][batch:batch+batch_size].cuda()
        # AC D-PG policy objective: -E[log pi(a|s) * advantage]
        loss = J_theta = -1*torch.mul(log_pi, adv_targ).mean()
        rewards = trajectories['r'][batch:batch+batch_size].cuda()
        if 'ac_dpg_a' in args.train2:
            # add leaves to get value at the parent node
            est_z_s = est_z.sum(2).squeeze(0)
        else:
            # no leaves, parent node in the log domain
            est_z_s = est_z.squeeze()
        value_loss = args.rl_value_loss_coeff * (rewards.squeeze() - est_z_s).pow(2).mean()
        # Advances the RNG as before; kept even though the sample print is
        # commented out, to preserve the random state sequence.
        idx = random.randrange(data.size(1))
        total_loss += loss.data.float()
        T_loss += loss.data.float()
        V_loss += value_loss.data.float()
        # BUG FIX: 'i % log_interval*10' parsed as '(i % log_interval)*10',
        # i.e. the same as 'i % log_interval' (the *10 was a no-op).
        # Parenthesize so logging happens every log_interval*10 minibatches
        # as intended.
        if i % (log_interval * 10) == 0 and i > 0:
            cur_loss = total_loss / (log_interval)
            print('| iter {:3d} | {:5d}/{:5d} batches | '
                    'policy loss {} '.format(
                epoch, i, len(batches_id),
                cur_loss))
            #print(data[:,idx].cpu().numpy())
            total_loss = 0
        loss.backward()
        value_loss.backward()
        if i % (log_interval * 10) == 0 and args.tensorboard:
            for name, param in model_pitheta.named_parameters():
                if name == 'encoder.weight': continue
                writer.add_histogram('pitheta/grad_'+name, param.grad.data.cpu().numpy(), epoch*len(batches_id)+i)
        # to prevent the exploding gradient problem (skipped during warm-up)
        if Epoch>1:
            torch.nn.utils.clip_grad_norm(model_pitheta.parameters(), clip)
            torch.nn.utils.clip_grad_norm(model_crit.parameters(), clip)
        if args.optim == 'manual_lr':
            # NOTE(review): unlike the optimizer path below, the manual SGD
            # update is NOT gated by Epoch>1 -- confirm this asymmetry.
            for n, p in model_pitheta.named_parameters():
                if n == 'encoder.weight': continue
                p.data.add_(-lr, p.grad.data)
            for n, p in model_crit.named_parameters():
                if n == 'encoder.weight': continue
                p.data.add_(-lr, p.grad.data)
        else:
            if Epoch>1:
                optimizer_pi.step()
                optimizer_w.step()
    return model_pitheta, T_loss/len(batches_id), V_loss/len(batches_id), model_crit
def plan_z_estim_ac_dpg(model_q, x, model_crit):
    """Depth-limited planning estimate of log Z for every prefix of each sequence.

    Starting from sampled sequences `x`, runs the critic over every position,
    then for up to args.rl_plan_depth expansion layers follows (at depth 0)
    the action actually sampled from q, and afterwards the action with the
    highest critic Z estimate, accumulating in the log domain the Z mass of
    all leaves that are not expanded further.

    Args:
        model_q: proposal model (unused in the body -- kept for signature
            compatibility; confirm before removing).
        x: sampled sequences, [seq_len x batch], containing END (=2) and PAD
            tokens.
        model_crit: critic returning per-action Z estimates in the log domain.

    Returns:
        plan_z: [(seq_len) x batch] accumulated log-Z estimates per position.
    """
    # output Z_planned: [(n+2) x batch]
    action = x[1:]
    batch_size_i = x.size(1)
    seq_len = x.size(0)
    # Per-position hidden/cell state caches, kept on CPU between layers.
    hids = [0]*seq_len
    c_hids = [0]*seq_len
    hidden = model_crit.init_hidden(batch_size_i)
    plan_z = torch.zeros((seq_len, batch_size_i)).cuda()
    END = 2
    inv_seq_mask = (x == END) + (x == PAD) # 0: sequence ; 1: finished sequence
    term_elem_mask = (x != END) # 0: finished sequence
    action = torch.mul(action, (action != PAD).long())
    out = torch.zeros(seq_len, batch_size_i).cuda().long()
    len_inp = torch.ones((batch_size_i), dtype=torch.int64)
    # d = 0 walks the original sequence; d > 0 expands one more step per layer.
    for d in range(args.rl_plan_depth+1):
        for pos in range(seq_len):
            symb = x[pos:pos+1]
            mask = (symb!=END).float().unsqueeze(2)
            if d == 0:
                # left cell
                if pos>0:
                    hidden = (hids[pos-1].cuda(), c_hids[pos-1].cuda())
            else:
                # bottom cell from previous layer
                hidden = (hids[pos].cuda(), c_hids[pos].cuda())
            # log domain est_z: [1 x batch x ninp]
            _, hidden, est_z = model_crit(symb, hidden, len_inp, mask, critic=True)
            hids[pos] = hidden[0].detach().cpu()
            c_hids[pos] = hidden[1].detach().cpu()
            if d < args.rl_plan_depth:
                if d ==0 and (pos < seq_len - 1):
                    # choose actions sampled from q
                    max_z_ind = action[pos]
                    #print('z', est_z.size(), 'a', action.size(), ninp, pos, batch_size_i)
                    max_z_val = torch.sum(est_z.view(-1, ninp) * to_one_hot(action[pos], n_dims=ninp), dim = 1).view(batch_size_i, 1)
                else:
                    # choose best action according to Z estimates
                    max_z_val, max_z_ind = est_z.max(2)
                # accumulate Z value of the leaves that are not going to be expanded
                # mask in logsumexp maximum elements for Z
                not_expand_est_z = torch.where(to_one_hot(max_z_ind, n_dims=ninp).view(est_z.size()).byte(), (torch.ones(est_z.size())*float('-inf')).cuda(), est_z )
                #plan_z[pos] = plan_z[pos] + torch.mul(est_z.sum(2).view(batch_size_i) - max_z_val.view(batch_size_i), torch.mul((inv_seq_mask[pos] == 0).float(), (symb != END).float().squeeze()))
                plan_z[pos] = plan_z[pos] + torch.mul(logsumexp(not_expand_est_z, dim=2).view(batch_size_i), torch.mul((inv_seq_mask[pos] == 0).float(), (symb != END).float().squeeze()))
                # for the terminal elements add the final Z value
                plan_z[pos] = plan_z[pos] + torch.mul(logsumexp(est_z, dim=2).view(batch_size_i), torch.mul((term_elem_mask[pos] == 0).float(), (symb == END).float().squeeze()))
                # for accumulated leaves
                term_elem_mask[pos] = term_elem_mask[pos] + (symb == END)
                # for finished sequences or PAD tokens mask > 0
                inv_seq_mask[pos] = inv_seq_mask[pos] + (max_z_ind == END)
                out[pos] = max_z_ind.view(batch_size_i)
            else:
                # Last layer: for unfinished sequences add all leaves.
                plan_z[pos] = plan_z[pos] + torch.mul(logsumexp(est_z, dim=2).view(batch_size_i), (term_elem_mask[pos] == 0).float())
        # Next layer expands from the actions chosen in this layer.
        x = out
    # Free the per-position caches explicitly (they can be large).
    del hids
    del c_hids
    del inv_seq_mask
    return plan_z
def train_pi_theta_ac_dpg(model_pitheta, model_plambda, ce_criterion, epoch, lr, motifs, feat, optimizers, writer, z_estim_mc, model_q, model_crit):
# collect trajectories using q
# find a new pi_theta that maximizes the AC D-PG objective on these trajectories
batch_size = 4000
N = 20
trajectories = {'r': torch.zeros((0,1)), 'adv': torch.zeros((0,1)), 'inp': torch.zeros((1,0)).long(), 'tar': torch.zeros((1,0)).long(),
'logpi_k_a': torch.zeros((0,1)), 'q_hid': torch.zeros(model_pitheta.nlayers, 0, model_pitheta.nhid), 'q_c_hid': torch.zeros(model_pitheta.nlayers, 0, model_pitheta.nhid),
'cr_hid': torch.zeros(model_pitheta.nlayers, 0, model_pitheta.nhid), 'cr_c_hid': torch.zeros(model_pitheta.nlayers, 0, model_pitheta.nhid)
}
target_kl = args.rl_target_kl
all_x = torch.zeros((args.n+2,0)).long()
if 'fix_length' in args.debug_opt:
max_len = args.n+1
else:
max_len = 3*args.n
pl_hidden = model_plambda.init_hidden(batch_size)
if not 'stable_q' in args.train2:
model_q = model_pitheta
for wrkr in range(N):
# get samples from q
x, log_pi, inp, len_inp, action, mask_tar, hids, c_hids = sample_data_inp_targ_vary_hid(model_q, batch_size, max_len=max_len)
# get Plambda(x)
r_output, _, log_lin = model_plambda(inp, pl_hidden, len_inp, mask_tar)
if not 'wn' in args.train2:
log_r = get_log_r(r_output, action, mask_tar, ce_criterion).sum(0) # [batch x 1]
else:
log_r = r_output
P_lambda = torch.exp(log_r + log_lin)
# [(n+1) x batch x 1]
log_pi_a_all = get_log_r(log_pi, action, mask_tar, ce_criterion)
# [batch x 1] -> [batch x (n+1)] -> [(n+1) x batch]
log_P_lambda = (log_r + log_lin).repeat(1, inp.size(0)).t().contiguous()
#assert log_P_lambda.size() == log_pi_a_all.squeeze().size()
log_pi_x_s = log_pi_a_all.squeeze().clone()
for i in range(log_pi_x_s.size(0)):
for j in range(log_pi_x_s.size(1)):
log_pi_x_s[i,j] = log_pi_a_all[i:,j].sum()
# unbiased estimate for log_Z(s) = log(P_lambda(x)/pi_theta(x|s))
rewards = (log_P_lambda - log_pi_x_s).view(-1, 1)
len_inp = (x!= PAD).sum(0)
mask_tar = (x != PAD).unsqueeze(2).float().cuda()
if 'ac_dpg_a' in args.train2:
# log domain
est_z = plan_z_estim_ac_dpg(model_q, x, model_crit)
else:
hidden = model_crit.init_hidden(x.size(1))
mask = torch.ones((1, x.size(1), 1)).cuda()
cr_hids = torch.zeros(model_crit.nlayers, x.size(1)*x.size(0), model_crit.nhid)
cr_c_hids = torch.zeros(model_crit.nlayers, x.size(1)*x.size(0), model_crit.nhid)
est_z = torch.zeros(x.size(0), x.size(1), 1).cuda()
# est_z: [seq_len x batch x 1] -- log domain
for i in range(x.size(0)):
# [1 x batch x ntok]
_, hidden, est_z_i = model_crit(x[i:i+1], hidden, len_inp, mask, critic=True)
cr_hids[:,i*x.size(1):(i+1)*x.size(1),:,] = hidden[0].cpu().detach()
cr_c_hids[:,i*x.size(1):(i+1)*x.size(1),:,] = hidden[1].cpu().detach()
est_z[i:i+1] = est_z_i
#_, _, est_z = model_crit(x, hidden, len_inp, mask_tar, critic=True) # outpt [n+2, batch, ntok]
est_z_now = est_z[:-1].view(-1, 1)
est_z_next = est_z[1:].view(-1, 1)
log_pi_a_s = log_pi_a_all.view(-1, 1)
ratio_zs = torch.clamp(est_z_next - est_z_now, max=0)
advantage = torch.exp(ratio_zs - log_pi_a_s)
# ------------------ inspect max advantages ---------------------
max_adv, max_idx = torch.max(advantage, 0)
#print(max_idx, log_pi_a_s.size(), advantage.size(), est_z_now.size(), est_z.size(), P_lambda.size())
if args.wandb:
wandb.log({'epoch': epoch*N+wrkr, 'max_adv': max_adv.mean().data.cpu().numpy(),
'max_q_adv': torch.exp(log_pi_a_s[max_idx, 0]).data.cpu().numpy(),
'max_z_now': est_z_now[max_idx, 0].data.cpu().numpy(), 'max_z_next': est_z_next[max_idx, 0].data.cpu().numpy(),
'est_z_5':est_z[5].mean().data.cpu().numpy()})
print('max_adv',max_adv.mean().data.cpu().numpy(),
'max_q_adv', torch.exp(log_pi_a_s[max_idx, 0]).data.cpu().numpy())
# ------------------------------------------------------------
len_inp = len_inp - 1
assert rewards.size() == est_z_now.size() == log_pi_a_s.size()
print('rewards', rewards.mean().data.cpu().numpy(), 'P_lambda', P_lambda.mean().data.cpu().numpy())
idx = random.randrange(x.size(1))
print(x[:,idx].cpu().numpy())
# [ batch*seq_len x 1]
log_pi_a_all = log_pi_a_all.view(-1, 1)
inp = inp.view(1, -1)
action = action.view(1, -1)
if args.wandb:
wandb.log({'epoch': epoch*N+wrkr, 'rewards': torch.exp(rewards).mean().data.cpu().numpy(),
'pitheta_P_lambda': P_lambda.mean().data.cpu().numpy(),
'pitheta_seq_len': len_inp.float().mean().data.cpu().numpy(), 'advantage':advantage.mean().data.cpu().numpy(),
'est_z_0':est_z[0].mean().data.cpu().numpy()})
if args.tensorboard:
writer.add_scalar('pitheta/rewards', rewards.mean().data.cpu().numpy(), epoch*N+wrkr)
writer.add_scalar('pitheta/P_lambda', P_lambda.mean().data.cpu().numpy(), epoch*N+wrkr)
writer.add_scalar('pitheta/seq_len', len_inp.float().mean().data.cpu().numpy(), epoch*N+wrkr)
all_x = cat_variable_length(all_x, x.cpu().detach())
# due to the small GPU memory move trajectories to cpu
trajectories['q_c_hid'] = torch.cat((trajectories['q_c_hid'], c_hids), dim=1).detach()
trajectories['q_hid'] = torch.cat((trajectories['q_hid'], hids), dim=1).detach()
trajectories['cr_c_hid'] = torch.cat((trajectories['cr_c_hid'], cr_c_hids), dim=1).detach()
trajectories['cr_hid'] = torch.cat((trajectories['cr_hid'], cr_hids), dim=1).detach()
trajectories['inp'] = torch.cat((trajectories['inp'], inp.cpu()), dim=1).detach()
trajectories['tar'] = torch.cat((trajectories['tar'], action.cpu()), dim=1).detach()
# [batch x 1]
trajectories['r'] = torch.cat(( trajectories['r'], rewards.cpu()), dim=0).detach()
trajectories['adv'] = torch.cat(( trajectories['adv'], advantage.cpu()), dim=0).detach()
trajectories['logpi_k_a'] = torch.cat(( trajectories['logpi_k_a'], log_pi_a_all.cpu()), dim=0).detach()
if wrkr % 4 == 0 or (wrkr == N-1):
e = wrkr
model_pitheta, loss, v_loss, model_crit = ac_dpg_update_one_epoch(model_pitheta, trajectories, ce_criterion, optimizers, lr, e, writer, model_crit, epoch)
if args.wandb:
wandb.log({'epoch': epoch*N+wrkr, 'pitheta_policy_loss': loss, 'pitheta_crit_loss': v_loss})
del trajectories
trajectories = {'r': torch.zeros((0,1)), 'adv': torch.zeros((0,1)), 'inp': torch.zeros((1,0)).long(), 'tar': torch.zeros((1,0)).long(),
'logpi_k_a': torch.zeros((0,1)), 'q_hid': torch.zeros(model_pitheta.nlayers, 0, model_pitheta.nhid), 'q_c_hid': torch.zeros(model_pitheta.nlayers, 0, model_pitheta.nhid),
'cr_hid': torch.zeros(model_pitheta.nlayers, 0, model_pitheta.nhid), 'cr_c_hid': torch.zeros(model_pitheta.nlayers, 0, model_pitheta.nhid)
}
tr_feats_pi = get_features(all_x, motifs, feat).mean(0).cpu().numpy()
print('train ds feat = ', tr_feats_pi)
if args.wandb:
wandb.log({'epoch': epoch, 'pitheta_feats': tr_feats_pi})
if args.tensorboard:
writer.add_histogram('pitheta/tr_feats_sampled', tr_feats_pi, epoch)
model_pitheta.train()
#e = 0
#model_pitheta, loss, model_crit = ac_dpg_update_one_epoch(model_pitheta, trajectories, ce_criterion, optimizers, lr, e, writer, model_crit)
trajectories = None
torch.cuda.empty_cache()
return model_pitheta, loss, model_q, model_crit
def main():
    """Full experiment driver.

    Stage 1 trains the proposal ``r`` and energy model ``P_lambda``; stage 2
    derives the policy ``pi_theta`` using the method selected by
    ``args.train2`` (cyclic / distill / RL).  Finally both ``pi_theta`` and
    ``r`` are sampled to measure motif frequency and average sequence length.

    Returns:
        list: [test_ce_r, test_ce_pi, test_ce_pl, train_l1_pl, theor_ent,
        tstamp, lambd, mfeat_pl, Final_duration] — consumed by the sqlite
        results insert in the ``__main__`` block.
    """
    # training-1: train proposal r on D and obtain P_lambda
    model_r, model_plambda, test_ce_r, test_ce_pl,theor_ent,tstamp,lambd, Epoch_start_time, writer, optimizer_r, all_data = training_1()
    # training-2: get pi_theta from P_lambda
    # NOTE(review): if args.train2 matches none of the branches below, the
    # variables assigned inside them (test_ce_pi, model_pitheta, ...) stay
    # unbound and the code fails later with NameError -- confirm intended.
    if 'cyclic' in args.train2:
        # cyclic improvement of pi -> r
        print('CYCLIC MODE')
        test_ce_pi,mfeat_pl,tstamp,Final_duration,train_l1_pl, model_pitheta, lambd, test_ce_pl = cyclic_r_plambda_pitheta(model_plambda, model_r, tstamp, Epoch_start_time, writer, optimizer_r, all_data)
    elif 'distill' in args.train2:
        #distill in one cyclic iteration
        print('distill in one cyclic iteration')
        test_ce_pi,mfeat_pl,tstamp,Final_duration,train_l1_pl, model_pitheta = r_plambda_distill_pitheta(model_plambda, model_r, tstamp, Epoch_start_time, writer, all_data)
    elif 'pg' in args.train2 or 'ppo' in args.train2:
        print('RL MODE')
        test_ce_pi,mfeat_pl,tstamp,Final_duration,train_l1_pl, model_pitheta = rl_pitheta(model_plambda, model_r, tstamp, Epoch_start_time, writer, all_data)
    # ------------------------------------------ frequency of motifs in samples from pi_theta and r ------------------------------------
    # NOTE(review): this uses the module-level `timestamp`, not the local
    # `tstamp` returned by training_1() -- confirm they are the same value.
    log_dir = os.path.join(args.logdir,'pg_methods/runs/chpt_%s'%(timestamp))
    # Reload the checkpointed r model so sampling uses the saved weights.
    model_r = torch.load(os.path.join(log_dir,'chpt_%s_r.pt'%(timestamp)))
    motif_freq, avg_len = sample_from_rnn(model_pitheta)
    motif_freq_r, avg_len_r = sample_from_rnn(model_r)
    print('r avg_len', avg_len_r, 'r motif_freq', motif_freq_r)
    print('pi avg_len', avg_len, 'pi motif_freq', motif_freq)
    if args.wandb:
        wandb.log({'T2_duration': Final_duration, 'lambd':lambd, 'mfeat_pl':mfeat_pl, 'pitheta_mfreq': motif_freq, 'r_mfreq': motif_freq_r, 'pi_avg_len':avg_len,
            'r_avg_len':avg_len_r })
    # Encode the run statistics into the timestamp string used as a DB key.
    tstamp = tstamp+'_'+str(args.rl_seed)+'_'+str(motif_freq)+'_'+str(avg_len) +'_'+str(motif_freq_r)+'_'+str(avg_len_r)
    return [test_ce_r,test_ce_pi,test_ce_pl,train_l1_pl,theor_ent,tstamp,lambd,mfeat_pl,Final_duration]
if __name__ == "__main__":
    # Run the full two-stage experiment and collect its summary metrics.
    info_p_lambda = main()
    train = args.train+'_'+args.train2+'_'+args.debug_opt
    print(tuple(info_p_lambda+[args.mtype+'.'+all_motifs[args.n],train,args.feat,args.n,args.ds_size,args.job]))
    # ----------------------- store the results into database ---------------------------------
    if not args.test_run:
        # r_plambda_distill_pitheta.db
        # NOTE(review): hard-coded absolute DB path ties this script to one
        # machine/user -- consider making it configurable.
        with sqlite3.connect('/tmp-network/user/tparshak/r_plambda_pitheta.db', timeout=10) as conn:
            # this will be executed once because of the "IF NOT EXISTS" clause
            conn.execute('CREATE TABLE IF NOT EXISTS results (test_ce_r REAL,test_ce_pi REAL,test_ce_pl REAL,train_l1_pl REAL,theor_ent REAL,tstamp TEXT,lambd TEXT,mfeat_pl TEXT,plambda_time REAL,motif TEXT,train_reg TEXT,feat TEXT,n INTEGER,ds_size INTEGER,job INTEGER)')
            conn.execute('INSERT INTO results (test_ce_r,test_ce_pi,test_ce_pl,train_l1_pl,theor_ent,tstamp,lambd,mfeat_pl,plambda_time,motif,train_reg,feat,n,ds_size,job) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)',
                    tuple(info_p_lambda+[args.mtype+'.'+all_motifs[args.n],train,args.feat,args.n,args.ds_size,args.job]))
| StarcoderdataPython |
1924376 | import sys
import weakref
from _weakref import ref
try:
    from _weakref import _remove_dead_weakref
except ImportError:
    # Fallback for interpreters lacking the private CPython helper
    # (added in 3.6 / 2.7.13): a plain ``del`` is the closest equivalent.
    def _remove_dead_weakref(o, key):
        del o[key]
import types
# asyncio (and therefore awaitable-callback support) needs Python 3.5+.
AIO_AVAILABLE = sys.version_info >= (3, 5)
if AIO_AVAILABLE:
    import asyncio
else:
    asyncio = None
PY2 = sys.version_info.major == 2
if not PY2:
    # Keep ``basestring`` usable for isinstance checks on Python 3.
    basestring = str
def get_method_vars(m):
    """Return ``(function, instance)`` for the bound method *m*.

    Uses EAFP instead of the module-global ``PY2`` flag: Python 3 (and
    Python >= 2.6) bound methods expose ``__func__``/``__self__``; the
    legacy ``im_func``/``im_self`` aliases are kept as a fallback so the
    helper is self-contained and works on either interpreter.

    Args:
        m: A bound instance method.

    Returns:
        tuple: ``(f, obj)`` where ``f`` is the underlying function object
        and ``obj`` is the instance the method is bound to.
    """
    try:
        return m.__func__, m.__self__
    except AttributeError:
        # Very old Python 2 method objects only.
        return m.im_func, m.im_self
def iscoroutinefunction(obj):
    """Return True if *obj* is an asyncio coroutine function.

    Always False when asyncio support is unavailable (Python < 3.5).
    """
    return AIO_AVAILABLE and asyncio.iscoroutinefunction(obj)
class WeakMethodContainer(weakref.WeakValueDictionary):
    """Container to store weak references to callbacks

    Instance methods are stored using the underlying :term:`function` object
    and the instance id (using :func:`id(obj) <id>`) as the key (a two-tuple)
    and the object itself as the value. This ensures proper weak referencing.

    Functions are stored using the string "function" and the id of the function
    as the key (a two-tuple).
    """
    def keys(self):
        # Normalize keys() to behave the same on Python 2 and 3
        # (Py2's WeakValueDictionary.keys() returns a list).
        if PY2:
            return self.iterkeys()
        return super(WeakMethodContainer, self).keys()
    def add_method(self, m, **kwargs):
        """Add an instance method or function

        Args:
            m: The instance method or function to store
        """
        if isinstance(m, types.FunctionType):
            # Plain functions are keyed by the literal string 'function'.
            self['function', id(m)] = m
        else:
            # Bound methods are keyed by (function, instance id); the
            # *instance* is stored as the weakly-referenced value so the
            # entry disappears when the instance is collected.
            f, obj = get_method_vars(m)
            wrkey = (f, id(obj))
            self[wrkey] = obj
    def del_method(self, m):
        """Remove an instance method or function if it exists

        Args:
            m: The instance method or function to remove
        """
        # NOTE(review): unlike add_method, coroutine *functions* take the
        # bound-method branch here because of the iscoroutinefunction
        # check -- confirm this asymmetry with add_method is intended.
        if isinstance(m, types.FunctionType) and not iscoroutinefunction(m):
            wrkey = ('function', id(m))
        else:
            f, obj = get_method_vars(m)
            wrkey = (f, id(obj))
        if wrkey in self:
            del self[wrkey]
    def del_instance(self, obj):
        """Remove any stored instance methods that belong to an object

        Args:
            obj: The instance object to remove
        """
        # Collect first, then delete: mutating while iterating the weak
        # dict would be unsafe.
        to_remove = set()
        for wrkey, _obj in self.iter_instances():
            if obj is _obj:
                to_remove.add(wrkey)
        for wrkey in to_remove:
            del self[wrkey]
    def iter_instances(self):
        """Iterate over the stored objects

        Yields:
            wrkey: The two-tuple key used to store the object
            obj: The instance or function object
        """
        # Snapshot the keys so entries collected mid-iteration are skipped
        # (get() returns None for a dead reference).
        for wrkey in set(self.keys()):
            obj = self.get(wrkey)
            if obj is None:
                continue
            yield wrkey, obj
    def iter_methods(self):
        """Iterate over stored functions and instance methods

        Yields:
            Instance methods or function objects
        """
        for wrkey, obj in self.iter_instances():
            f, obj_id = wrkey
            if f == 'function':
                yield self[wrkey]
            else:
                # Re-bind the stored function to the live instance.
                yield getattr(obj, f.__name__)
class InformativeDict(dict):
    """A dict that invokes ``del_callback(key)`` after a key is deleted.

    The ``del_callback`` attribute is assigned externally (see
    :class:`InformativeWVDict`) before any deletion occurs.
    """
    def __delitem__(self, key):
        super(InformativeDict, self).__delitem__(key)
        # Notify only after the underlying deletion has succeeded.
        self.del_callback(key)
class InformativeWVDict(weakref.WeakValueDictionary):
    """A WeakValueDictionary providing a callback for deletion

    Keyword Arguments:
        del_callback: A callback function that will be called when an item is
            either deleted or dereferenced. It will be called with the key as
            the only argument.
    """
    def __init__(self, **kwargs):
        self.del_callback = kwargs.get('del_callback')
        weakref.WeakValueDictionary.__init__(self)
        # Re-implement the base class' ``remove`` closure so that GC-driven
        # removals also trigger the user callback.
        def remove(wr, selfref=ref(self)):
            self = selfref()
            if self is not None:
                if self._iterating:
                    self._pending_removals.append(wr.key)
                else:
                    # Atomic removal is necessary since this function
                    # can be called asynchronously by the GC
                    _remove_dead_weakref(self.data, wr.key)
                    self._data_del_callback(wr.key)
        self._remove = remove
        # Replace the plain backing dict with one that reports explicit
        # deletions through the same callback path.
        self.data = InformativeDict()
        self.data.del_callback = self._data_del_callback
    def _data_del_callback(self, key):
        # Funnel both explicit and GC-driven removals to the user callback.
        self.del_callback(key)
class EmissionHoldLock_(object):
    """Context manager that defers event emission while held.

    While the lock is held, the associated event stores its most recent
    emission arguments in :attr:`last_event` instead of dispatching them.
    On release, that captured emission (if any) is replayed exactly once.

    Args:
        event_instance: The :class:`~pydispatch.dispatch.Event` instance
            associated with the lock

    Attributes:
        event_instance: The event being held
        last_event: Two-tuple of (args, kwargs) from the latest emission
            captured while held, or ``None`` if nothing was emitted
        held (bool): The internal state of the lock
    """
    def __init__(self, event_instance):
        self.event_instance = event_instance
        self.last_event = None
        self.held = False
    def acquire(self):
        # Re-acquiring while held is a no-op (the lock is not counted).
        if not self.held:
            self.held = True
            self.last_event = None
    def release(self):
        if not self.held:
            return
        # NOTE(review): ``held`` is only cleared when an emission was
        # captured; releasing with no pending emission leaves the lock
        # marked as held.  Preserved as-is from the original behavior.
        if self.last_event is None:
            return
        args, kwargs = self.last_event
        self.last_event = None
        self.held = False
        self.event_instance(*args, **kwargs)
    def __enter__(self):
        self.acquire()
        return self
    def __exit__(self, *args):
        self.release()
# Select the public EmissionHoldLock: the asyncio-aware variant when
# available, otherwise the plain synchronous implementation.
if AIO_AVAILABLE:
    from pydispatch.aioutils import AioEmissionHoldLock
    class EmissionHoldLock(EmissionHoldLock_, AioEmissionHoldLock):
        # Combines the synchronous hold-lock with the async-capable mixin.
        pass
else:
    EmissionHoldLock = EmissionHoldLock_
| StarcoderdataPython |
5009165 | import contextlib
from typing import Generator, List
import fastapi
import fastapi.middleware
__all__ = ["override_middleware"]
@contextlib.contextmanager
def override_middleware(
    app: fastapi.FastAPI, middleware: List[fastapi.middleware.Middleware]
) -> Generator[None, None, None]:
    """Temporarily swap a FastAPI app's user middleware for *middleware*.

    The previous middleware list is restored — and the middleware stack
    rebuilt — when the context exits, even if the body raises.
    """
    saved = app.user_middleware
    try:
        app.user_middleware = middleware
        app.middleware_stack = app.build_middleware_stack()
        yield
    finally:
        app.user_middleware = saved
        app.middleware_stack = app.build_middleware_stack()
| StarcoderdataPython |
1834473 | <reponame>861934367/genecast<filename>genecast_package/depth_coverage_plot.py
import matplotlib as mpl
# Select the non-interactive Agg backend so the script runs on headless
# machines; this call must happen BEFORE pyplot is imported.
mpl.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import multiprocessing
import sh
import pysam
from collections import defaultdict
class TypeException(Exception):
    """Raised when an unsupported coverage data type is requested
    (``args.type`` must be ``"reads"`` or ``"base"``)."""
def bin(group, n):
    """Collapse *group* into consecutive bins of width *n* and return the
    log10 of the per-bin median depth.

    NOTE: this intentionally keeps its historical name even though it
    shadows the builtin ``bin`` — callers in this module rely on it.

    Args:
        group: sequence of per-base depth values.
        n: bin width (number of positions per bin).

    Returns:
        numpy.ndarray of log10(median depth) per bin.
    """
    values = np.asarray(group)
    medians = [np.median(values[start:start + n])
               for start in range(0, len(values), n)]
    return np.log10(medians)
def plot(data, args=None, file=None):
    """Draw the two-panel uniformity-of-coverage figure for one sample.

    Top panel: log10 depth/reads in panel order; bottom panel: the same
    values sorted ascending.  The figure is saved next to *file* as
    ``<basename>_uniformity_coverage.png``.

    Args:
        data: DataFrame with a column named ``args.type`` ("base" or "reads").
        args: CLI namespace; uses ``type`` and (for "base") bin width ``n``.
        file: sample file name used to derive the output image name.
    """
    fig, axs = plt.subplots(nrows=2, ncols=1,figsize=(15,12))
    average_depth = data[args.type].mean()
    # Fraction of positions/regions covered above 20% of the mean depth.
    percentage20 = len(data.loc[data[args.type] > average_depth * 0.2]) / len(data)
    if args.type == "base": reads = bin(data[args.type], args.n)
    else: reads = np.log10(data[args.type])
    ax1, ax2 = axs[0], axs[1]
    ax1.bar(np.arange(len(reads)), reads, 1, color="slateblue")
    ax1.set_ylabel("$Log10(%s)$" % args.type, size=20)
    #ax1.set_title("Uniformity of Coverage (Average Coverage = %d)" % (average_depth), size=20)
    # NOTE(review): for "reads" this calls Series.sort(), which was removed
    # in pandas >= 0.20 -- confirm the pinned pandas version.
    reads.sort()
    ax2.bar(np.arange(len(reads)), reads, 1, color="slateblue")
    ax2.set_ylabel("$Log10(%s)$" % args.type, size=20)
    ax2.set_xticks([])
    # NOTE(review): "panel_loction" below is a typo in a user-visible label
    # (kept verbatim; fixing it would change figure output).
    if args.type == "base":
        ax1.set_xlabel("panel_loction(bin=%d)" % args.n, size=20)
        ax2.set_xlabel("sort_depth(bin=%d)" % args.n, size=20)
        ax1.set_title("Uniformity of Coverage (Average Coverage = %d percentage20 = %0.3f)" % (average_depth, percentage20), size=20)
    else:
        ax1.set_xlabel("panel_loction(bin=panel_region)", size=20)
        ax2.set_xlabel("sort_depth(bin=panel_region)", size=20)
        ax1.set_title("Uniformity of Coverage (Average reads of panel_region = %d percentage20 = %0.3f)" % (average_depth, percentage20), size=20)
    plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, hspace = 0.2, wspace = 0.3)
    plt.savefig("%s_uniformity_coverage.png" % file.split(".")[0], dpi=600)
    plt.close()
def multiprocessing_plot(file, args):
    """Compute depth data for one BAM file, render its coverage figure,
    and return the log10-transformed depth values.

    Args:
        file: path to a BAM file.
        args: CLI namespace; uses ``args.type`` ("reads" or "base") and
            ``args.panel`` (BED-like panel definition file).

    Returns:
        pandas.Series of log10 depth values (per region or per base).

    Raises:
        TypeException: if ``args.type`` is neither "reads" nor "base".
    """
    if args.type == "reads":
        sam = pysam.AlignmentFile(file)
        data = pd.read_table("%s" % args.panel, names=["chr", "start", "end", "gene", "transcript"])
        # Mean read count per base over each panel region.
        data["reads"] = [sam.count(chr, start, end) / (end - start)
                         for chr, start, end in zip(data["chr"], data["start"], data["end"])]
    elif args.type == "base":
        # NOTE(review): file.strip(".") only removes leading/trailing dots;
        # it does NOT strip the ".bam" extension.  Kept as-is to preserve
        # the existing cache-file naming -- confirm intent.
        depth_file = file.strip(".") + ".depth"
        try:
            data = pd.read_table(depth_file, names=["chr", "pos", "base"])
        except Exception:
            # Depth cache missing/unreadable: regenerate it with samtools.
            # (Was a bare except; narrowed so Ctrl-C is not swallowed.)
            samtools_out = sh.samtools("depth", file, "-b", "%s" % args.panel)
            with open(depth_file, "wb") as f:
                f.write(samtools_out.stdout)
            data = pd.read_table(depth_file, names=["chr", "pos", "base"])
    else:
        raise TypeException("data type is wrong")
    plot(data, args=args, file=file)
    return np.log10(data[args.type])
def plot_coverage(args=None):
    """Entry point: render per-sample coverage figures in parallel, then a
    combined boxplot of log10 depth across all samples.

    Args:
        args: CLI namespace with ``bams`` (list of BAM paths), ``progress``
            (worker-pool size), ``type``, ``panel``, ``n`` and ``out``
            (output file prefix).
    """
    pool = multiprocessing.Pool(processes=args.progress)
    box_data = {}
    # One async job per BAM; keyed by the sample basename.
    for file in args.bams:
        box_data[file.split(".")[0]] = pool.apply_async(multiprocessing_plot, (file, args))
    pool.close(); pool.join()
    # Resolve the async handles into per-sample log10 depth series.
    box_data = {key: value.get() for key, value in box_data.items()}
    data = pd.DataFrame(box_data)
    # Width scales with the number of samples so labels stay readable.
    fig, ax1 = plt.subplots(figsize=(len(args.bams), 12))
    sns.boxplot(data=data, ax=ax1, width=0.2, linewidth=.5)
    ax1.set_title("Uniformity of Coverage")
    ax1.set_ylabel("$Log10(%s)$" % args.type)
    fig.autofmt_xdate(ha='center', rotation=0)
    plt.xticks(rotation=90)
    fig.savefig(r'%s_Uniformity_Boxplot.png' % (args.out), dpi=600)
    plt.close()
| StarcoderdataPython |
12831950 | import random
import cv2
import numpy as np
import torch
from torchvision.transforms import RandomApply, Compose
class PrepareImageAndMask(object):
    """Normalize the input image and guarantee a mask is present.

    The image is truncated to its first three channels and rescaled from
    [0, 255] to [0, 1].  When no mask is supplied, an all-zero mask with
    the image's spatial shape is created.  Both are stored as float32.
    """
    def __call__(self, data):
        image = data['input'][:, :, :3] / 255
        if 'mask' in data:
            mask = data['mask']
        else:
            mask = np.zeros(image.shape[:2], dtype=image.dtype)
        data['input'] = image.astype(np.float32)
        data['mask'] = mask.astype(np.float32)
        return data
def to_tensor(pic):
    """Convert an H x W x C numpy array to a torch tensor.

    Byte (uint8) arrays are scaled from [0, 255] to [0.0, 1.0]; any other
    dtype is returned unscaled.  Non-ndarray inputs yield ``None`` (this
    mirrors the original fall-through behavior).
    """
    if not isinstance(pic, np.ndarray):
        return None
    tensor = torch.from_numpy(pic.transpose((0, 1, 2)))
    if isinstance(tensor, torch.ByteTensor):
        return tensor.float().div(255)
    return tensor
class ConvertToTensor(object):
    """Wrap the image and mask arrays as torch tensors.

    A leading axis is prepended to both the image and the mask before they
    are handed to :func:`to_tensor`, which performs the actual conversion
    (and [0, 255] -> [0, 1] scaling for byte arrays).
    """
    def __call__(self, data):
        data['input'] = to_tensor(np.expand_dims(data['input'], axis=0))
        data['mask'] = to_tensor(np.expand_dims(data['mask'], axis=0))
        return data
class ResizeToNxN(object):
    """Resize image and mask to n x n pixels.

    The image is resampled bilinearly; the mask uses nearest-neighbor so
    label values are never blended.
    """
    def __init__(self, n=128):
        self.n = n
    def __call__(self, data):
        size = (self.n, self.n)
        data['input'] = cv2.resize(data['input'], size, interpolation=cv2.INTER_LINEAR)
        data['mask'] = cv2.resize(data['mask'], size, interpolation=cv2.INTER_NEAREST)
        return data
def compute_padding(h, w, n=128):
    """Return (top, bottom, left, right) padding that rounds *h* and *w*
    up to the next multiple of *n*.

    Dimensions already divisible by *n* get zero padding; odd remainders
    give the extra pixel to the bottom/right side.
    """
    pad_h = (-h) % n
    pad_w = (-w) % n
    top = pad_h // 2
    left = pad_w // 2
    return top, pad_h - top, left, pad_w - left
class PadToNxN(object):
    """Reflect-pad image and mask up to the next multiple of n per axis.

    Uses BORDER_REFLECT_101, which mirrors the border without repeating
    the edge row/column itself.
    """
    def __init__(self, n=128):
        self.n = n
    def __call__(self, data):
        h, w = data['input'].shape[:2]
        top, bottom, left, right = compute_padding(h, w, self.n)
        for key in ('input', 'mask'):
            data[key] = cv2.copyMakeBorder(data[key], top, bottom, left, right, cv2.BORDER_REFLECT_101)
        return data
class HorizontalFlip(object):
    """Mirror both the image and the mask around the vertical axis."""
    def __call__(self, data):
        for key in ('input', 'mask'):
            data[key] = cv2.flip(data[key], 1)
        return data
class BrightnessShift(object):
    """Shift brightness by a random constant.

    A single offset drawn uniformly from [-max_value, max_value] is added
    to every pixel (in place, matching the original side effect) and the
    result is clipped back to [0, 1].
    """
    def __init__(self, max_value=0.1):
        self.max_value = max_value
    def __call__(self, data):
        limit = self.max_value
        image = data['input']
        image += np.random.uniform(-limit, limit)
        data['input'] = np.clip(image, 0, 1)
        return data
class BrightnessScaling(object):
    """Scale brightness by a random factor.

    Every pixel is multiplied (in place, matching the original side
    effect) by a factor drawn uniformly from
    [1 - max_value, 1 + max_value], then clipped to [0, 1].
    """
    def __init__(self, max_value=0.08):
        self.max_value = max_value
    def __call__(self, data):
        limit = self.max_value
        image = data['input']
        image *= np.random.uniform(1 - limit, 1 + limit)
        data['input'] = np.clip(image, 0, 1)
        return data
class GammaChange(object):
    """Apply a random gamma (power-law) adjustment.

    A gamma value is drawn uniformly from [1 - max_value, 1 + max_value];
    pixels are raised to 1/gamma and the result is clipped to [0, 1].
    """
    def __init__(self, max_value=0.08):
        self.max_value = max_value
    def __call__(self, data):
        gamma = np.random.uniform(1 - self.max_value, 1 + self.max_value)
        adjusted = data['input'] ** (1.0 / gamma)
        data['input'] = np.clip(adjusted, 0, 1)
        return data
def do_elastic_transform(image, mask, grid=10, distort=0.2):
    """Warp *image* and *mask* with a piecewise-linear elastic distortion.

    For each axis, grid-cell boundaries spaced ``grid`` pixels apart are
    jittered by up to +/- ``distort`` of a cell width, and pixels are
    resampled along the resulting coordinate maps.  The image is sampled
    bilinearly, the mask with nearest-neighbor; borders are reflected.
    """
    height, width = image.shape[:2]
    x_step = int(grid)
    xx = np.zeros(width, np.float32)
    prev = 0
    for x in range(0, width, x_step):
        start = x
        end = x + x_step
        if end > width:
            # Final (partial) cell is pinned to the image edge.
            end = width
            cur = width
        else:
            # Jitter this cell's right boundary relative to the previous one.
            cur = prev + x_step * (1 + random.uniform(-distort, distort))
        xx[start:end] = np.linspace(prev, cur, end - start)
        prev = cur
    y_step = int(grid)
    yy = np.zeros(height, np.float32)
    prev = 0
    for y in range(0, height, y_step):
        start = y
        end = y + y_step
        if end > height:
            end = height
            cur = height
        else:
            cur = prev + y_step * (1 + random.uniform(-distort, distort))
        yy[start:end] = np.linspace(prev, cur, end - start)
        prev = cur
    # grid
    map_x, map_y = np.meshgrid(xx, yy)
    map_x = map_x.astype(np.float32)
    map_y = map_y.astype(np.float32)
    image = cv2.remap(image, map_x, map_y, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101,
                      borderValue=(0, 0, 0,))
    mask = cv2.remap(mask, map_x, map_y, interpolation=cv2.INTER_NEAREST, borderMode=cv2.BORDER_REFLECT_101,
                     borderValue=(0, 0, 0,))
    # mask = (mask > 0.5).astype(np.float32)
    return image, mask
class ElasticDeformation(object):
    """Randomly warp image and mask on a coarse grid.

    A distortion strength is drawn uniformly from [0, max_distort] per
    call and applied via :func:`do_elastic_transform`.

    Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    (elastic deformation as in [Simard2003], with modifications).
    """
    def __init__(self, grid=10, max_distort=0.15):
        self.grid = grid
        self.max_distort = max_distort
    def __call__(self, data):
        strength = np.random.uniform(0, self.max_distort)
        data['input'], data['mask'] = do_elastic_transform(
            data['input'], data['mask'], self.grid, strength)
        return data
def do_rotation_transform(image, mask, angle=0):
    """Rotate image and mask by *angle* degrees about the image center.

    Implemented as a perspective warp of the corner box: the four image
    corners are rotated around the center and the induced homography is
    applied to both arrays (bilinear for the image, nearest-neighbor for
    the mask) with reflected borders.
    """
    height, width = image.shape[:2]
    theta = angle / 180 * np.pi
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    rot = np.array([[cos_t, -sin_t], [sin_t, cos_t]])
    corners = np.array([[0, 0], [width, 0], [width, height], [0, height]], np.float32)
    center = np.array([width / 2, height / 2])
    rotated = (np.dot(corners - center, rot.T) + center).astype(np.float32)
    mat = cv2.getPerspectiveTransform(corners.astype(np.float32), rotated)
    image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR,
                                borderMode=cv2.BORDER_REFLECT_101,
                                borderValue=(0, 0, 0,))
    mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_NEAREST,
                               borderMode=cv2.BORDER_REFLECT_101,
                               borderValue=(0, 0, 0,))
    return image, mask
class Rotation(object):
    """Rotate image and mask by a random angle drawn uniformly from
    [-max_angle, max_angle] degrees."""
    def __init__(self, max_angle=15):
        self.max_angle = max_angle
    def __call__(self, data):
        angle = np.random.uniform(-self.max_angle, self.max_angle)
        data['input'], data['mask'] = do_rotation_transform(
            data['input'], data['mask'], angle)
        return data
def do_horizontal_shear(image, mask, scale=0):
    """Shear image and mask horizontally.

    The top edge moves right and the bottom edge moves left by
    ``scale * width`` pixels (reversed for negative *scale*), realized as
    a perspective warp with reflected borders; the mask is resampled with
    nearest-neighbor interpolation.
    """
    height, width = image.shape[:2]
    dx = int(scale * width)
    src = np.array([[0, 0], [width, 0], [width, height], [0, height]], np.float32)
    dst = np.array([[dx, 0], [width + dx, 0], [width - dx, height], [-dx, height]], np.float32)
    mat = cv2.getPerspectiveTransform(src, dst)
    image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR,
                                borderMode=cv2.BORDER_REFLECT_101, borderValue=(0, 0, 0,))
    mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_NEAREST,
                               borderMode=cv2.BORDER_REFLECT_101, borderValue=(0, 0, 0,))
    return image, mask
class HorizontalShear(object):
    """Apply a random horizontal shear with scale drawn uniformly from
    [-max_scale, max_scale]."""
    def __init__(self, max_scale=0.2):
        self.max_scale = max_scale
    def __call__(self, data):
        scale = np.random.uniform(-self.max_scale, self.max_scale)
        data['input'], data['mask'] = do_horizontal_shear(
            data['input'], data['mask'], scale)
        return data
class HWCtoCHW(object):
    """Reorder the image array from (H, W, C) to (C, H, W) — the layout
    expected by torch models."""
    def __call__(self, data):
        data['input'] = np.moveaxis(data['input'], -1, 0)
        return data
def augmentations(args):
    """Applies random augmentations for the input images based on the transform probability.
    Note:
        Many methods are taken from https://www.kaggle.com/c/tgs-salt-identification-challenge/discussion/63974.
        The user can specify between geometric, image or both types of transforms to the images since sometimes
        some transformations work well for certain datasets.
    :param args:
        image_size (int) : size of the image to be resized.
        transform_prob (float) : probability to apply transformations on the data.
    :return:
        a compose of transformations.
    """
    # NOTE(review): augment_type is hard-coded, so only the 'geometric'
    # branch is reachable; presumably this should come from args -- confirm.
    augment_type = 'geometric'
    transform_prob = args.transform_prob
    if augment_type == 'geometric':
        # Shape-changing transforms only; every pipeline ends with a resize
        # to image_size and conversion to torch tensors.
        geometric_transforms = Compose([RandomApply([HorizontalShear(max_scale=0.07)], p=transform_prob),
                                        RandomApply([Rotation(max_angle=15)], p=transform_prob),
                                        RandomApply([ElasticDeformation(max_distort=0.15)], p=transform_prob),
                                        ResizeToNxN(args.image_size),
                                        ConvertToTensor()
                                        ])
        return geometric_transforms
    elif augment_type == 'image':
        # Intensity-only transforms (brightness shift/scale, gamma).
        brightness_transform = Compose([RandomApply([BrightnessShift(max_value=0.1)], p=transform_prob),
                                        RandomApply([BrightnessScaling(max_value=0.08)], p=transform_prob),
                                        RandomApply([GammaChange(max_value=0.08)], p=transform_prob),
                                        ResizeToNxN(args.image_size),
                                        ConvertToTensor()
                                        ])
        return brightness_transform
    elif augment_type == 'both':
        # Geometric transforms first, then intensity transforms.
        both_transforms = Compose([RandomApply([HorizontalShear(max_scale=0.07)], p=transform_prob),
                                   RandomApply([Rotation(max_angle=15)], p=transform_prob),
                                   RandomApply([ElasticDeformation(max_distort=0.15)], p=transform_prob),
                                   RandomApply([BrightnessShift(max_value=0.1)], p=transform_prob),
                                   RandomApply([BrightnessScaling(max_value=0.08)], p=transform_prob),
                                   RandomApply([GammaChange(max_value=0.08)], p=transform_prob),
                                   ResizeToNxN(args.image_size),
                                   ConvertToTensor()
                                   ])
        return both_transforms
    # NOTE(review): an unrecognized augment_type falls through and returns
    # None implicitly.
| StarcoderdataPython |
5074921 | import py
class DefaultPlugin:
    """ Plugin implementing defaults and general options.

    NOTE: this is legacy py-lib / py.test code written for Python 2
    (it uses print statements).
    """
    def pytest_pyfunc_call(self, pyfuncitem, args, kwargs):
        # Default test invocation: call the collected function directly.
        pyfuncitem.obj(*args, **kwargs)
        return
    def pytest_collect_file(self, path, parent):
        # Collect "test_*.py" / "*_test.py" modules, plus any .py file that
        # was named explicitly on the command line.
        ext = path.ext
        pb = path.purebasename
        if pb.startswith("test_") or pb.endswith("_test") or \
            path in parent.config.args:
            if ext == ".py":
                return parent.Module(path, parent=parent)
    def pytest_collect_directory(self, path, parent):
        #excludelist = parent._config.getvalue_pathlist('dir_exclude', path)
        #if excludelist and path in excludelist:
        #    return
        if not parent.recfilter(path):
            # check if cmdline specified this dir or a subdir directly
            for arg in parent.config.args:
                if path == arg or arg.relto(path):
                    break
            else:
                return
        # not use parent.Directory here as we generally
        # want dir/conftest.py to be able to
        # define Directory(dir) already
        Directory = parent.config.getvalue('Directory', path)
        return Directory(path, parent=parent)
    def pytest_addoption(self, parser):
        # Register the built-in command line options, grouped for --help:
        # general, test process debugging, and distributed testing.
        group = parser.addgroup("general", "test collection and failure interaction options")
        group._addoption('-v', '--verbose', action="count",
            dest="verbose", default=0, help="increase verbosity."),
        group._addoption('-x', '--exitfirst',
            action="store_true", dest="exitfirst", default=False,
            help="exit instantly on first error or failed test."),
        group._addoption('-k',
            action="store", dest="keyword", default='',
            help="only run test items matching the given "
                "space separated keywords.  precede a keyword with '-' to negate. "
                "Terminate the expression with ':' to treat a match as a signal "
                "to run all subsequent tests. ")
        group._addoption('-l', '--showlocals',
            action="store_true", dest="showlocals", default=False,
            help="show locals in tracebacks (disabled by default).")
        #group._addoption('--showskipsummary',
        #    action="store_true", dest="showskipsummary", default=False,
        #    help="always show summary of skipped tests")
        group._addoption('--pdb',
            action="store_true", dest="usepdb", default=False,
            help="start pdb (the Python debugger) on errors.")
        group._addoption('--tb', metavar="style",
            action="store", dest="tbstyle", default='long',
            type="choice", choices=['long', 'short', 'no'],
            help="traceback verboseness (long/short/no).")
        group._addoption('-s',
            action="store_true", dest="nocapture", default=False,
            help="disable catching of stdout/stderr during test run.")
        group.addoption('--boxed',
            action="store_true", dest="boxed", default=False,
            help="box each test run in a separate process")
        group._addoption('-p', action="append", dest="plugin", default = [],
            help=("load the specified plugin after command line parsing. "
                "Example: '-p hello' will trigger 'import pytest_hello' "
                "and instantiate 'HelloPlugin' from the module."))
        group._addoption('-f', '--looponfail',
            action="store_true", dest="looponfail", default=False,
            help="run tests, re-run failing test set until all pass.")
        group = parser.addgroup("test process debugging")
        group.addoption('--collectonly',
            action="store_true", dest="collectonly",
            help="only collect tests, don't execute them."),
        group.addoption('--traceconfig',
            action="store_true", dest="traceconfig", default=False,
            help="trace considerations of conftest.py files."),
        group._addoption('--nomagic',
            action="store_true", dest="nomagic", default=False,
            help="don't reinterpret asserts, no traceback cutting. ")
        group._addoption('--fulltrace',
            action="store_true", dest="fulltrace", default=False,
            help="don't cut any tracebacks (default is to cut).")
        group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir",
            help="base temporary directory for this test run.")
        group._addoption('--iocapture', action="store", default="fd", metavar="method",
            type="choice", choices=['fd', 'sys', 'no'],
            help="set iocapturing method: fd|sys|no.")
        group.addoption('--debug',
            action="store_true", dest="debug", default=False,
            help="generate and show debugging information.")
        group = parser.addgroup("dist", "distributed testing") #  see http://pytest.org/help/dist")
        group._addoption('--dist', metavar="distmode",
            action="store", choices=['load', 'each', 'no'],
            type="choice", dest="dist", default="no",
            help=("set mode for distributing tests to exec environments.\n\n"
                "each: send each test to each available environment.\n\n"
                "load: send each test to available environment.\n\n"
                "(default) no: run tests inprocess, don't distribute."))
        group._addoption('--tx', dest="tx", action="append", default=[], metavar="xspec",
            help=("add a test execution environment. some examples: "
                "--tx popen//python=python2.5 --tx socket=192.168.1.102:8888 "
                "--tx ssh=user@codes<EMAIL>//chdir=testcache"))
        group._addoption('-d',
            action="store_true", dest="distload", default=False,
            help="load-balance tests.  shortcut for '--dist=load'")
        group._addoption('-n', dest="numprocesses", metavar="numprocesses",
            action="store", type="int",
            help="shortcut for '--dist=load --tx=NUM*popen'")
        group.addoption('--rsyncdir', action="append", default=[], metavar="dir1",
            help="add directory for rsyncing to remote tx nodes.")
    def pytest_configure(self, config):
        # Order matters: normalize options first, then pick the session
        # class, then load -p plugins so they see the final option state.
        self.fixoptions(config)
        self.setsession(config)
        self.loadplugins(config)
    def fixoptions(self, config):
        # Expand shortcut options into their canonical forms and reject
        # incompatible combinations.
        if config.option.numprocesses:
            config.option.dist = "load"
            config.option.tx = ['popen'] * int(config.option.numprocesses)
        if config.option.distload:
            config.option.dist = "load"
        if config.getvalue("usepdb"):
            if config.getvalue("looponfail"):
                raise config.Error("--pdb incompatible with --looponfail.")
            if config.option.dist != "no":
                # NOTE(review): typo "incomptaible" in this user-facing
                # message (kept verbatim -- changing it alters output).
                raise config.Error("--pdb incomptaible with distributing tests.")
    def loadplugins(self, config):
        # Import each '-p name' plugin as 'pytest_name'.
        for name in config.getvalue("plugin"):
            print "importing", name
            config.pytestplugins.import_plugin(name)
    def setsession(self, config):
        # Choose the session implementation implied by the options:
        # collect-only, looponfail, distributed, or the default (None).
        val = config.getvalue
        if val("collectonly"):
            from py.__.test.session import Session
            config.setsessionclass(Session)
        else:
            if val("looponfail"):
                from py.__.test.looponfail.remote import LooponfailingSession
                config.setsessionclass(LooponfailingSession)
            elif val("dist") != "no":
                from py.__.test.dist.dsession import DSession
                config.setsessionclass(DSession)
    def pytest_item_makereport(self, item, excinfo, when, outerr):
        # Build the standard report event for a finished test item.
        from py.__.test import event
        return event.ItemTestReport(item, excinfo, when, outerr)
def test_implied_different_sessions(tmpdir):
    # Each option combination should imply the expected session class name
    # (None means the default in-process session).
    def x(*args):
        config = py.test.config._reparse([tmpdir] + list(args))
        try:
            config.pytestplugins.do_configure(config)
        except ValueError:
            return Exception
        return getattr(config._sessionclass, '__name__', None)
    assert x() == None
    assert x('-d') == 'DSession'
    assert x('--dist=each') == 'DSession'
    assert x('-n3') == 'DSession'
    assert x('-f') == 'LooponfailingSession'
def test_generic(plugintester):
    """Run the generic plugin API conformance check against DefaultPlugin."""
    plugintester.apicheck(DefaultPlugin)
def test_plugin_specify(testdir):
    """-p with a non-existent plugin name must fail at configure time."""
    testdir.chdir()
    config = testdir.parseconfig("-p", "nqweotexistent")
    # The import error only surfaces when do_configure imports the plugin.
    py.test.raises(ImportError,
        "config.pytestplugins.do_configure(config)"
    )
def test_plugin_already_exists(testdir):
    """Requesting an already-registered plugin ("default") must be harmless."""
    config = testdir.parseconfig("-p", "default")
    assert config.option.plugin == ['default']
    config.pytestplugins.do_configure(config)
class TestDistOptions:
    """Tests for the --tx / --rsyncdir parsing helpers on the config object."""
    def test_getxspecs(self, testdir):
        """Each --tx argument becomes one xspec object."""
        config = testdir.parseconfigure("--tx=popen", "--tx", "ssh=xyz")
        xspecs = config.getxspecs()
        assert len(xspecs) == 2
        print xspecs
        assert xspecs[0].popen
        assert xspecs[1].ssh == "xyz"
    def test_xspecs_multiplied(self, testdir):
        """A NUM* prefix multiplies the spec that follows it."""
        xspecs = testdir.parseconfigure("--tx=3*popen",).getxspecs()
        assert len(xspecs) == 3
        assert xspecs[1].popen
    def test_getrsyncdirs(self, testdir):
        # The "+ 1" accounts for one default rsync root added by the config
        # machinery -- presumably the py lib itself; not visible from here.
        config = testdir.parseconfigure('--rsyncdir=' + str(testdir.tmpdir))
        roots = config.getrsyncdirs()
        assert len(roots) == 1 + 1
        assert testdir.tmpdir in roots
    def test_getrsyncdirs_with_conftest(self, testdir):
        """rsyncdirs from conftest.py and --rsyncdir options are merged."""
        p = py.path.local()
        for bn in 'x y z'.split():
            p.mkdir(bn)
        testdir.makeconftest("""
            rsyncdirs= 'x',
        """)
        config = testdir.parseconfigure(testdir.tmpdir, '--rsyncdir=y', '--rsyncdir=z')
        roots = config.getrsyncdirs()
        assert len(roots) == 3 + 1
        assert py.path.local('y') in roots
        assert py.path.local('z') in roots
        assert testdir.tmpdir.join('x') in roots
def test_dist_options(testdir):
    """--pdb conflicts with looponfail/distribution; -n implies --dist=load."""
    py.test.raises(Exception, "testdir.parseconfigure('--pdb', '--looponfail')")
    py.test.raises(Exception, "testdir.parseconfigure('--pdb', '-n 3')")
    py.test.raises(Exception, "testdir.parseconfigure('--pdb', '-d')")
    config = testdir.parseconfigure("-n 2")
    assert config.option.dist == "load"
    assert config.option.tx == ['popen'] * 2
    config = testdir.parseconfigure("-d")
    assert config.option.dist == "load"
| StarcoderdataPython |
373937 | #!/usr/bin/env python
#
# Author: <NAME>
# Copyright (c) 2020 Arizona Board of Regents
# About: Works within the strym package to collect metadata files
# from within a folder and print interesting aspects of the collection
# License: MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
# ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS, COPYRIGHT HOLDERS OR ARIZONA BOARD OF REGENTS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
__author__ = '<NAME>'
__email__ = '<EMAIL>'
import json
import sys
class dashboard:
    """Collect strym metadata JSON files from a directory tree and report
    aggregate statistics (file counts, total driving distance).

    Parameters
    --------------
    directory: `str`
        Parent directory scanned for per-drive folders containing ``*.json``
        metadata files.
    verbose: `bool`
        Boolean flag, if `True` progress is printed while scanning/parsing.
    start, end:
        Optional date bounds; currently stored but not used for filtering.
    kwargs: variable list of argument in the dictionary format

    Attributes
    --------------
    metadata_dict: `list`
        Parsed JSON dictionaries, one per successfully read metadata file.
    jsonlist: `list`
        Paths of all ``*.json`` files found (including unparsable ones).
    error_count: `int`
        Set by `miles`/`kilometers`: number of entries that lacked the
        requested distance key during the most recent call.
    """

    def __init__(self, directory='./', verbose=False, start=None, end=None, **kwargs):
        import glob

        self.directory = directory
        self.verbose = verbose
        self.start = start
        self.end = end

        # One sub-folder per recording session, e.g. <directory>/2020_03_03/.
        folderlist = glob.glob(self.directory + "*")
        if verbose:
            print(folderlist)

        jsonlist = []
        for datafolder in folderlist:
            jsonlisttmp = glob.glob(datafolder + "/*.json")
            if verbose:
                print(jsonlisttmp)
            jsonlist.extend(jsonlisttmp)
        if verbose:
            print(jsonlist)

        metadata_dict = []
        for json_file_str in jsonlist:
            try:
                with open(json_file_str) as json_file:
                    metadata_dict.append(json.load(json_file))
            except Exception as ex:
                # Best effort: a malformed/unreadable file is reported, not fatal.
                print(f'Skipping {json_file_str}, continuing (ex={ex})')
        self.metadata_dict = metadata_dict
        self.jsonlist = jsonlist

    def statistics(self):
        """Retrieves interesting statistics.

        Returns
        ----------
        `str` :
            Human-readable summary (entry and file counts).
        """
        result = ''
        result += f'Metadata entries: {len(self.metadata_dict)}\n'
        result += f'JSON files found: {len(self.jsonlist)}\n'
        return result

    def _total_distance(self, key):
        """Sum `key` across all metadata entries.

        Entries without `key` are skipped; their number is recorded in
        `self.error_count` (and reported when verbose).
        """
        total = 0
        self.error_count = 0
        for entry in self.metadata_dict:
            try:
                total = total + entry[key]
            except Exception:
                self.error_count += 1
                if self.verbose:
                    print(f'No key {key} in dictionary, skipping')
        return total

    def miles(self):
        """Retrieves distance traveled in miles.

        Returns
        ----------
        `float` :
            Total distance travelled in miles.
        """
        return self._total_distance('distance_miles')

    def kilometers(self):
        """Retrieves distance traveled in kilometers.

        Returns
        ----------
        `float` :
            Total distance travelled in kilometers (key ``distance_km``).
        """
        # Fix: the old verbose message named a nonexistent key
        # ('distance_kilometers'); the actual metadata key is 'distance_km'.
        return self._total_distance('distance_km')
def main(argv):
    """CLI entry point: scan a directory of metadata files and print totals.

    Options: -h (help), -v/--verbose, -d/--directory DIR,
    -s/--start-date ISO_DATE, -e/--end-date ISO_DATE.
    Exits with status 2 on unparsable options.
    """
    import getopt
    from datetime import datetime

    directory = './'
    verbose = False
    try:
        # Fix: '--start-date'/'--end-date' were absent from the long-option
        # list, so spelling the options out raised GetoptError.
        opts, args = getopt.getopt(
            argv, "hvd:s:e:", ["directory=", "start-date=", "end-date="])
    except getopt.GetoptError:
        print('dashboard.py <-v,--verbose> -d <directory> -s <start_date> -e <end_date>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('dashboard.py <-v,--verbose> -d <directory>')
            sys.exit()
        elif opt in ('-d', '--directory'):
            directory = arg
            print(f'directory={directory}')
        elif opt in ('-s', '--start-date'):
            # Fix: the code previously did `import datetime` and then called
            # datetime.fromisoformat on the *module* (AttributeError); the
            # classmethod lives on the datetime class.
            start = datetime.fromisoformat(arg)
            print(f'start_date={start}')
        elif opt in ('-e', '--end-date'):
            end = datetime.fromisoformat(arg)
            print(f'end_date={end}')
        elif opt in ('-v', '--verbose'):
            verbose = True
            print(f'verbose={verbose}')

    from strym import dashboard
    try:
        # NOTE: start/end are parsed but, as before, not forwarded -- the
        # dashboard class stores them without using them yet.
        db = dashboard(directory=directory, verbose=verbose)
        print(db.statistics())
        print(f'Total driving distance (miles): {db.miles()} ({db.error_count} files not parsed)')
        print(f'Total driving distance (km): {db.kilometers()} ({db.error_count} files not parsed)')
    except Exception as ex:
        print(f'Exception when processing {directory} (msg={ex})')
if __name__ == "__main__":
    # Forward the command line (minus the program name) to main().
    main(sys.argv[1:])
3449443 |
# CheckSum.py
# By: LawlietJH
def GetChecksum(Pin):
    """Return the full 8-digit WPS PIN for an (up to) 7-digit base *Pin*.

    The WPS checksum weights the seven digits 3,1,3,1,3,1,3 from left to
    right; the check digit is (10 - sum % 10) % 10.  Returns the string
    'Error' when *Pin* is not purely numeric or has more than 7 digits.
    """
    digits = str(Pin)
    if not digits.isdigit() or len(digits) > 7:
        return 'Error'
    digits = digits.zfill(7)                      # left-pad to exactly 7 digits
    weighted = sum((3 if i % 2 == 0 else 1) * int(d)
                   for i, d in enumerate(digits))
    check = (10 - weighted % 10) % 10             # a result of 10 wraps to 0
    return digits + str(check)


def IsValidPIN(Pin):
    """Return True when *Pin* is an 8-digit WPS PIN with a valid checksum."""
    candidate = str(Pin)
    if len(candidate) > 8:
        return False
    candidate = candidate.zfill(8)
    return candidate == GetChecksum(candidate[:-1])
#=======================================================================
if __name__ == "__main__":
	# Worked examples:
	#    Full PIN      base    checksum    verdict
	#
	#    20128227    2012822      7        valid
	#    09386044    0938604      4        valid
	#    12345670    1234567      0        valid
	#    01234565    0123456      5        valid
	#    12332112    1233211      2        invalid (correct checksum: 3)
	Num = 1233211
	PIN = GetChecksum(Num)
	print('\n\n [*] Numero:\t ' + str(Num).zfill(7) + '\n [+] PIN:\t ' + PIN)
	PIN = 12332112
	Valid = IsValidPIN(PIN)
	print('\n\n [*] PIN:\t ' + str(PIN).zfill(8) + '\n [+] Es Valido: ' + str(Valid))
| StarcoderdataPython |
11310935 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# 编译器Python3.6/3.4 64bit
# 相关包:pyautocad,comtypes
from pyautocad import Autocad,APoint,aDouble,aShort,aInt,ACAD
from math import *
acad=Autocad(create_if_not_exists=True)
#对于autocad中的lwpolyline
def job1():
    '''Rotate the selected lwpolyline 45 degrees counter-clockwise about its
    geometric centre and recolour it red.
    '''
    # Drop a stale selection set left over from a previous run; AutoCAD
    # raises when the name does not exist yet, hence the broad try/except.
    try:
        acad.doc.SelectionSets.Item('SS1').Delete()
    except Exception:
        print('Delete selection failed')
    selection=acad.doc.SelectionSets.Add('SS1')
    acad.prompt('选择一条多义线')
    selection.SelectOnScreen(aShort([0]),['lwpolyline'])
    if selection.Count==1:
        entity=selection.Item(0)
    else:
        print("选择的多义线多于一条")
        return
    coor=entity.Coordinates #flat tuple [x0, y0, x1, y1, ...]
    # Build the rotation matrix about the centroid, apply it in place.
    newcoor=transform(rotate(radians(45),*center(coor)),coor)
    entity.Coordinates=aDouble(newcoor)
    entity.Color=ACAD.acRed
    acad.doc.Regen(1)
def job2():
    '''Stretch the selected polyline 1.5x along its longest diagonal, with
    one end point of that diagonal held fixed, and recolour it blue.
    '''
    try:
        acad.doc.SelectionSets.Item('SS1').Delete()
    except Exception:
        print('Delete selection failed')
    selection=acad.doc.SelectionSets.Add('SS1')
    acad.prompt('选择一条多义线')
    selection.SelectOnScreen(aShort([0]),['lwpolyline'])
    if selection.Count==1:
        entity=selection.Item(0)
    else:
        print("选择的多义线多于一条")
        return
    coor=entity.Coordinates #flat tuple [x0, y0, x1, y1, ...]
    vectorcoor=longestDiagonalLine(coor)
    basePnt=vectorcoor[0:2]
    endPnt=vectorcoor[2:4]
    # Angle of the diagonal relative to the x axis (AutoCAD utility call).
    angle=acad.doc.Utility.AngleFromXAxis(APoint(*basePnt),APoint(*endPnt))
    negbasePnt=[-x for x in basePnt]
    # Conjugate an x-axis scaling: translate basePnt to the origin and level
    # the diagonal, scale by 1.5 along x, then rotate/translate back.
    mat1=multi(move(*negbasePnt),rotate(-angle))
    mat2=scale(1.5)
    mat3=multi(rotate(angle),move(*basePnt))
    mat=multi(mat1,mat2,mat3)
    newcoor=transform(mat,coor)
    entity.Coordinates=aDouble(newcoor)
    entity.Color=ACAD.acBlue
    acad.doc.Regen(1)
def job3():
    '''Shear the selected polyline along the x direction about the lower-left
    corner of its bounding box, and recolour it yellow.
    '''
    try:
        acad.doc.SelectionSets.Item('SS1').Delete()
    except Exception:
        print('Delete selection failed')
    selection=acad.doc.SelectionSets.Add('SS1')
    acad.prompt('选择一条多义线')
    selection.SelectOnScreen(aShort([0]),['lwpolyline'])
    if selection.Count==1:
        entity=selection.Item(0)
    else:
        print("选择的多义线多于一条")
        return
    coor=entity.Coordinates
    # minPnt=np.float_([0,0,0])
    # maxPnt=np.float_([0,0])
    # pBox=entity.GetBoundingBox
    # minx=pBox()[0][0]
    # miny=pBox()[0][1]
    # maxx=pBox()[1][0]
    # maxy=pBox()[1][1]
    # Shear about the bounding box's lower-left corner: translate it to the
    # origin, shear with tanx=1, then translate back.
    retval=GetBoundingBox(entity)
    mat=multi(move(-retval[0],-retval[1]),shear(1),move(retval[0],retval[1]))
    newcoor=transform(mat,coor)
    entity.Coordinates=aDouble(newcoor)
    entity.Color=ACAD.acYellow
    acad.doc.Regen(1)
def transform(matrix: list, coor: list):
    """Apply a 2D homogeneous transform to a flat [x0, y0, x1, y1, ...] list.

    Each (x, y) pair is treated as the column vector (x, y, 1); only the top
    two rows of `matrix` are used.  Returns a new flat coordinate list.
    """
    newcoor = []
    for i in range(0, len(coor), 2):
        x, y = coor[i], coor[i + 1]
        newcoor.extend((
            matrix[0][0] * x + matrix[0][1] * y + matrix[0][2],
            matrix[1][0] * x + matrix[1][1] * y + matrix[1][2],
        ))
    return newcoor
def multi(mat1, mat2, mat3=None):
    """Return the matrix product mat2 * mat1 (note the argument order).

    Only the top two rows are computed; the bottom row is fixed at [0, 0, 1],
    which is valid for 2D affine transforms.  With three arguments the result
    is mat3 * (mat2 * mat1), i.e. transforms compose left to right.
    """
    if mat3 is not None:
        return multi(multi(mat1, mat2), mat3)
    product = [
        [sum(mat2[i][k] * mat1[k][j] for k in range(3)) for j in range(3)]
        for i in range(2)
    ]
    product.append([0, 0, 1])
    return product
def rotate(theta, x=0, y=0):
    """Rotation matrix by `theta` radians about the point (x, y).

    With the default pivot (the origin) the matrix is returned directly;
    otherwise the rotation is conjugated with translations to/from (x, y).
    """
    if x or y:
        return multi(move(-x, -y), rotate(theta), move(x, y))
    c, s = cos(theta), sin(theta)
    return [[c, -s, 0],
            [s, c, 0],
            [0, 0, 1]]
def move(dx, dy=0):
    """Translation matrix shifting by (dx, dy)."""
    return [[1, 0, dx],
            [0, 1, dy],
            [0, 0, 1]]


def scale(sx=1, sy=1):
    """Scaling matrix with factor sx along x and sy along y."""
    return [[sx, 0, 0],
            [0, sy, 0],
            [0, 0, 1]]


def shear(tanx=0, tany=0):
    """Shear matrix: tanx shears along x, tany shears along y."""
    return [[1, tanx, 0],
            [tany, 1, 0],
            [0, 0, 1]]
def reflect(lx, ly):
    """Reflection matrix across the origin line with direction vector (lx, ly).

    Special cases: (1, 0) mirrors across the x axis, (0, 1) across the y
    axis, and the degenerate direction (0, 0) is treated as a point
    reflection about the origin (both axes flipped).
    """
    if lx == 1 and ly == 0:
        return [[1, 0, 0], [0, -1, 0], [0, 0, 1]]
    if lx == 0 and ly == 1:
        return [[-1, 0, 0], [0, 1, 0], [0, 0, 1]]
    if lx == 0 and ly == 0:
        return [[-1, 0, 0], [0, -1, 0], [0, 0, 1]]
    norm = sqrt(lx * lx + ly * ly)        # normalise the direction vector
    ux, uy = lx / norm, ly / norm
    return [
        [ux * ux - uy * uy, 2 * ux * uy, 0],
        [2 * ux * uy, uy * uy - ux * ux, 0],
        [0, 0, 1],
    ]
def center(coor):
    """Centroid (mean x, mean y) of a flat [x0, y0, x1, y1, ...] vertex list."""
    sx = sy = 0
    nx = ny = 0
    for idx, value in enumerate(coor):
        if idx % 2 == 0:
            sx += value
            nx += 1
        else:
            sy += value
            ny += 1
    return sx / nx, sy / ny


def distance(x1, y1, x2, y2):
    """Euclidean distance between the points (x1, y1) and (x2, y2)."""
    return sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
def longestDiagonalLine(coor):
    '''Return (x1, y1, x2, y2): the end points of the longest diagonal of the
    polygon given as a flat [x0, y0, x1, y1, ...] vertex list.
    '''
    coorx=coor[::2]
    coory=coor[1::2]
    diagonal=[]
    length=len(coorx)
    # Pair vertex i with vertex i+j-length (a negative index, i.e. vertex
    # i+j modulo length): this walks every vertex pair whose cyclic
    # separation is between 2 and length//2, skipping polygon edges.
    for j in range(2,length//2+1):
        for i in range(length):
            s=distance(coorx[i],coory[i],coorx[i+j-length],coory[i+j-length])
            diagonal.append((i,i+j-length,s))
    # Stable sort by distance, then take the last (longest) entry; equal
    # lengths resolve in favour of the candidate generated last.
    diagonal.sort(key=lambda x:x[2])
    longest=diagonal.pop()
    # NOTE(review): with fewer than 4 vertices `diagonal` stays empty and
    # pop() raises IndexError -- confirm callers always pass real polygons.
    return coorx[longest[0]],coory[longest[0]],coorx[longest[1]],coory[longest[1]]
def GetBoundingBox(entity):
    """Axis-aligned bounding box (minx, miny, maxx, maxy) of an lwpolyline.

    Reads the entity's flat `Coordinates` sequence [x0, y0, x1, y1, ...];
    only valid for lwpolyline entities.
    """
    pts = entity.Coordinates
    xs = pts[0::2]
    ys = pts[1::2]
    return min(xs), min(ys), max(xs), max(ys)
def test():
    '''Ad-hoc smoke test: print the angle of the longest diagonal of a fixed
    pentagon (needs a live AutoCAD instance for the AngleFromXAxis call).
    '''
    coor=[1,0,1,1,0,2,-1,1,-1,0]
    vectorcoor=longestDiagonalLine(coor)
    angle=acad.doc.Utility.AngleFromXAxis(APoint(*vectorcoor[0:2]),APoint(*vectorcoor[2:4]))
    print(angle)
    # a1=[[1,2,3],[4,5,6],[0,0,1]]
    # a2=[[1,2,3],[4,5,6],[7,8,9]]
    # coor=transform(a1,[1,2])
    # print(coor)
if __name__=="__main__":
    # Run the three demos in sequence; each prompts for a polyline selection.
    job1()
    job2()
    job3()
74897 | <gh_stars>0
from time import time
TARGET_SUM = 200
COINS = [1, 2, 5, 10, 20, 50, 100, 200]
DYNAMIC_TABLE = {}  # memo: (point, str(coinset)) -> number of combinations


def calculate(point, coinset):
    """Return the number of ways to form `point` using coins from `coinset`.

    Classic coin-partition recursion: either spend one coin of the first
    denomination, or drop that denomination entirely.  Results are memoised
    in the module-level DYNAMIC_TABLE.  Assumes `coinset` is sorted in
    ascending order with distinct values (as COINS is).
    """
    if not coinset:
        # Robustness fix: the original indexed coinset[0] and raised
        # IndexError once every denomination had been dropped.
        return 0
    if point - coinset[0] < 0:
        return 0
    if point == coinset[0]:
        return 1
    key = (point, str(coinset))
    if key not in DYNAMIC_TABLE:
        DYNAMIC_TABLE[key] = (calculate(point - coinset[0], coinset)
                              + calculate(point, coinset[1:]))
    return DYNAMIC_TABLE[key]
# Time the full computation for the 200p target (Project Euler problem 31).
t = time()
print calculate(TARGET_SUM, COINS)
print 'Time:', time()-t
| StarcoderdataPython |
5064439 | import warnings
from . import Force
from . import Potential
from . import planarPotential
from . import linearPotential
from . import verticalPotential
from . import MiyamotoNagaiPotential
from . import MiyamotoNagaiPotential2
from . import MiyamotoNagaiPotential3
from . import IsochronePotential
from . import LogarithmicHaloPotential
from . import DoubleExponentialDiskPotential
from . import PowerSphericalPotential
from . import PowerSphericalPotentialwCutoff
from . import TwoPowerSphericalPotential
from . import plotRotcurve
from . import plotEscapecurve
from . import KGPotential
from . import interpRZPotential
from . import DehnenBarPotential
from . import SteadyLogSpiralPotential
from . import TransientLogSpiralPotential
from . import MovingObjectPotential
from . import EllipticalDiskPotential
from . import CosmphiDiskPotential
from . import RazorThinExponentialDiskPotential
from . import FlattenedPowerPotential
from . import SnapshotRZPotential
from . import BurkertPotential
from . import MN3ExponentialDiskPotential
from . import KuzminKutuzovStaeckelPotential
from . import PlummerPotential
from . import PseudoIsothermalPotential
from . import KuzminDiskPotential
from . import TwoPowerTriaxialPotential
from . import FerrersPotential
from . import SCFPotential
from . import SoftenedNeedleBarPotential
from . import DiskSCFPotential
from . import SpiralArmsPotential
from . import HenonHeilesPotential
from . import DehnenSmoothWrapperPotential
from . import SolidBodyRotationWrapperPotential
from . import CorotatingRotationWrapperPotential
from . import GaussianAmplitudeWrapperPotential
from . import RotateAndTiltWrapperPotential
from . import ChandrasekharDynamicalFrictionForce
from . import SphericalShellPotential
from . import RingPotential
from . import PerfectEllipsoidPotential
from . import IsothermalDiskPotential
from . import NumericalPotentialDerivativesMixin
from . import HomogeneousSpherePotential
from . import interpSphericalPotential
from . import TriaxialGaussianPotential
from . import KingPotential
from . import AnyAxisymmetricRazorThinDiskPotential
from . import AnySphericalPotential
from . import AdiabaticContractionWrapperPotential
from . import PowerTriaxialPotential
#
# Functions
#
evaluatePotentials = Potential.evaluatePotentials
evaluateDensities = Potential.evaluateDensities
evaluateSurfaceDensities = Potential.evaluateSurfaceDensities
mass = Potential.mass
evaluateRforces = Potential.evaluateRforces
evaluatephiforces = Potential.evaluatephiforces
evaluatezforces = Potential.evaluatezforces
evaluaterforces = Potential.evaluaterforces
evaluateR2derivs = Potential.evaluateR2derivs
evaluatez2derivs = Potential.evaluatez2derivs
evaluateRzderivs = Potential.evaluateRzderivs
evaluatephi2derivs = Potential.evaluatephi2derivs
evaluateRphiderivs = Potential.evaluateRphiderivs
evaluatephizderivs = Potential.evaluatephizderivs
evaluater2derivs = Potential.evaluater2derivs
RZToplanarPotential = planarPotential.RZToplanarPotential
toPlanarPotential = planarPotential.toPlanarPotential
RZToverticalPotential = verticalPotential.RZToverticalPotential
toVerticalPotential = verticalPotential.toVerticalPotential
plotPotentials = Potential.plotPotentials
plotDensities = Potential.plotDensities
plotSurfaceDensities = Potential.plotSurfaceDensities
plotplanarPotentials = planarPotential.plotplanarPotentials
plotlinearPotentials = linearPotential.plotlinearPotentials
calcRotcurve = plotRotcurve.calcRotcurve
vcirc = plotRotcurve.vcirc
dvcircdR = plotRotcurve.dvcircdR
epifreq = Potential.epifreq
verticalfreq = Potential.verticalfreq
flattening = Potential.flattening
rl = Potential.rl
omegac = Potential.omegac
vterm = Potential.vterm
lindbladR = Potential.lindbladR
plotRotcurve = plotRotcurve.plotRotcurve
calcEscapecurve = plotEscapecurve.calcEscapecurve
vesc = plotEscapecurve.vesc
plotEscapecurve = plotEscapecurve.plotEscapecurve
evaluateplanarPotentials = planarPotential.evaluateplanarPotentials
evaluateplanarRforces = planarPotential.evaluateplanarRforces
evaluateplanarR2derivs = planarPotential.evaluateplanarR2derivs
evaluateplanarphiforces = planarPotential.evaluateplanarphiforces
evaluatelinearPotentials = linearPotential.evaluatelinearPotentials
evaluatelinearForces = linearPotential.evaluatelinearForces
PotentialError = Potential.PotentialError
LinShuReductionFactor = planarPotential.LinShuReductionFactor
nemo_accname = Potential.nemo_accname
nemo_accpars = Potential.nemo_accpars
turn_physical_off = Potential.turn_physical_off
turn_physical_on = Potential.turn_physical_on
_dim = Potential._dim
_isNonAxi = Potential._isNonAxi
scf_compute_coeffs_spherical_nbody = SCFPotential.scf_compute_coeffs_spherical_nbody
scf_compute_coeffs_axi_nbody = SCFPotential.scf_compute_coeffs_axi_nbody
scf_compute_coeffs_nbody = SCFPotential.scf_compute_coeffs_nbody
scf_compute_coeffs_spherical = SCFPotential.scf_compute_coeffs_spherical
scf_compute_coeffs_axi = SCFPotential.scf_compute_coeffs_axi
scf_compute_coeffs = SCFPotential.scf_compute_coeffs
rtide = Potential.rtide
ttensor = Potential.ttensor
flatten = Potential.flatten
to_amuse = Potential.to_amuse
zvc = Potential.zvc
zvc_range = Potential.zvc_range
rhalf = Potential.rhalf
tdyn = Potential.tdyn
#
# Classes
#
Force = Force.Force
Potential = Potential.Potential
planarAxiPotential = planarPotential.planarAxiPotential
planarPotential = planarPotential.planarPotential
linearPotential = linearPotential.linearPotential
MiyamotoNagaiPotential = MiyamotoNagaiPotential.MiyamotoNagaiPotential
MiyamotoNagaiPotential2 = MiyamotoNagaiPotential2.MiyamotoNagaiPotential2
MiyamotoNagaiPotential3 = MiyamotoNagaiPotential3.MiyamotoNagaiPotential3
IsochronePotential = IsochronePotential.IsochronePotential
DoubleExponentialDiskPotential = DoubleExponentialDiskPotential.DoubleExponentialDiskPotential
LogarithmicHaloPotential = LogarithmicHaloPotential.LogarithmicHaloPotential
KeplerPotential = PowerSphericalPotential.KeplerPotential
PowerSphericalPotential = PowerSphericalPotential.PowerSphericalPotential
PowerSphericalPotentialwCutoff = PowerSphericalPotentialwCutoff.PowerSphericalPotentialwCutoff
DehnenSphericalPotential = TwoPowerSphericalPotential.DehnenSphericalPotential
DehnenCoreSphericalPotential = TwoPowerSphericalPotential.DehnenCoreSphericalPotential
NFWPotential = TwoPowerSphericalPotential.NFWPotential
JaffePotential = TwoPowerSphericalPotential.JaffePotential
HernquistPotential = TwoPowerSphericalPotential.HernquistPotential
TwoPowerSphericalPotential = TwoPowerSphericalPotential.TwoPowerSphericalPotential
KGPotential = KGPotential.KGPotential
interpRZPotential = interpRZPotential.interpRZPotential
DehnenBarPotential = DehnenBarPotential.DehnenBarPotential
SteadyLogSpiralPotential = SteadyLogSpiralPotential.SteadyLogSpiralPotential
TransientLogSpiralPotential = TransientLogSpiralPotential.TransientLogSpiralPotential
MovingObjectPotential = MovingObjectPotential.MovingObjectPotential
EllipticalDiskPotential = EllipticalDiskPotential.EllipticalDiskPotential
LopsidedDiskPotential = CosmphiDiskPotential.LopsidedDiskPotential
CosmphiDiskPotential = CosmphiDiskPotential.CosmphiDiskPotential
RazorThinExponentialDiskPotential = RazorThinExponentialDiskPotential.RazorThinExponentialDiskPotential
FlattenedPowerPotential = FlattenedPowerPotential.FlattenedPowerPotential
InterpSnapshotRZPotential = SnapshotRZPotential.InterpSnapshotRZPotential
SnapshotRZPotential = SnapshotRZPotential.SnapshotRZPotential
BurkertPotential = BurkertPotential.BurkertPotential
MN3ExponentialDiskPotential = MN3ExponentialDiskPotential.MN3ExponentialDiskPotential
KuzminKutuzovStaeckelPotential = KuzminKutuzovStaeckelPotential.KuzminKutuzovStaeckelPotential
PlummerPotential = PlummerPotential.PlummerPotential
PseudoIsothermalPotential = PseudoIsothermalPotential.PseudoIsothermalPotential
KuzminDiskPotential = KuzminDiskPotential.KuzminDiskPotential
TriaxialHernquistPotential = TwoPowerTriaxialPotential.TriaxialHernquistPotential
TriaxialNFWPotential = TwoPowerTriaxialPotential.TriaxialNFWPotential
TriaxialJaffePotential = TwoPowerTriaxialPotential.TriaxialJaffePotential
TwoPowerTriaxialPotential = TwoPowerTriaxialPotential.TwoPowerTriaxialPotential
FerrersPotential = FerrersPotential.FerrersPotential
SCFPotential = SCFPotential.SCFPotential
SoftenedNeedleBarPotential = SoftenedNeedleBarPotential.SoftenedNeedleBarPotential
DiskSCFPotential = DiskSCFPotential.DiskSCFPotential
SpiralArmsPotential = SpiralArmsPotential.SpiralArmsPotential
HenonHeilesPotential = HenonHeilesPotential.HenonHeilesPotential
ChandrasekharDynamicalFrictionForce = ChandrasekharDynamicalFrictionForce.ChandrasekharDynamicalFrictionForce
SphericalShellPotential = SphericalShellPotential.SphericalShellPotential
RingPotential = RingPotential.RingPotential
PerfectEllipsoidPotential = PerfectEllipsoidPotential.PerfectEllipsoidPotential
IsothermalDiskPotential = IsothermalDiskPotential.IsothermalDiskPotential
NumericalPotentialDerivativesMixin = NumericalPotentialDerivativesMixin.NumericalPotentialDerivativesMixin
HomogeneousSpherePotential = HomogeneousSpherePotential.HomogeneousSpherePotential
interpSphericalPotential = interpSphericalPotential.interpSphericalPotential
TriaxialGaussianPotential = TriaxialGaussianPotential.TriaxialGaussianPotential
KingPotential = KingPotential.KingPotential
AnyAxisymmetricRazorThinDiskPotential = AnyAxisymmetricRazorThinDiskPotential.AnyAxisymmetricRazorThinDiskPotential
AnySphericalPotential = AnySphericalPotential.AnySphericalPotential
# Wrappers
DehnenSmoothWrapperPotential = DehnenSmoothWrapperPotential.DehnenSmoothWrapperPotential
SolidBodyRotationWrapperPotential = SolidBodyRotationWrapperPotential.SolidBodyRotationWrapperPotential
CorotatingRotationWrapperPotential = CorotatingRotationWrapperPotential.CorotatingRotationWrapperPotential
GaussianAmplitudeWrapperPotential = GaussianAmplitudeWrapperPotential.GaussianAmplitudeWrapperPotential
RotateAndTiltWrapperPotential = RotateAndTiltWrapperPotential.RotateAndTiltWrapperPotential
AdiabaticContractionWrapperPotential = AdiabaticContractionWrapperPotential.AdiabaticContractionWrapperPotential
PowerTriaxialPotential = PowerTriaxialPotential.PowerTriaxialPotential
# MW potential models, now in galpy.potential.mwpotentials, but keep these two
# for tests, backwards compatibility, and convenience
from . import mwpotentials
MWPotential = mwpotentials._MWPotential
MWPotential2014 = mwpotentials.MWPotential2014
| StarcoderdataPython |
3338263 | <filename>src/test_all.py
import unittest
import snapshottest
import numpy as np
from process_data import (
preprocess,
load_data,
column_dtypes,
build_city_df,
calc_monthly,
load_city_lat_long,
)
class TestStringMethods(unittest.TestCase):
    # NOTE(review): the class name is a leftover from a template -- these are
    # data-pipeline tests, not string-method tests.
    def test_load_data(self):
        """Loaded data must have sane temperatures and no unexpected nulls."""
        df = preprocess(load_data)
        # check temperature values
        self.assertEqual(df.loc[df.AvgTemperature < -100, "AvgTemperature"].count(), 0)
        self.assertEqual(df.loc[df.AvgTemperature > 200, "AvgTemperature"].count(), 0)
        non_null_cols = df.drop(columns=["State", "AvgTemperature"])
        nan_rows = non_null_cols[non_null_cols.isnull().T.any()]
        # check for nan, nulls, empty str
        self.assertEqual(
            len(nan_rows),
            0,
            nan_rows,
        )
        city_country = df["CityCountry"]
        self.assertFalse(city_country.isnull().any().any())
        self.assertEqual(
            len(np.where(city_country == "")[0]),
            0,
            np.where(city_country == "")[0],
        )
    def test_build_city_df(self):
        """build_city_df rejects unknown city keys and accepts a known one."""
        df = preprocess(load_data)
        # check columns
        self.assertRaises(LookupError, build_city_df, df, "test", False)
        build_city_df(df, "Abilene, Texas, US", False)
    def test_calc_monthly(self):
        """calc_monthly runs without raising on a known city's frame."""
        df = preprocess(load_data)
        # check columns
        calc_monthly(build_city_df(df, "Abilene, Texas, US", False))
    def test_load_city_lat_long(self):
        """The city -> lat/long lookup table loads without raising."""
        load_city_lat_long()
    def test_find_lat_long(self):
        """Print the coordinates found for each city present in the lookup."""
        city_lat_long = load_city_lat_long()
        df = preprocess(load_data)
        for city_key in list(df.CityCountry.unique()):
            if city_key in city_lat_long:
                print(city_lat_long[city_key])
class TestSnapshot(snapshottest.TestCase):
    def test_snapshot_match(self):
        """Snapshot the frame's info/describe output for regression checks."""
        df = preprocess(load_data)
        # NOTE(review): DataFrame.info() prints to stdout and returns None,
        # so this snapshots None rather than the info text -- confirm intent.
        self.assertMatchSnapshot(df.info(), "df_info")
        self.assertMatchSnapshot(df.describe(datetime_is_numeric=True), "df_describe")
if __name__ == "__main__":
    # Allow running this module directly with the unittest runner.
    unittest.main()
1657296 | from typing import List
class Solution:
    def findSubstring(self, s: str, words: List[str]) -> List[int]:
        """Return all start indices (ascending) of substrings of `s` that are
        a concatenation of every word in `words` exactly once, in any order.

        LeetCode 30; all words are assumed to have the same length.  Replaces
        the previous version, which special-cased one test input, stopped
        after the first match (stray `break`) and counted words without a
        fixed-size window.  Sliding window per alignment offset:
        O(len(s) * word_len).
        """
        res: List[int] = []
        if not s or not words:
            return res
        wlen = len(words[0])
        if wlen * len(words) > len(s):
            return res
        # Multiset of required words.
        need = {}
        for w in words:
            need[w] = need.get(w, 0) + 1
        nwords = len(words)
        # One scan per alignment class modulo the word length.
        for offset in range(wlen):
            left = offset      # start of the current window
            have = {}          # counts of words currently inside the window
            count = 0          # number of words inside the window
            for j in range(offset, len(s) - wlen + 1, wlen):
                word = s[j:j + wlen]
                if word not in need:
                    # A foreign word can never be inside a match: reset.
                    have.clear()
                    count = 0
                    left = j + wlen
                    continue
                have[word] = have.get(word, 0) + 1
                count += 1
                # Shrink from the left until `word` is not over-represented.
                while have[word] > need[word]:
                    dropped = s[left:left + wlen]
                    have[dropped] -= 1
                    left += wlen
                    count -= 1
                if count == nwords:
                    res.append(left)
                    # Slide one word forward to look for the next match.
                    dropped = s[left:left + wlen]
                    have[dropped] -= 1
                    left += wlen
                    count -= 1
        res.sort()
        return res
if __name__ == "__main__":
    # Ad-hoc smoke checks against the classic LeetCode 30 example inputs.
    s = Solution()
    print(s.findSubstring("barfoothefoobarman", ["foo", "bar"]))
    print(s.findSubstring("wordgoodgoodgoodbestword", ["word", "good", "best", "word"]))
    print(s.findSubstring("", []))
    print(s.findSubstring("wordgoodgoodgoodbestword", ["word", "good", "best", "good"]))
    print(
        s.findSubstring(
            "lingmindraboofooowingdingbarrwingmonkeypoundcake",
            ["fooo", "barr", "wing", "ding", "wing"],
        )
    )
| StarcoderdataPython |
50805 | <reponame>gokul-sarath07/Nymblelabs-Expence-Tracker<filename>income_expense_tracker/authentication/views.py
from django.views import View
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from django.shortcuts import render, redirect
from django.http import JsonResponse
from validate_email import validate_email
import json
class RegistrationView(View):
    """Handles user registration: GET renders the form, POST processes it."""

    def get(self, request):
        """This function renders the registration page."""
        return render(request, 'authentication/register.html')

    def post(self, request):
        """Validate the submitted registration form and create the account.

        Re-renders the registration page with an error message on any
        invalid input; renders the login page on success.
        """
        username = request.POST['username']
        email = request.POST['email']
        password = request.POST['password']

        # Variable/value mapping passed back to the registration template so
        # the user does not lose what they already typed.
        context = {
            "fieldValues": request.POST
        }

        # All three fields are required.
        if not (username and email and password):
            messages.error(request, "Please fill all fields.")
            return render(request, 'authentication/register.html', context)

        # Username must be unique.  (Bug fix: previously this case fell
        # through without a return, so Django raised "view didn't return an
        # HttpResponse" whenever the username was already taken.)
        if User.objects.filter(username=username).exists():
            messages.error(request, "That username is already taken.")
            return render(request, 'authentication/register.html', context)

        # Email must be unique (same fall-through bug fixed here).
        if User.objects.filter(email=email).exists():
            messages.error(request, "This email is already registered.")
            return render(request, 'authentication/register.html', context)

        # Enforce the minimum password length.
        if len(password) < 6:
            messages.error(request, "Password must be atleast 6 characters")
            return render(request, 'authentication/register.html', context)

        # Create the user, set the hashed password and persist it.
        user = User.objects.create_user(username=username, email=email)
        user.set_password(password)
        user.save()
        messages.success(request, "Account successfully created")
        return render(request, 'authentication/login.html')
class UsernameValidationView(View):
    """AJAX endpoint that validates a candidate username."""

    def post(self, request):
        """Reject usernames that are not alphanumeric (400) or already
        registered (409); otherwise report the username as valid (200)."""
        payload = json.loads(request.body)
        candidate = payload['username']

        if not candidate.isalnum():
            error = {'username_error':
                     'Username should only contain alphanumeric characters'}
            return JsonResponse(error, status=400)

        taken = User.objects.filter(username=candidate).exists()
        if taken:
            error = {'username_error': "Sorry, that username's taken. Try another?"}
            return JsonResponse(error, status=409)

        return JsonResponse({'username_valid': True}, status=200)
class EmailValidationView(View):
    """AJAX endpoint that validates a candidate email address."""

    def post(self, request):
        """Reject syntactically invalid emails (400) or already-registered
        ones (409); otherwise report the email as valid (200)."""
        payload = json.loads(request.body)
        candidate = payload['email']

        if not validate_email(candidate):
            return JsonResponse({'email_error': 'This email is invalid'},
                                status=400)

        already_registered = User.objects.filter(email=candidate).exists()
        if already_registered:
            return JsonResponse(
                {'email_error': 'This email already exists, Please login.'},
                status=409)

        return JsonResponse({'email_valid': True}, status=200)
class PasswordValidationView(View):
    """API view validating that a candidate password is long enough."""
    def post(self, request):
        """Check the posted password length; respond with validation JSON."""
        data = json.loads(request.body)  # gets the data from request.
        password = data['password']  # gets password from data.
        # Passwords shorter than 6 characters are rejected.
        if len(password) < 6:
            return JsonResponse({
                'password_error': 'Passwords must be atleast 6 characters.'
            }, status=400)
        # Bug fix: the success payload previously reused the 'password_error'
        # key; return 'password_valid' to match the username/email views above.
        return JsonResponse({'password_valid': True}, status=200)
class LoginView(View):
    """Renders the login form and authenticates submitted credentials."""
    def get(self, request):
        """Render the login page."""
        return render(request, 'authentication/login.html')
    def post(self, request):
        """Authenticate the submitted credentials and start a session."""
        username = request.POST['username']
        password = request.POST['password']
        # Guard clause: both fields must be present.
        if not (username and password):
            messages.error(request, "Please fill all fields.")
            return render(request, 'authentication/login.html')
        user = authenticate(username=username, password=password)
        # authenticate() returns None when the credentials are invalid.
        if user is None:
            messages.error(request, "Incorrect credentials, Try again.")
            return render(request, 'authentication/login.html')
        login(request, user)
        messages.success(request, "You are now logged in.")
        return redirect('home')
# This view can be accessed only after logging in (LoginRequiredMixin).
class LogoutView(LoginRequiredMixin, View):
    """This class provides function to work with logout."""
    def post(self, request):
        """Logs out the current user and redirects to the login page."""
        logout(request)  # Ends the current session.
        # Confirmation message shown on the next rendered page.
        messages.success(request, "You have successfully logged out.")
        return redirect('login')
| StarcoderdataPython |
241496 | <reponame>pmp-p/wapy-pack<filename>wapy-lib/pythons/aio/upy/aio.py
# ❯❯❯
import sys
from ujson import loads, dumps
import uasyncio
from uasyncio import *
try:
    loop = get_event_loop()
except:
    # Older/incompatible uasyncio builds do not expose get_event_loop here;
    # fail loudly so the embedding runtime can report the version problem.
    print("18 : BAD ASYNCIO VERSION")
    raise
q = {}  # pending messages from the host, keyed by message id; fed by step()
req = []  # keys still awaited by consumers; finalize() keeps these alive
lio_listio = []  # list-io bookkeeping (unused in the visible code)
lio = {}  # results of completed Future() coroutines, keyed by id()
fds = {}  # per-file-descriptor stream state ('#<fd>' messages)
try:
    DBG = 'aio' in DBG  # DBG may be pre-seeded by the embedding runtime
except:
    DBG = 0
WARNED = 0  # NOTE(review): never set anywhere in this module — the "big q" warning repeats
IOCTL = []  # ioctl events received from the host side
# prevent running run_once in step at startup
error = True
def finalize():
    """Drop every pending entry in *q* whose key is no longer requested."""
    global q, req
    # Snapshot the stale keys first so the dict is not mutated while iterated.
    stale_keys = [key for key in q if key not in req]
    for stale in stale_keys:
        q.pop(stale)
class Event(dict):
    """Mapping whose entries can also be read as attributes (e.g. ev.data)."""
    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails; fall back to keys.
        return self[name]
# BEWARE : THIS IS NOT AN ASYNC FUNCTION
def step(jsdata):
    """Feed one JSON payload from the host into the queues, then run one
    scheduler pass.

    Called synchronously by the embedding runtime. Keys starting with '#'
    carry per-file-descriptor stream data; everything else is merged into
    the shared message dict *q*.
    """
    global q,IOCTL, error, loop, fds
    #if len(jsdata)!=12:
    #    print("step",aio.error,jsdata)
    if not aio.error:
        try:
            jsdata = loads(jsdata)
            try:
                q.update( jsdata )
                # Host-side ioctl notifications are accumulated for ctl().
                ioctl = q.get('ioctl',())
                if len(ioctl):
                    print("IOCTL",ioctl)
                    IOCTL.extend( ioctl )
            except Exception as e :
                sys.print_exception(e, sys.stderr)
                aio.error = True
            # Collect the stream-data keys ('#<fd>') before consuming them.
            fdio = []
            for k,v in q.items():
                if k.startswith('#'):
                    fdio.append(k)
            while len(fdio):
                fdk = fdio.pop(0)
                ts, origin, data = q.pop(fdk)
                node = fds[fdk[1:]]
                # raw msg: append to the fd's buffered text and expose its size.
                if isinstance(data, str):
                    data = [ fds.get(fdk,''), data ]
                    fds[fdk] = ''.join( data )
                    node.peek = len( fds[fdk] )
                # json: dispatch a message Event to every registered client.
                else :
                    for client in node.clients.get('message',()):
                        client( Event({'source':node, 'data':data, 'origin':origin} ) )
            # try to prevent leaks with unconsummed data left
            if len(q)>30:
                finalize()
            # or fail (NOTE(review): WARNED is never set, so this repeats)
            if not WARNED and len(q) > 50:
                pdb("65:aio","q","big","discard")
                aio.error = True
        except Exception as e :
            aio.error = repr(e)
            embed.log("81: %s" % aio.error)
            embed.log("82:aio.step.error(%r)" % jsdata)
            sys.print_exception(e, sys.stderr)
            jsdata = {}
    # no ctx, call just set the async context
    with aio.ctx:
        loop.run_once()
    return None
# TODO: use nanoseconds
async def ctl(file, ev, tmout):
    """Poll until ioctl event "<fd>:<ev>" arrives or *tmout* ms elapse.

    NOTE(review): on timeout this *returns* an Exception instance rather
    than raising it — callers must type-check the result.
    """
    global IOCTL
    fd = file.fileno()
    ioctl = "{}:{}".format((fd), (ev))
    print("94:AWAIT IOCTL", ioctl )
    stop_at = int(Time.time()*1_000 + tmout)
    while True:
        if ioctl in IOCTL:
            print("97:GOT IOCTL",ioctl)
            return True
        if int(Time.time()*1_000)>stop_at:
            break
        # Yield to the scheduler between polls (~60 Hz).
        await aio.sleep_ms(16)
    return Exception('IOCTL.timeout')
def network():
    """Lazily import the network helpers and expose them on the aio module."""
    from aio.network import StreamReader, StreamWriter, start_server
    aio.StreamReader = StreamReader
    aio.StreamWriter = StreamWriter
    aio.start_server = start_server
async def Future(fildes, coro):
    # Await *coro* and publish its result in the shared lio table under fildes.
    aio.lio[fildes] = await coro
def await_for(coro, tmout):
    """Busy-wait until *coro* resolves or *tmout* seconds elapse; return its result.

    NOTE(review): relies on runtime-provided globals (`undefined`, `Time`,
    `embed`, the `aio_suspend` module) that are not defined in this file —
    presumably injected by the wapy embedding; verify before reuse.
    """
    global loop
    embed.disable_irq()
    stop_at = int(Time.time() + tmout)
    fildes = id(coro)
    loop.create_task( Future(fildes, coro) )
    lio[fildes] = undefined
    while undefined(lio.get(fildes)):
        # presumably imported for its scheduler-suspend side effect — TODO confirm
        import aio_suspend
        if int(Time.time())>stop_at:
            print("136:await_for tmout")
            break
    embed.enable_irq()
    return lio.pop(fildes)
def fsync(owner, coro, tmout ):
    """Synchronously run *coro* on the loop, keyed by id(owner).

    Returns the coroutine's result, or re-raises it if the result is an
    Exception instance. NOTE(review): `undef` is not defined anywhere in
    this module — this line likely raises NameError at runtime.
    """
    global loop, lio, fds
    embed.disable_irq()
    fildes = id(owner)
    # TODO: use a io handler queue that can be rewritten in C
    loop.create_task( Future(fildes,coro) )
    stop_at = int(Time.time() + tmout)
    while undefined(lio.get( fildes, undef)):
        # presumably imported for its scheduler-suspend side effect — TODO confirm
        import aio_suspend
        if int(Time.time())>stop_at:
            pdb("116:aio_fsync tmout")
            break
    embed.enable_irq()
    result = lio.pop(fildes)
    if isinstance(result, Exception):
        raise result
    return result
# NOTE(review): aio_suspend is only imported inside functions above, so this
# module-level alias likely raises NameError at import time — verify.
flush = aio_suspend
def websocket(*argv, **kw):
    # Stub: no websocket backend is wired in; log and do nothing.
    pdb("16: no async websocket provider")
| StarcoderdataPython |
3317694 | #!/usr/bin/env python3
from distutils.core import setup
# NOTE(review): entry_points and install_requires are setuptools features;
# distutils.core.setup silently ignores unknown keywords, so the console
# scripts declared below are not installed. Consider switching to
# `from setuptools import setup`. 'summary' is likewise not a supported
# keyword (the standard field is 'description').
setup(
    name='cc-container-worker',
    version='0.12',
    summary='Curious Containers is an application management service that is able to execute thousands of '
            'short-lived applications in a distributed cluster by employing Docker container engines.',
    description='Curious Containers is an application management service that is able to execute thousands of '
                'short-lived applications in a distributed cluster by employing Docker container engines. In this '
                'context applications are atomic entities taking files and parameters as input and producing new files '
                'as output. They are short-lived in a sense that they calculate something and terminate as soon as all '
                'results have been produced. Curious Containers supports scientific use cases like biomedical analysis '
                'and reproducible research by providing standardized methods for packaging applications and executing '
                'them in a compute environment. Therefore application dependencies are added to a compatible Docker '
                'container image, including all necessary scripts, binaries and configurations.',
    author='<NAME>, <NAME>, <NAME>',
    author_email='<EMAIL>',
    url='https://github.com/curious-containers/cc-server',
    # One package per worker role plus the shared commons helpers.
    packages=[
        'cc_container_worker',
        'cc_container_worker.commons',
        'cc_container_worker.application_container',
        'cc_container_worker.data_container',
        'cc_container_worker.inspection_container'
    ],
    # Console entry points, one per container role.
    entry_points={
        'console_scripts': [
            'cc-application-container=cc_container_worker.application_container.__main__:main',
            'cc-data-container=cc_container_worker.data_container.__main__:main',
            'cc-inspection-container=cc_container_worker.inspection_container.__main__:main'
        ]
    },
    license='Apache-2.0',
    platforms=['any'],
    install_requires=[
        'jsonschema',
        'requests',
        'pymongo',
        'flask',
        'gunicorn',
        'gevent',
        'psutil',
        'paramiko'
    ]
)
| StarcoderdataPython |
11285256 | # main.py
from app import app
import views
import dashinterface
if __name__ == '__main__':
    # Listen on all interfaces so the dev server is reachable from outside.
    app.run(host='0.0.0.0', port=5000)
| StarcoderdataPython |
3427827 | <gh_stars>0
import gspread
import matplotlib.pyplot as plt
from gspread_dataframe import get_as_dataframe
import seaborn as sns
class PlotMyGoogleSheet():
    """Fetch a Google Sheet (sheet1) and plot or inspect its columns."""
    # Constructor
    def __init__(self, link):
        # Authenticate with the service account credentials, open the
        # spreadsheet by URL and keep only sheet1.
        self.sh = gspread.service_account(filename='credentials.json').open_by_url(link).sheet1
    def _as_dataframe(self):
        """Return the worksheet contents as a DataFrame, dropping unnamed columns.

        Extracted helper: ``plot`` and ``get_cols`` previously duplicated
        this loading logic.
        """
        df = get_as_dataframe(self.sh)
        return df.dropna(how="all", axis=1)
    # Line plot b/w col1 and col2 using matplotlib
    def plot(self, x, y):
        """Line-plot column *y* against column *x*, save and show the figure."""
        df = self._as_dataframe()
        sns.set_style('darkgrid')
        plt.figure(figsize=(15, 15))
        sns.lineplot(x=df[x], y=df[y])
        plt.xlabel(x)
        plt.ylabel(y)
        plt.savefig(x + ' VS ' + y + '.png')  # Save the figure
        plt.show()  # Render the figure
        print('Figure saved...')
    # Return column names of our sheet
    def get_cols(self):
        """Return the sheet's column names as a list."""
        return self._as_dataframe().columns.to_list()
return df.columns.to_list()
# Demo: runs network calls at import time (requires credentials.json on disk).
obj = PlotMyGoogleSheet('https://docs.google.com/spreadsheets/d/1SrZfvr2ee54r7HR1jGtAE9zHIj_Y-UzK9ok8bdwkpqc/edit#gid=0')
print(obj.get_cols())
| StarcoderdataPython |
3324995 | <reponame>caleberi/LeetCode<filename>python/isPalindrome.py
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def isPalindrome(self, head: ListNode) -> bool:
        """Return True if the singly linked list reads the same both ways."""
        if head is None:
            return True
        return linkedListPalindrome(head)
def getLinkedListLength(head):
    # NOTE(review): count starts at 1, so this returns node-count + 1.
    # linkedListPalindrome relies on this off-by-one (it passes length // 2
    # to getMiddleNode), so do not "fix" it here without adjusting the caller.
    count = 1
    while head is not None:
        count += 1
        head = head.next
    return count
def getMiddleNode(node, length):
    """Advance (length - 1) hops from *node* and return the node reached.

    Assumes length >= 1 (callers pass length // 2 of a non-empty list);
    length <= 0 would loop forever.
    """
    count = 1
    while count != length:
        count += 1
        node = node.next
    return node
def reverseLinkedList(node):
    """Reverse the list starting at *node* in place; return the new head."""
    reversed_head = None
    while node is not None:
        following = node.next
        node.next = reversed_head
        reversed_head = node
        node = following
    return reversed_head
def linkedListPalindrome(head):
    # length is node-count + 1 (see getLinkedListLength), so length // 2
    # lands one node before the true midpoint; the comparison loop below
    # stops at the first None on either side, which compensates for both
    # even- and odd-length lists.
    length = getLinkedListLength(head)
    startNode = head
    middleEndNode = getMiddleNode(head, length//2)
    middleStartNode = reverseLinkedList(middleEndNode)
    # middleEndNode is now the reversed sublist's tail; ensure it terminates.
    middleEndNode.next = None
    while startNode is not None and middleStartNode is not None:
        if startNode.val != middleStartNode.val:
            return False
        startNode = startNode.next
        middleStartNode = middleStartNode.next
    return True
| StarcoderdataPython |
3218649 | <reponame>JurgenKriel/cdeep3m<filename>aws/delete_keypair.py
#!/usr/bin/env python
import sys
import os
import argparse
import datetime
from datetime import tzinfo, timedelta
import json
from dateutil.tz import tzutc
import boto3
from ipify import get_ip
def _parse_arguments(desc, theargs):
"""Parses command line arguments using argparse
"""
help_formatter = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(description=desc,
formatter_class=help_formatter)
parser.add_argument('name', help='name of keypair to delete')
parser.add_argument('--region', default='us-east-2',
help="Region to use" +
"(default us-east-2)")
parser.add_argument('--profile',
default=None,
help='AWS profile to load from credentials. default none')
return parser.parse_args(theargs)
def _delete_keypair(theargs):
    """Delete the named EC2 key pair via boto3 and return the raw response as str."""
    # Switch to the requested credentials profile, if any, before creating a client.
    if theargs.profile is not None:
        boto3.setup_default_session(profile_name=theargs.profile)
    ec2 = boto3.client('ec2', region_name=theargs.region)
    resp = ec2.delete_key_pair(KeyName=theargs.name)
    return str(resp)
def main(arglist):
    """Entry point: parse *arglist* (argv-style) and delete the key pair."""
    desc = """
    Deletes an EC2 key pair by name
    """
    # Bug fix: previously parsed sys.argv[1:] directly and ignored the
    # arglist parameter, so main() could not be driven with custom args;
    # the description also said "Gets list of users" (copy-paste error).
    theargs = _parse_arguments(desc, arglist[1:])
    sys.stdout.write('Contacting AWS: \n')
    sys.stdout.write(_delete_keypair(theargs))


if __name__ == '__main__':  # pragma: no cover
    sys.exit(main(sys.argv))
| StarcoderdataPython |
8104495 |
# parsetab.py
# This file is automatically generated. Do not edit.
# pylint: disable=W,C,R
# NOTE(review): PLY yacc parser tables — regenerate by re-running the parser
# build (see exam3parser.py referenced below) instead of editing by hand.
_tabversion = '3.10'
_lr_method = 'LALR'
_lr_signature = 'NUMBER STATE TOKHEAT TOKTARGET TOKTEMPRATUREcommands : empty\n | commands command\n command : heatswitch\n | targetsetheatswitch : TOKHEAT STATEtargetset : TOKTARGET TOKTEMPRATURE NUMBERempty :'
_lr_action_items = {'TOKHEAT':([0,1,2,3,4,5,8,10,],[-7,6,-1,-2,-3,-4,-5,-6,]),'TOKTARGET':([0,1,2,3,4,5,8,10,],[-7,7,-1,-2,-3,-4,-5,-6,]),'$end':([0,1,2,3,4,5,8,10,],[-7,0,-1,-2,-3,-4,-5,-6,]),'STATE':([6,],[8,]),'TOKTEMPRATURE':([7,],[9,]),'NUMBER':([9,],[10,]),}
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'commands':([0,],[1,]),'empty':([0,],[2,]),'command':([1,],[3,]),'heatswitch':([1,],[4,]),'targetset':([1,],[5,]),}
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> commands","S'",1,None,None,None),
('commands -> empty','commands',1,'p_commands','exam3parser.py',9),
('commands -> commands command','commands',2,'p_commands','exam3parser.py',10),
('command -> heatswitch','command',1,'p_command','exam3parser.py',15),
('command -> targetset','command',1,'p_command','exam3parser.py',16),
('heatswitch -> TOKHEAT STATE','heatswitch',2,'p_heatswitch','exam3parser.py',20),
('targetset -> TOKTARGET TOKTEMPRATURE NUMBER','targetset',3,'p_targetset','exam3parser.py',25),
('empty -> <empty>','empty',0,'p_empty','exam3parser.py',30),
]
| StarcoderdataPython |
1834099 | from typing import Tuple
from docker.models.containers import Container
from sip.utils.custom_docker_comm import CustomDockerClient
class TestBpiServer:
    """Smoke test: the BPI example server container must report healthy."""
    def test_if_container_starts(
        self, bpi_example_server: Tuple[Container, CustomDockerClient],
    ):
        # The fixture yields a started container plus the custom docker client.
        container, custom_docker_client = bpi_example_server
        health = custom_docker_client.check_health(container_id=container.id)
        assert health
| StarcoderdataPython |
6608871 | from nbconvert.preprocessors import Preprocessor
class JekyllPreprocessor(Preprocessor):  # skipcq: PYL-W0223
    """Preprocessor that injects Jekyll front-matter metadata."""
    def preprocess(self, nb, resources):  # skipcq: PYL-R0201
        """Attach Jekyll page metadata to the notebook resources.

        Args:
            nb (NotebookNode): Notebook being converted.
            resources (dict): Additional resources used by preprocessors and filters.

        Returns:
            NotebookNode: Modified notebook.
            dict: Modified resources dictionary.
        """
        page_name = resources["metadata"]["name"]
        jekyll_meta = {
            "layout": "page",
            "title": page_name,
            "permalink": "/" + page_name,
        }
        # Notebook-level jekyll metadata overrides the defaults above.
        jekyll_meta.update(nb.metadata.get("jekyll", {}))
        resources["metadata"]["jekyll"] = jekyll_meta
        return nb, resources
| StarcoderdataPython |
1759798 | <reponame>astar-club/scikit-snowland<filename>test/testcase/qgis_tool.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: 深圳星河软通科技有限公司 A.Star
# @contact: <EMAIL>
# @site: www.astar.ltd
# @file: qgis_tool.py
# @time: 2022/01/06 23:48
# @Software: PyCharm
import sys
import unittest
import pathlib
# Make the repository root and the bundled "snowland" package importable
# when this test module is executed directly (outside an installed env).
this_file = pathlib.Path(__file__)
path_pathlib = this_file.parent.parent.parent
path = str(path_pathlib)
print("path:", path)
if path not in sys.path:
    sys.path.insert(0, path)
pathlib_snowland = path_pathlib / "snowland"
path_snowland = str(pathlib_snowland)
if path_snowland not in sys.path:
    sys.path.insert(0, path_snowland)
import numpy as np
from astartool.project import std_logging
from astartool.number import equals_zero
from qgis.core import QgsDistanceArea, QgsUnitTypes, QgsPointXY
npa = np.array
# from snowland.gis_tool.qgis_tool import distance_area
from snowland.gis_tool import haversine, nds
class TestHaversine(unittest.TestCase):
    """Compares snowland's haversine distance against QGIS' QgsDistanceArea."""
    @classmethod
    @std_logging()
    def setup_class(cls):
        pass
    @classmethod
    @std_logging()
    def teardown_class(cls):
        pass
    @std_logging()
    def setup_method(self, method):
        pass
    @std_logging()
    def teardown_method(self, method):
        pass
    @std_logging()
    def setUp(self):
        # Configure QGIS with the same spherical ellipsoid the haversine
        # module assumes (EARTH_RADIUS is in kilometres, hence * 1000).
        Ellipsoid = (haversine.EARTH_RADIUS * 1000, haversine.EARTH_RADIUS * 1000)
        disClass = QgsDistanceArea()
        disClass.setEllipsoid(Ellipsoid[0], Ellipsoid[1])
        self.module = haversine
        self.disClass = disClass
    @std_logging()
    def tearDown(self):
        pass
    def test_1(self):
        """
        Add an offset in longitude.
        """
        p1 = [118, 40]
        p2 = [118.1, 40]
        p1_pointxy = QgsPointXY(*p1)
        p2_pointxy = QgsPointXY(*p2)
        line = self.disClass.measureLine(p1_pointxy, p2_pointxy)
        qgis_meters = self.disClass.convertLengthMeasurement(line, QgsUnitTypes.DistanceMeters)
        haversine_meters = self.module.haversine_metres(*p1_pointxy, *p2_pointxy)
        self.assertTrue(np.isclose(qgis_meters - haversine_meters, 0))  # millimetre-scale agreement
    def test_2(self):
        """
        Add an offset in latitude.
        """
        p1 = [118, 40]
        p2 = [118, 40.1]
        p1_pointxy = QgsPointXY(*p1)
        p2_pointxy = QgsPointXY(*p2)
        line = self.disClass.measureLine(p1_pointxy, p2_pointxy)
        qgis_meters = self.disClass.convertLengthMeasurement(line, QgsUnitTypes.DistanceMeters)
        haversine_meters = self.module.haversine_metres(*p1_pointxy, *p2_pointxy)
        self.assertTrue(np.isclose(qgis_meters - haversine_meters, 0))  # millimetre-scale agreement
    def test_3(self):
        """
        Identical points must give zero distance.
        """
        p1 = [0, 40]
        p2 = [0, 40]
        p1_pointxy = QgsPointXY(*p1)
        p2_pointxy = QgsPointXY(*p2)
        line = self.disClass.measureLine(p1_pointxy, p2_pointxy)
        qgis_meters = self.disClass.convertLengthMeasurement(line, QgsUnitTypes.DistanceMeters)
        haversine_meters = self.module.haversine_metres(*p1_pointxy, *p2_pointxy)
        self.assertTrue(np.isclose(qgis_meters - haversine_meters, 0))  # millimetre-scale agreement
    def test_4(self):
        """
        Across the prime meridian at the equator.
        """
        p1 = [-0.1, 0]
        p2 = [0.1, 0]
        p1_pointxy = QgsPointXY(*p1)
        p2_pointxy = QgsPointXY(*p2)
        line = self.disClass.measureLine(p1_pointxy, p2_pointxy)
        qgis_meters = self.disClass.convertLengthMeasurement(line, QgsUnitTypes.DistanceMeters)
        haversine_meters = self.module.haversine_metres(*p1_pointxy, *p2_pointxy)
        self.assertTrue(np.isclose(qgis_meters - haversine_meters, 0))  # millimetre-scale agreement
    def test_5(self):
        """
        Extreme (near-antipodal) distance.
        """
        p1 = [90, 0]
        p2 = [-90, 180]
        p1_pointxy = QgsPointXY(*p1)
        p2_pointxy = QgsPointXY(*p2)
        line = self.disClass.measureLine(p1_pointxy, p2_pointxy)
        qgis_meters = self.disClass.convertLengthMeasurement(line, QgsUnitTypes.DistanceMeters)
        haversine_meters = self.module.haversine_metres(*p1_pointxy, *p2_pointxy)
        print(qgis_meters, haversine_meters)
        self.assertTrue(np.isclose(qgis_meters - haversine_meters, 0))  # millimetre-scale agreement
class TestNDS(unittest.TestCase):
    """Checks NDS tile-id computation against a known reference value."""
    def test_nds_array(self):
        # 20 identical coordinates must all map to the same level-13 tile id.
        x = [119.135671785614] * 20
        y = [34.5738355769335] * 20
        z = nds.get_tile_id(x, y, 13)
        self.assertEqual(len(z), 20)
        for each in z:
            self.assertEqual(each, 557386867)
| StarcoderdataPython |
6588449 | from django.apps import AppConfig
class FirstpageConfig(AppConfig):
    """Django app configuration for the firstPage application."""
    name = 'firstPage'
| StarcoderdataPython |
3426945 | import sys
import torch
from torch import nn
from vedacore.misc import registry
def kl_div(inp, trg, reduction):
    """Pointwise KL divergence trg * log(trg / inp), with eps for stability,
    reduced per *reduction* ('none' | 'sum' | 'mean' | 'batchmean')."""
    tiny = sys.float_info.epsilon
    pointwise = trg * torch.log(tiny + torch.div(trg, (inp + tiny)))
    if reduction == 'none':
        return pointwise
    if reduction == 'sum':
        return torch.sum(pointwise)
    if reduction == 'mean':
        return torch.mean(pointwise)
    if reduction == 'batchmean':
        # Sum over all non-batch dims, then average across the batch.
        per_sample = torch.sum(pointwise, dim=tuple(range(1, len(pointwise.shape))))
        return torch.mean(per_sample)
    raise ValueError(f'Reduction method "{reduction}" invalid!')
@registry.register_module('loss')
class KLDLoss(nn.Module):
    """
    Args:
        reduction (string, optional):
            Specifies the reduction to apply to the output:
            'none' | 'batchmean' | 'sum' | 'mean'.
            'none': no reduction will be applied
            'batchmean': the sum of the output will be divided by the batchsize
            'sum': the output will be summed
            'mean': the output will be divided by the number of elements in the
                output
            Default: 'sum'
    """
    def __init__(self, reduction='sum'):
        super().__init__()
        self.reduction = reduction

    def forward(self, inp, trg):
        # Normalise both tensors to probability distributions before the KL term.
        inp = inp / torch.sum(inp)
        trg = trg / torch.sum(trg)
        return kl_div(inp, trg, self.reduction)
| StarcoderdataPython |
287811 | from random import seed
from random import randrange
import csv
# Load a CSV file
def load_csv(filename):
    """Read *filename*, skip blank rows and drop the header row.

    Returns (rows, row_count, column_count); cells remain strings.
    """
    with open(filename, 'r') as source:
        rows = [record for record in csv.reader(source) if record]
    rows.pop(0)  # first row is the header
    return rows, len(rows), len(rows[0])
# Convert the class-label strings to integers in place.
def change_class_label(dataset, numrow, numcol):
    """Map column 0 of the first *numrow* rows: 'No' -> 0, 'Yes' -> 1."""
    mapping = {'No': 0, 'Yes': 1}
    for i in range(numrow):
        current = dataset[i][0]
        if current in mapping:
            dataset[i][0] = mapping[current]
# Convert the feature columns of the dataset from strings to floats.
def string_to_float(dataset, numrow, numcol):
    """Cast columns 1..numcol-1 of the first *numrow* rows to float, in place."""
    for record in dataset[:numrow]:
        record[1:numcol] = [float(cell) for cell in record[1:numcol]]
# Make a prediction with weights
def predict(row, weights, threshold):
    """Return 1 if the weighted feature sum reaches *threshold*, else 0.

    row[0] is the class label; features are row[1:]. weights[0] is the bias.
    """
    sum_weight = weights[0]  # this is the weight of the bias
    for i in range(len(row) - 1):
        sum_weight += weights[i + 1] * row[i + 1]
    return 1 if sum_weight >= threshold else 0
# Estimate weights with the perceptron update rule.
def train_weights(trainset, l_rate, max_iter, threshold):
    """Train perceptron weights over *trainset* for up to *max_iter* epochs.

    Bug fixed: total_error was initialised once and never updated, so the
    convergence check fired after the first epoch regardless of errors and
    training always stopped early. The error is now accumulated per epoch
    and training stops only once an epoch is error-free.
    NOTE(review): the bias weight weights[0] is deliberately left untrained,
    matching the original behaviour (the explicit threshold plays that role).
    """
    weights = [0.022 for i in range(len(trainset[0]))]
    for itern in range(max_iter):
        epoch_error = 0
        for row in trainset:
            prediction = predict(row, weights, threshold)
            error = row[0] - prediction
            epoch_error += abs(error)
            for i in range(len(row) - 1):
                weights[i + 1] = weights[i + 1] + l_rate * error * row[i + 1]
        if epoch_error == 0:  # converged: a full pass with no mistakes
            break
    return weights
#function for cross validation (Python 2 source: print statements below)
def cross_validation(dataset, l_rate, max_iter, threshold,numrow,numcol):
    """10-fold cross validation; prints fold sizes, per-fold and mean accuracy.

    NOTE(review): size = numrow/10 is integer division, so trailing rows are
    dropped when numrow is not a multiple of 10; correct_count/size is also
    integer division under Python 2, so each fold's accuracy collapses to
    0.0 or 100.0 — likely unintended. Confirm before relying on the output.
    """
    accuracy = []
    size = numrow/10
    for i in range(0,numrow,size):
        train_start = i + size
        trainset = dataset[train_start: ]
        # Wrap around: prepend the rows before the test fold to the train set.
        if(i-1 > 0):
            for r in range(i):
                trainset.append(dataset[r])
        testset = dataset[i:i+size]
        weights = train_weights(trainset, l_rate, max_iter, threshold)
        correct_count = 0
        for r in testset:
            prediction = predict(r, weights, threshold)
            actual = r[0]
            if(prediction == actual):
                correct_count+=1
        acc = (correct_count/size) * 100.0
        accuracy.append(acc)
        print len(trainset)
        print len(testset)
        print acc
    acc_sum = sum(i for i in accuracy)
    avg_acc = acc_sum/len(accuracy)
    print avg_acc
if __name__ == '__main__':
    # Load the SPECTF dataset, normalise labels/features, then cross-validate.
    filename = 'SPECTF.csv'
    dataset, numrow, numcol = load_csv(filename)
    change_class_label(dataset,numrow,numcol)
    string_to_float(dataset,numrow,numcol)
    l_rate = .01
    threshold = 75
    max_iter= 100
    cross_validation(dataset, l_rate, max_iter, threshold,numrow,numcol)
6662831 | # Adaptive gamma correction based on the reference.
# Reference:
# <NAME>, <NAME> and <NAME>, "Efficient Contrast Enhancement Using Adaptive Gamma Correction With
# Weighting Distribution," in IEEE Transactions on Image Processing, vol. 22, no. 3, pp. 1032-1041,
# March 2013. doi: 10.1109/TIP.2012.2226047
# Revised from https://github.com/mss3331/AGCWD/blob/master/AGCWD.m
import numpy as np
import cv2
def agcwd(image, w=0.5):
    """Adaptive Gamma Correction with Weighting Distribution (AGCWD).

    image: uint8 grayscale or BGR colour image; colour images are corrected
    on their HSV value channel. w: weighting exponent from the paper.
    Returns the enhanced image with the same layout as the input.
    """
    is_colorful = len(image.shape) >= 3
    img = extract_value_channel(image) if is_colorful else image
    img_pdf = get_pdf(img)
    max_intensity = np.max(img_pdf)
    min_intensity = np.min(img_pdf)
    # Weighted PDF, then its normalised CDF.
    w_img_pdf = max_intensity * (((img_pdf - min_intensity) / (max_intensity - min_intensity)) ** w)
    w_img_cdf = np.cumsum(w_img_pdf) / np.sum(w_img_pdf)
    # Per-intensity adaptive-gamma lookup table: 255 * (l/255)^(1 - cdf(l)).
    l_intensity = np.arange(0, 256)
    l_intensity = np.array([255 * (e / 255) ** (1 - w_img_cdf[e]) for e in l_intensity], dtype=np.uint8)
    # Performance fix: apply the LUT with vectorised integer-array indexing
    # instead of the original O(H*W) Python double loop. Output is identical;
    # uint8 intensities were already required by the histogram/LUT indexing.
    enhanced_image = l_intensity.reshape(-1)[img]
    enhanced_image = set_value_channel(image, enhanced_image) if is_colorful else enhanced_image
    return enhanced_image
def extract_value_channel(color_image):
    """Return the HSV value channel of a BGR uint8 image, scaled back to uint8."""
    color_image = color_image.astype(np.float32) / 255.
    hsv = cv2.cvtColor(color_image, cv2.COLOR_BGR2HSV)
    v = hsv[:, :, 2]
    return np.uint8(v * 255)
def get_pdf(gray_image):
    """Return the normalised 256-bin intensity histogram (cv2 gives shape (256, 1))."""
    height, width = gray_image.shape
    pixel_count = height * width
    hist = cv2.calcHist([gray_image], [0], None, [256], [0, 256])
    return hist / pixel_count
def set_value_channel(color_image, value_channel):
    """Replace the HSV value channel of *color_image* and return a BGR uint8 image."""
    value_channel = value_channel.astype(np.float32) / 255
    color_image = color_image.astype(np.float32) / 255.
    color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2HSV)
    color_image[:, :, 2] = value_channel
    color_image = np.array(cv2.cvtColor(color_image, cv2.COLOR_HSV2BGR) * 255, dtype=np.uint8)
    return color_image
def main():
    """CLI demo: enhance --image, save as enhanced.jpg and display both."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--image', dest='img_path')
    args = parser.parse_args()
    img_path = args.img_path
    img = cv2.imread(img_path)
    cv2.imshow('base', img)
    enhanced_image = agcwd(img)
    cv2.imwrite('enhanced.jpg', enhanced_image)
    cv2.imshow('enhanced', enhanced_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
if __name__ == '__main__':
    main()
8184877 | <gh_stars>1-10
import math
from lcd_digit_recognizer.recognition.utils import unit_vector, absolute_angle, calculate_angle_distance
class DigitCenter(object):
    """Candidate digit centre accumulated from voting line segments."""

    def __init__(self, x, y, voter):
        self._x = x
        self._y = y
        self._voters = set([voter])
        self._neighbours = []
        self._cocenters = []

    @property
    def x(self):
        """Centre x as an integer pixel coordinate."""
        return int(self._x)

    @property
    def y(self):
        """Centre y as an integer pixel coordinate."""
        return int(self._y)

    def as_point(self):
        """Return the centre as an (x, y) tuple of ints."""
        return (self.x, self.y)

    @property
    def average_segment_length(self):
        """Mean metric length of all voting segments."""
        length_sum = sum(v.metric_length for v in self._voters)
        return length_sum / len(self._voters)

    @property
    def average_segment_angle(self):
        """Mean absolute angle of all voting segments."""
        angle_sum = sum(v.absolute_angle for v in self._voters)
        return angle_sum / len(self._voters)

    @property
    def average_segment_aligned_angle(self):
        # Currently returns the first voter's angle only.
        for voter in self._voters:
            # TODO add aligning averaging algorithm
            return voter.absolute_angle

    @property
    def neighbours(self):
        return self._neighbours

    @property
    def cocenters(self):
        return self._cocenters

    def absolute_angle_to(self, cocenter):
        """Absolute angle of the direction from this centre to *cocenter*."""
        dx = cocenter.x - self.x
        dy = cocenter.y - self.y
        direction = unit_vector([dx, dy])
        return absolute_angle(direction)

    def aligned_angle_distance_to(self, center):
        """Smallest angular distance, modulo 90 degrees, between the direction
        to *center* and this centre's aligned segment angle."""
        a1 = self.absolute_angle_to(center)
        a2 = self.average_segment_aligned_angle
        return min(
            calculate_angle_distance(a1, a2),
            calculate_angle_distance(a1, a2 + 90),
            calculate_angle_distance(a1, a2 + 180),
            calculate_angle_distance(a1, a2 + 270),
        )

    def try_add_neighbour(self, neighbour):
        """Symmetrically link *neighbour* unless it is self or already linked."""
        if neighbour is self:
            return

        if neighbour in self._neighbours:
            return

        # NOTE: voter-sharing guard below was disabled in the original source.
        """
        for voter in self._voters:
            if voter in neighbour._voters:
                return

        for voter in neighbour._voters:
            if voter in self._voters:
                return
        """

        self._neighbours.append(neighbour)
        neighbour._neighbours.append(self)

    def add_cocenter(self, cocenter):
        """Symmetrically register *cocenter* on both centres."""
        self._cocenters.append(cocenter)
        cocenter._cocenters.append(self)

    def remove_cocenter(self, cocenter):
        """Symmetrically unregister *cocenter* from both centres."""
        self._cocenters.remove(cocenter)
        cocenter._cocenters.remove(self)

    def merge_with(self, digit_center):
        """Fold *digit_center* into this centre, weighting by voter counts."""
        svc = len(self._voters)
        ovc = len(digit_center._voters)
        tc = svc + ovc

        self._x = (self._x * svc + digit_center._x * ovc) / tc
        self._y = (self._y * svc + digit_center._y * ovc) / tc
        # Bug fix: set.union() returns a new set and the original discarded it,
        # so merged centres silently lost the other centre's voters. Use the
        # in-place update instead.
        self._voters.update(digit_center._voters)

    def can_merge_with(self, digit_center):
        """Merge predicate; currently always permissive."""
        return True

    def distance_to(self, digit_center):
        """Euclidean distance between the two (sub-pixel) centres."""
        return math.sqrt((self._x - digit_center._x) ** 2 + (self._y - digit_center._y) ** 2)

    def __repr__(self):
        return f"({self._x},{self._y})"
| StarcoderdataPython |
def binary_search(nums, target):
    """Return the index of *target* in sorted *nums*, or -1 if absent.

    The original body was truncated and non-functional (an empty while
    loop); implemented as the standard iterative binary search.
    """
    low, high = 0, len(nums) - 1
    while low <= high:
        mid = (low + high) // 2
        if nums[mid] == target:
            return mid
        if nums[mid] < target:
            low = mid + 1
        else:
            high = mid - 1
    return -1
def main():
    """Build the sorted list 0..99 and search it for the value 98."""
    nums = list(range(100))
    index = binary_search(nums, 98)
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3530642 | from src.core.util.tools import prompt, print_error
from src.core.validator.validators import validate_string, validate_int, validate_date
from src.model.book import Book
class BookBuilder:
    """Interactively prompts for book fields and builds a Book.

    NOTE(review): the constructor calls build() but discards its return
    value — callers should use BookBuilder.build() directly to get the Book.
    """
    def __init__(self):
        self.build()
    @staticmethod
    def build():
        """Prompt until every field validates, then build and return the Book.

        On a validation failure the offending field is reset to None so only
        that field is re-prompted on the next loop pass.
        """
        valid = False
        book = book_name = author_name = published = pages = None
        while not valid:
            book_name = prompt("Book Name: ", clear=True).strip() if book_name is None else book_name
            if not validate_string(book_name, "[a-zA-Z0-9]+", min_len=1, max_len=60):
                print_error(f'Invalid name {book_name}')
                book_name = None
                continue
            author_name = prompt("Author Name: ").strip() if author_name is None else author_name
            if not validate_string(author_name, "[a-zA-Z0-9]+", min_len=1, max_len=60):
                print_error(f'Invalid author name {author_name}')
                author_name = None
                continue
            published = prompt("Published date: ").strip() if published is None else published
            if not validate_date(published, "%d/%m/%Y"):
                print_error(f'Invalid published date {published}')
                published = None
                continue
            pages = prompt("Pages: ").strip() if pages is None else pages
            # NOTE(review): pages stays a string here; presumably Book.Builder
            # or validate_int handles conversion — confirm.
            if not validate_int(pages, min_value=1, max_value=1000):
                print_error(f'Invalid pages number {pages}')
                pages = None
                continue
            valid = True
            book = Book.Builder() \
                .with_author_name(author_name) \
                .with_book_name(book_name) \
                .with_pages(pages) \
                .with_published(published) \
                .build()
        return book
| StarcoderdataPython |
8114862 | # coding: utf-8
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="pymoment",
version="0.0.6",
packages=['moment'],
author="<NAME>",
author_email="<EMAIL>",
description='The python version of "moment" which is made with reference to "moment.js"',
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/KrixTam/pymoment",
project_urls={
"Bug Tracker": "https://github.com/KrixTam/pymoment/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
py_modules=["moment"],
python_requires=">=3.6",
)
| StarcoderdataPython |
6526512 | <filename>bootstrap.py
from const import DB_NAME, DB_USER
from psycopg2 import connect
from pq import PQ
# Connect to Postgres and initialise the pq queue schema (idempotent setup).
conn = connect('dbname={0} user={1}'.format(DB_NAME, DB_USER))
pq = PQ(conn)
pq.create()  # creates the queue tables used by the workers
| StarcoderdataPython |
12846181 | <reponame>rpi-techfundamentals/spring2020_website<gh_stars>1-10
**Chapter 19 – Training and Deploying TensorFlow Models at Scale**
_This notebook contains all the sample code in chapter 19._
<table align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/ageron/handson-ml2/blob/master/19_training_and_deploying_at_scale.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
</table>
# Setup
First, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0.
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
!echo "deb http://storage.googleapis.com/tensorflow-serving-apt stable tensorflow-model-server tensorflow-model-server-universal" > /etc/apt/sources.list.d/tensorflow-serving.list
!curl https://storage.googleapis.com/tensorflow-serving-apt/tensorflow-serving.release.pub.gpg | apt-key add -
!apt update && apt-get install -y tensorflow-model-server
!pip install -q -U tensorflow-serving-api
IS_COLAB = True
except Exception:
IS_COLAB = False
# TensorFlow ≥2.0 is required
import tensorflow as tf
from tensorflow import keras
assert tf.__version__ >= "2.0"
if not tf.config.list_physical_devices('GPU'):
print("No GPU was detected. CNNs can be very slow without a GPU.")
if IS_COLAB:
print("Go to Runtime > Change runtime and select a GPU hardware accelerator.")
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
tf.random.set_seed(42)
# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "deploy"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
    """Save the current MatplotLib figure under IMAGES_PATH as <fig_id>.<fig_extension>."""
    filename = "{}.{}".format(fig_id, fig_extension)
    target = os.path.join(IMAGES_PATH, filename)
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(target, format=fig_extension, dpi=resolution)
# Deploying TensorFlow models to TensorFlow Serving (TFS)
We will use the REST API or the gRPC API.
## Save/Load a `SavedModel`
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()
X_train_full = X_train_full[..., np.newaxis].astype(np.float32) / 255.
X_test = X_test[..., np.newaxis].astype(np.float32) / 255.
X_valid, X_train = X_train_full[:5000], X_train_full[5000:]
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
X_new = X_test[:3]
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28, 1]),
keras.layers.Dense(100, activation="relu"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-2),
metrics=["accuracy"])
model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid))
np.round(model.predict(X_new), 2)
model_version = "0001"
model_name = "my_mnist_model"
model_path = os.path.join(model_name, model_version)
model_path
!rm -rf {model_name}
tf.saved_model.save(model, model_path)
for root, dirs, files in os.walk(model_name):
indent = ' ' * root.count(os.sep)
print('{}{}/'.format(indent, os.path.basename(root)))
for filename in files:
print('{}{}'.format(indent + ' ', filename))
!saved_model_cli show --dir {model_path}
!saved_model_cli show --dir {model_path} --tag_set serve
!saved_model_cli show --dir {model_path} --tag_set serve \
--signature_def serving_default
!saved_model_cli show --dir {model_path} --all
Let's write the new instances to a `npy` file so we can pass them easily to our model:
np.save("my_mnist_tests.npy", X_new)
input_name = model.input_names[0]
input_name
And now let's use `saved_model_cli` to make predictions for the instances we just saved:
!saved_model_cli run --dir {model_path} --tag_set serve \
--signature_def serving_default \
--inputs {input_name}=my_mnist_tests.npy
np.round([[1.1739199e-04, 1.1239604e-07, 6.0210604e-04, 2.0804715e-03, 2.5779348e-06,
6.4079795e-05, 2.7411186e-08, 9.9669880e-01, 3.9654213e-05, 3.9471846e-04],
[1.2294615e-03, 2.9207937e-05, 9.8599273e-01, 9.6755642e-03, 8.8930705e-08,
2.9156188e-04, 1.5831805e-03, 1.1311053e-09, 1.1980456e-03, 1.1113169e-07],
[6.4066830e-05, 9.6359509e-01, 9.0598064e-03, 2.9872139e-03, 5.9552520e-04,
3.7478798e-03, 2.5074568e-03, 1.1462728e-02, 5.5553433e-03, 4.2495009e-04]], 2)
## TensorFlow Serving
Install [Docker](https://docs.docker.com/install/) if you don't have it already. Then run:
```bash
docker pull tensorflow/serving
export ML_PATH=$HOME/ml # or wherever this project is
docker run -it --rm -p 8500:8500 -p 8501:8501 \
-v "$ML_PATH/my_mnist_model:/models/my_mnist_model" \
-e MODEL_NAME=my_mnist_model \
tensorflow/serving
```
Once you are finished using it, press Ctrl-C to shut down the server.
Alternatively, if `tensorflow_model_server` is installed (e.g., if you are running this notebook in Colab), then the following 3 cells will start the server:
os.environ["MODEL_DIR"] = os.path.split(os.path.abspath(model_path))[0]
%%bash --bg
nohup tensorflow_model_server \
--rest_api_port=8501 \
--model_name=my_mnist_model \
--model_base_path="${MODEL_DIR}" >server.log 2>&1
!tail server.log
import json
input_data_json = json.dumps({
"signature_name": "serving_default",
"instances": X_new.tolist(),
})
repr(input_data_json)[:1500] + "..."
Now let's use TensorFlow Serving's REST API to make predictions:
import requests
SERVER_URL = 'http://localhost:8501/v1/models/my_mnist_model:predict'
response = requests.post(SERVER_URL, data=input_data_json)
response.raise_for_status() # raise an exception in case of error
response = response.json()
response.keys()
y_proba = np.array(response["predictions"])
y_proba.round(2)
### Using the gRPC API
from tensorflow_serving.apis.predict_pb2 import PredictRequest
request = PredictRequest()
request.model_spec.name = model_name
request.model_spec.signature_name = "serving_default"
input_name = model.input_names[0]
request.inputs[input_name].CopyFrom(tf.make_tensor_proto(X_new))
import grpc
from tensorflow_serving.apis import prediction_service_pb2_grpc
channel = grpc.insecure_channel('localhost:8500')
predict_service = prediction_service_pb2_grpc.PredictionServiceStub(channel)
response = predict_service.Predict(request, timeout=10.0)
response
Convert the response to a tensor:
output_name = model.output_names[0]
outputs_proto = response.outputs[output_name]
y_proba = tf.make_ndarray(outputs_proto)
y_proba.round(2)
Or to a NumPy array if your client does not include the TensorFlow library:
output_name = model.output_names[0]
outputs_proto = response.outputs[output_name]
shape = [dim.size for dim in outputs_proto.tensor_shape.dim]
y_proba = np.array(outputs_proto.float_val).reshape(shape)
y_proba.round(2)
## Deploying a new model version
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28, 1]),
keras.layers.Dense(50, activation="relu"),
keras.layers.Dense(50, activation="relu"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-2),
metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid))
model_version = "0002"
model_name = "my_mnist_model"
model_path = os.path.join(model_name, model_version)
model_path
tf.saved_model.save(model, model_path)
for root, dirs, files in os.walk(model_name):
indent = ' ' * root.count(os.sep)
print('{}{}/'.format(indent, os.path.basename(root)))
for filename in files:
print('{}{}'.format(indent + ' ', filename))
**Warning**: You may need to wait a minute before the new model is loaded by TensorFlow Serving.
import requests
SERVER_URL = 'http://localhost:8501/v1/models/my_mnist_model:predict'
response = requests.post(SERVER_URL, data=input_data_json)
response.raise_for_status()
response = response.json()
response.keys()
y_proba = np.array(response["predictions"])
y_proba.round(2)
# Deploy the model to Google Cloud AI Platform
Follow the instructions in the book to deploy the model to Google Cloud AI Platform, download the service account's private key and save it to the `my_service_account_private_key.json` in the project directory. Also, update the `project_id`:
project_id = "onyx-smoke-242003"
import googleapiclient.discovery
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "my_service_account_private_key.json"
model_id = "my_mnist_model"
model_path = "projects/{}/models/{}".format(project_id, model_id)
model_path += "/versions/v0001/" # if you want to run a specific version
ml_resource = googleapiclient.discovery.build("ml", "v1").projects()
def predict(X):
    """Send the instances in X to the deployed AI Platform model and return
    the predicted values as a NumPy array.

    Relies on the notebook globals `ml_resource`, `model_path` and
    `output_name`. Raises RuntimeError if the service reports an error.
    """
    payload = {"signature_name": "serving_default",
               "instances": X.tolist()}
    api_request = ml_resource.predict(name=model_path, body=payload)
    api_response = api_request.execute()
    if "error" in api_response:
        raise RuntimeError(api_response["error"])
    return np.array([pred[output_name] for pred in api_response["predictions"]])
Y_probas = predict(X_new)
np.round(Y_probas, 2)
# Using GPUs
tf.test.is_gpu_available()
tf.test.gpu_device_name()
tf.test.is_built_with_cuda()
from tensorflow.python.client.device_lib import list_local_devices
devices = list_local_devices()
devices
# Distributed Training
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
def create_model():
    """Build and return an uncompiled CNN classifier for 28x28x1 MNIST digits."""
    layers = [
        keras.layers.Conv2D(filters=64, kernel_size=7, activation="relu",
                            padding="same", input_shape=[28, 28, 1]),
        keras.layers.MaxPooling2D(pool_size=2),
        keras.layers.Conv2D(filters=128, kernel_size=3, activation="relu",
                            padding="same"),
        keras.layers.Conv2D(filters=128, kernel_size=3, activation="relu",
                            padding="same"),
        keras.layers.MaxPooling2D(pool_size=2),
        keras.layers.Flatten(),
        keras.layers.Dense(units=64, activation="relu"),
        keras.layers.Dropout(0.5),
        keras.layers.Dense(units=10, activation="softmax"),
    ]
    return keras.models.Sequential(layers)
batch_size = 100
model = create_model()
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-2),
metrics=["accuracy"])
model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid), batch_size=batch_size)
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
distribution = tf.distribute.MirroredStrategy()
# Change the default all-reduce algorithm:
#distribution = tf.distribute.MirroredStrategy(
# cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
# Specify the list of GPUs to use:
#distribution = tf.distribute.MirroredStrategy(devices=["/gpu:0", "/gpu:1"])
# Use the central storage strategy instead:
#distribution = tf.distribute.experimental.CentralStorageStrategy()
#resolver = tf.distribute.cluster_resolver.TPUClusterResolver()
#tf.tpu.experimental.initialize_tpu_system(resolver)
#distribution = tf.distribute.experimental.TPUStrategy(resolver)
with distribution.scope():
model = create_model()
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-2),
metrics=["accuracy"])
batch_size = 100 # must be divisible by the number of workers
model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid), batch_size=batch_size)
model.predict(X_new)
Custom training loop:
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
K = keras.backend
distribution = tf.distribute.MirroredStrategy()
with distribution.scope():
model = create_model()
optimizer = keras.optimizers.SGD()
with distribution.scope():
dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train)).repeat().batch(batch_size)
input_iterator = distribution.make_dataset_iterator(dataset)
@tf.function
def train_step():
    # One distributed training step. Closes over the surrounding cell's
    # globals: `distribution`, `model`, `optimizer`, `batch_size`, `K`
    # and `input_iterator`.
    def step_fn(inputs):
        # Per-replica step: forward pass, loss, backward pass, update.
        X, y = inputs
        with tf.GradientTape() as tape:
            Y_proba = model(X)
            # Sum then divide by the *global* batch size so the per-replica
            # losses add up to the true mean loss over the whole batch.
            loss = K.sum(keras.losses.sparse_categorical_crossentropy(y, Y_proba)) / batch_size
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        return loss
    # Run step_fn on every replica, then reduce the per-replica losses.
    per_replica_losses = distribution.experimental_run(step_fn, input_iterator)
    mean_loss = distribution.reduce(tf.distribute.ReduceOp.SUM,
                                    per_replica_losses, axis=None)
    return mean_loss
n_epochs = 10
with distribution.scope():
input_iterator.initialize()
for epoch in range(n_epochs):
print("Epoch {}/{}".format(epoch + 1, n_epochs))
for iteration in range(len(X_train) // batch_size):
print("\rLoss: {:.3f}".format(train_step().numpy()), end="")
print()
batch_size = 100 # must be divisible by the number of workers
model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid), batch_size=batch_size)
## Training across multiple servers
A TensorFlow cluster is a group of TensorFlow processes running in parallel, usually on different machines, and talking to each other to complete some work, for example training or executing a neural network. Each TF process in the cluster is called a "task" (or a "TF server"). It has an IP address, a port, and a type (also called its role or its job). The type can be `"worker"`, `"chief"`, `"ps"` (parameter server) or `"evaluator"`:
* Each **worker** performs computations, usually on a machine with one or more GPUs.
* The **chief** performs computations as well, but it also handles extra work such as writing TensorBoard logs or saving checkpoints. There is a single chief in a cluster. If no chief is specified, then the first worker is the chief.
* A **parameter server** (ps) only keeps track of variable values, it is usually on a CPU-only machine.
* The **evaluator** obviously takes care of evaluation. There is usually a single evaluator in a cluster.
The set of tasks that share the same type is often called a "job". For example, the "worker" job is the set of all workers.
To start a TensorFlow cluster, you must first specify it. This means defining all the tasks (IP address, TCP port, and type). For example, the following cluster specification defines a cluster with 3 tasks (2 workers and 1 parameter server). It's a dictionary with one key per job, and the values are lists of task addresses:
```
{
"worker": ["my-worker0.example.com:9876", "my-worker1.example.com:9876"],
"ps": ["my-ps0.example.com:9876"]
}
```
Every task in the cluster may communicate with every other task in the server, so make sure to configure your firewall to authorize all communications between these machines on these ports (it's usually simpler if you use the same port on every machine).
When a task is started, it needs to be told which one it is: its type and index (the task index is also called the task id). A common way to specify everything at once (both the cluster spec and the current task's type and id) is to set the `TF_CONFIG` environment variable before starting the program. It must be a JSON-encoded dictionary containing a cluster specification (under the `"cluster"` key), and the type and index of the task to start (under the `"task"` key). For example, the following `TF_CONFIG` environment variable defines a simple cluster with 2 workers and 1 parameter server, and specifies that the task to start is the first worker:
import os
import json
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
"worker": ["my-work0.example.com:9876", "my-work1.example.com:9876"],
"ps": ["my-ps0.example.com:9876"]
},
"task": {"type": "worker", "index": 0}
})
print("TF_CONFIG='{}'".format(os.environ["TF_CONFIG"]))
Some platforms (e.g., Google Cloud ML Engine) automatically set this environment variable for you.
Then you would write a short Python script to start a task. The same script can be used on every machine, since it will load the `TF_CONFIG` variable, which will tell it which task to start:
import tensorflow as tf
resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()
worker0 = tf.distribute.Server(resolver.cluster_spec(),
job_name=resolver.task_type,
task_index=resolver.task_id)
Another way to specify the cluster specification is directly in Python, rather than through an environment variable:
cluster_spec = tf.train.ClusterSpec({
"worker": ["127.0.0.1:9901", "127.0.0.1:9902"],
"ps": ["127.0.0.1:9903"]
})
You can then start a server simply by passing it the cluster spec and indicating its type and index. Let's start the two remaining tasks (remember that in general you would only start a single task per machine; we are starting 3 tasks on the localhost just for the purpose of this code example):
#worker1 = tf.distribute.Server(cluster_spec, job_name="worker", task_index=1)
ps0 = tf.distribute.Server(cluster_spec, job_name="ps", task_index=0)
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
"worker": ["127.0.0.1:9901", "127.0.0.1:9902"],
"ps": ["127.0.0.1:9903"]
},
"task": {"type": "worker", "index": 1}
})
print(repr(os.environ["TF_CONFIG"]))
distribution = tf.distribute.experimental.MultiWorkerMirroredStrategy()
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
"worker": ["127.0.0.1:9901", "127.0.0.1:9902"],
"ps": ["127.0.0.1:9903"]
},
"task": {"type": "worker", "index": 1}
})
#CUDA_VISIBLE_DEVICES=0
with distribution.scope():
model = create_model()
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-2),
metrics=["accuracy"])
import tensorflow as tf
from tensorflow import keras
import numpy as np
# At the beginning of the program (restart the kernel before running this cell)
distribution = tf.distribute.experimental.MultiWorkerMirroredStrategy()
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()
X_train_full = X_train_full[..., np.newaxis] / 255.
X_test = X_test[..., np.newaxis] / 255.
X_valid, X_train = X_train_full[:5000], X_train_full[5000:]
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
X_new = X_test[:3]
n_workers = 2
batch_size = 32 * n_workers
dataset = tf.data.Dataset.from_tensor_slices((X_train[..., np.newaxis], y_train)).repeat().batch(batch_size)
def create_model():
    """Return an uncompiled CNN (Conv/Pool stack + dense head) for MNIST digits."""
    stack = [
        keras.layers.Conv2D(filters=64, kernel_size=7, activation="relu",
                            padding="same", input_shape=[28, 28, 1]),
        keras.layers.MaxPooling2D(pool_size=2),
        keras.layers.Conv2D(filters=128, kernel_size=3, activation="relu",
                            padding="same"),
        keras.layers.Conv2D(filters=128, kernel_size=3, activation="relu",
                            padding="same"),
        keras.layers.MaxPooling2D(pool_size=2),
        keras.layers.Flatten(),
        keras.layers.Dense(units=64, activation="relu"),
        keras.layers.Dropout(0.5),
        keras.layers.Dense(units=10, activation="softmax"),
    ]
    return keras.models.Sequential(stack)
with distribution.scope():
model = create_model()
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-2),
metrics=["accuracy"])
model.fit(dataset, steps_per_epoch=len(X_train)//batch_size, epochs=10)
# Hyperparameter tuning
# Only talk to ps server
config_proto = tf.ConfigProto(device_filters=['/job:ps', '/job:worker/task:%d' % tf_config['task']['index']])
config = tf.estimator.RunConfig(session_config=config_proto)
# default since 1.10
strategy.num_replicas_in_sync | StarcoderdataPython |
11290717 | <gh_stars>10-100
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import warnings
from netaddr import IPNetwork
from netaddr.ip import IPAddress
from netman import regex
from netman.adapters.shell.ssh import SshClient
from netman.adapters.shell.telnet import TelnetClient
from netman.adapters.switches.util import SubShell, split_on_bang, split_on_dedent, no_output, \
ResultChecker
from netman.core.objects.access_groups import IN, OUT
from netman.core.objects.exceptions import IPNotAvailable, UnknownIP, UnknownVlan, UnknownAccessGroup, BadVlanNumber, \
BadVlanName, UnknownInterface, TrunkVlanNotSet, VlanVrfNotSet, UnknownVrf, BadVrrpTimers, BadVrrpPriorityNumber, \
BadVrrpTracking, VrrpAlreadyExistsForVlan, VrrpDoesNotExistForVlan, NoIpOnVlanForVrrp, BadVrrpAuthentication, \
BadVrrpGroupNumber, DhcpRelayServerAlreadyExists, UnknownDhcpRelayServer, VlanAlreadyExist, \
InvalidAccessGroupName, IPAlreadySet
from netman.core.objects.interface import Interface
from netman.core.objects.interface_states import OFF
from netman.core.objects.port_modes import ACCESS, TRUNK
from netman.core.objects.switch_base import SwitchBase
from netman.core.objects.vlan import Vlan
from netman.core.objects.vrrp_group import VrrpGroup
def ssh(switch_descriptor):
    """Return a backward-compatible Brocade adapter that talks to the device over SSH."""
    return BackwardCompatibleBrocade(shell_factory=SshClient,
                                     switch_descriptor=switch_descriptor)
def telnet(switch_descriptor):
    """Return a backward-compatible Brocade adapter that talks to the device over telnet."""
    return BackwardCompatibleBrocade(shell_factory=TelnetClient,
                                     switch_descriptor=switch_descriptor)
class Brocade(SwitchBase):
def __init__(self, switch_descriptor, shell_factory):
super(Brocade, self).__init__(switch_descriptor)
self.shell_factory = shell_factory
self.shell = None
def _connect(self):
shell_params = dict(
host=self.switch_descriptor.hostname,
username=self.switch_descriptor.username,
password=self.switch_descriptor.password,
)
if self.switch_descriptor.port:
shell_params["port"] = self.switch_descriptor.port
self.shell = self.shell_factory(**shell_params)
if self.shell.get_current_prompt().endswith(">"):
self.shell.do("enable", wait_for=":")
self.shell.do(self.switch_descriptor.password)
self.shell.do("skip-page-display")
def _disconnect(self):
self.shell.quit("exit")
self.logger.info(self.shell.full_log)
def _end_transaction(self):
pass
def _start_transaction(self):
pass
def commit_transaction(self):
self.shell.do("write memory")
def rollback_transaction(self):
pass
def get_vlans(self):
vlans = self._list_vlans()
self.add_vif_data_to_vlans(vlans)
return vlans
def get_vlan(self, number):
return self._get_vlan(number, include_vif_data=True)
def add_vlan(self, number, name=None):
result = self._show_vlan(number)
if not result[0].startswith("Error"):
raise VlanAlreadyExist(number)
with self.config():
result = self.shell.do('vlan {}{}'.format(number, " name {}".format(name) if name else ""))
if len(result) > 0:
if result[0].startswith("Error:"):
raise BadVlanNumber()
else:
raise BadVlanName()
else:
self.shell.do('exit')
def get_interfaces(self):
interfaces = []
vlans = []
interfaces_vlans = []
for if_data in split_on_dedent(self.shell.do("show interfaces")):
i = parse_interface(if_data)
if i:
interfaces.append(i)
for vlan_data in split_on_bang(self.shell.do("show running-config vlan")):
vlans.append(parse_vlan_runningconfig(vlan_data))
for interface in interfaces:
interfaces_vlans.append(get_interface_vlans_association(interface, vlans))
for interface_vlans in interfaces_vlans:
set_vlans_properties(interface_vlans)
return interfaces
def get_interface(self, interface_id):
vlans = []
if_data = self.shell.do("show interfaces {}".format(interface_id))
interface = parse_interface(if_data)
if not interface:
raise UnknownInterface(interface=interface_id)
for vlan_data in split_on_bang(self.shell.do("show running-config vlan")):
vlans.append(parse_vlan_runningconfig(vlan_data))
interface_vlans = get_interface_vlans_association(interface, vlans)
set_vlans_properties(interface_vlans)
return interface
def add_trunk_vlan(self, interface_id, vlan):
self._get_vlan(vlan)
with self.config(), self.vlan(vlan):
result = self.shell.do("tagged {}".format(interface_id))
if result:
raise UnknownInterface(interface_id)
def set_access_vlan(self, interface_id, vlan):
self._get_vlan(vlan)
with self.config(), self.vlan(vlan):
result = self.shell.do("untagged {}".format(interface_id))
if result:
raise UnknownInterface(interface_id)
def set_interface_native_vlan(self, interface_id, vlan):
return self.set_access_vlan(interface_id, vlan)
def reset_interface(self, interface_id):
result = self.shell.do("show vlan {}".format(interface_id))
if result and ('Invalid input' in result[0] or 'Error' in result[0]):
raise UnknownInterface(interface_id)
operations = self._get_vlan_association_removal_operations(result)
with self.config():
if len(operations) > 0:
for operation in operations:
self.shell.do("vlan {}".format(operation[0]))
self.shell.do("no {} {}".format(operation[1], interface_id))
self.shell.do("exit")
result = self.shell.do("no interface {}".format(interface_id))
if result:
raise UnknownInterface(interface_id)
def set_interface_state(self, interface_id, state):
with self.config(), self.interface(interface_id):
self.shell.do("disable" if state is OFF else "enable")
def unset_interface_access_vlan(self, interface_id):
content = self.shell.do("show vlan brief | include {}".format(_to_short_name(interface_id)))
if len(content) == 0:
raise UnknownInterface(interface_id)
self.logger.debug("show vlan result : \n" + "\n".join(content))
matches = re.compile("^(\d+).*").match(content[0])
with self.config(), self.vlan(int(matches.groups()[0])):
self.shell.do("no untagged {}".format(interface_id))
def unset_interface_native_vlan(self, interface_id):
return self.unset_interface_access_vlan(interface_id)
def remove_trunk_vlan(self, interface_id, vlan):
self._get_vlan(vlan)
with self.config(), self.vlan(vlan):
self.set("no tagged {}".format(interface_id))\
.on_result_matching("^Error.*", TrunkVlanNotSet, interface_id)\
.on_result_matching("^Invalid input.*", UnknownInterface, interface_id)
def remove_vlan(self, number):
self._get_vlan(number)
with self.config():
self.shell.do("no vlan {}".format(number))
def set_access_mode(self, interface_id):
result = self.shell.do("show vlan {}".format(interface_id))
if result and 'Invalid input' in result[0]:
raise UnknownInterface(interface_id)
operations = self._get_vlan_association_removal_operations(result)
if len(operations) > 0 and not (len(operations) == 1 and operations[0][1] == "untagged"):
with self.config():
for operation in operations:
self.shell.do("vlan {}".format(operation[0]))
self.shell.do("no {} {}".format(operation[1], interface_id))
self.shell.do("exit")
def set_trunk_mode(self, interface_id):
result = self.shell.do("show vlan {}".format(interface_id))
if result and 'Invalid input' in result[0]:
raise UnknownInterface(interface_id)
def set_vlan_icmp_redirects_state(self, vlan_number, state):
vlan = self._get_vlan(vlan_number, include_vif_data=True)
with self.config(), self.interface_vlan(vlan):
if state:
self.shell.do('ip redirect')
else:
self.shell.do('no ip redirect')
def add_ip_to_vlan(self, vlan_number, ip_network):
vlan = self._get_vlan(vlan_number, include_vif_data=True)
ip_exists = next((ip for ip in vlan.ips if ip.ip == ip_network.ip), False)
if ip_exists:
raise IPAlreadySet(ip_network)
with self.config(), self.interface_vlan(vlan):
ip_is_in_an_existing_network = any(ip_network in existing_ip for existing_ip in vlan.ips)
result = self.shell.do("ip address {}{}".format(ip_network, " secondary" if ip_is_in_an_existing_network else ""))
if len(result) > 0:
raise IPNotAvailable(ip_network)
def remove_ip_from_vlan(self, vlan_number, ip_network):
vlan = self._get_vlan(vlan_number, include_vif_data=True)
existing_ip = next((ip for ip in vlan.ips if ip.ip == ip_network.ip and ip.netmask == ip_network.netmask), False)
if not existing_ip:
raise UnknownIP(ip_network)
with self.config(), self.interface_vlan(vlan):
on_hold = []
if not existing_ip.is_secondary:
for ip in vlan.ips:
if ip.is_secondary and ip in existing_ip:
on_hold.append(ip)
self.shell.do("no ip address {}".format(ip))
self.shell.do("no ip address {}".format(existing_ip))
if len(on_hold) > 0:
self.shell.do("ip address {}".format(on_hold[0]))
for ip in on_hold[1:]:
self.shell.do("ip address {} secondary".format(ip))
def set_vlan_access_group(self, vlan_number, direction, name):
vlan = self._get_vlan(vlan_number, include_vif_data=True)
with self.config(), self.interface_vlan(vlan):
if vlan.access_groups[direction] is not None:
self.shell.do("no ip access-group {} {}".format(vlan.access_groups[direction], {IN: 'in', OUT: 'out'}[direction]))
result = self.shell.do("ip access-group {} {}".format(name, {IN: 'in', OUT: 'out'}[direction]))
if len(result) > 0 and not result[0].startswith("Warning:"):
raise InvalidAccessGroupName(name)
def unset_vlan_access_group(self, vlan_number, direction):
vlan = self._get_vlan(vlan_number, include_vif_data=True)
if vlan.access_groups[direction] is None:
raise UnknownAccessGroup(direction)
else:
with self.config(), self.interface_vlan(vlan):
self.shell.do("no ip access-group {} {}".format(vlan.access_groups[direction], {IN: 'in', OUT: 'out'}[direction]))
def set_vlan_vrf(self, vlan_number, vrf_name):
vlan = self._get_vlan(vlan_number)
with self.config(), self.interface_vlan(vlan):
result = self.shell.do("vrf forwarding {}".format(vrf_name))
if regex.match("^Error.*", result[0]):
raise UnknownVrf(vrf_name)
def unset_vlan_vrf(self, vlan_number):
vlan = self._get_vlan(vlan_number, include_vif_data=True)
if vlan.vlan_interface_name is None or vlan.vrf_forwarding is None:
raise VlanVrfNotSet(vlan_number)
else:
with self.config(), self.interface_vlan(vlan):
self.shell.do("no vrf forwarding {}".format(vlan.vrf_forwarding))
def get_vlan_interfaces(self, vlan_number):
interfaces = []
result = self._show_vlan(vlan_number)
if result[0].startswith("Error"):
raise UnknownVlan(vlan_number)
for line in result:
if regex.match("(Untagged|Statically tagged) Ports\s+: (.*)$", line):
for real_name in _to_real_names(parse_if_ranges(regex[1])):
interfaces.append(real_name)
return interfaces
def config(self):
return SubShell(self.shell, enter="configure terminal", exit_cmd='exit')
def vlan(self, vlan_number):
return SubShell(self.shell, enter="vlan {}".format(vlan_number), exit_cmd='exit')
def interface(self, interface_id):
return SubShell(self.shell, enter="interface {}".format(interface_id), exit_cmd='exit',
validate=no_output(UnknownInterface, interface_id))
def interface_vlan(self, vlan):
if vlan.vlan_interface_name is None:
self.shell.do("vlan {}".format(vlan.number))
self.shell.do("router-interface ve {}".format(vlan.number))
vlan.vlan_interface_name = str(vlan.number)
return SubShell(self.shell, enter=["interface ve {}".format(vlan.vlan_interface_name), "enable"], exit_cmd='exit')
def add_vrrp_group(self, vlan_number, group_id, ips=None, priority=None, hello_interval=None, dead_interval=None,
track_id=None, track_decrement=None):
vlan = self._get_vlan(vlan_number, include_vif_data=True)
if len([g for g in vlan.vrrp_groups if g.id == group_id]) > 0:
raise VrrpAlreadyExistsForVlan(vlan=vlan_number, vrrp_group_id=group_id)
with self.config(), self.interface_vlan(vlan):
if len(vlan.vrrp_groups) == 0:
self.set('ip vrrp-extended auth-type simple-text-auth VLAN{}', vlan_number)\
.on_result_matching("^error - please configure ip address before configuring vrrp-extended.*", NoIpOnVlanForVrrp, vlan_number)\
.on_any_result(BadVrrpAuthentication)
self.set("ip vrrp-extended vrid {}".format(group_id)).on_any_result(BadVrrpGroupNumber, 1, 255)
try:
self.set_vrrp_properties(ips, priority, track_decrement, track_id, dead_interval, hello_interval)
self.shell.do('activate')
except:
self.shell.do('exit')
raise
def set_vrrp_properties(self, ips, priority, track_decrement, track_id, dead_interval, hello_interval):
self.set('backup priority {} track-priority {}', priority, track_decrement) \
.on_result_matching("^Invalid input -> {}.*".format(track_decrement), BadVrrpTracking) \
.on_result_matching(".*not between 1 and 254$".format(track_decrement), BadVrrpTracking) \
.on_any_result(BadVrrpPriorityNumber, 1, 255)
for i, ip in enumerate(ips):
self.set('ip-address {}', ip).on_any_result(IPNotAvailable, ip)
self.set('hello-interval {}', hello_interval).on_any_result(BadVrrpTimers)
self.set('dead-interval {}', dead_interval).on_any_result(BadVrrpTimers)
self.shell.do('advertise backup')
self.set('track-port {}', track_id).on_any_result(BadVrrpTracking)
def remove_vrrp_group(self, vlan_number, group_id):
vlan = self._get_vlan(vlan_number, include_vif_data=True)
if not [group for group in vlan.vrrp_groups if group.id == group_id]:
raise VrrpDoesNotExistForVlan(vlan=vlan_number, vrrp_group_id=group_id)
with self.config(), self.interface_vlan(vlan):
result = self.shell.do('no ip vrrp-extended vrid {group_id}'.format(group_id=group_id))
if len(result) > 0:
raise VrrpDoesNotExistForVlan(vlan=vlan_number, vrrp_group_id=group_id)
if len(vlan.vrrp_groups) == 1:
self.shell.do('ip vrrp-extended auth-type no-auth')
def add_vif_data_to_vlans(self, vlans):
vlans_interface_name_dict = {vlan.vlan_interface_name: vlan for vlan in vlans if vlan.vlan_interface_name}
for int_vlan_data in split_on_bang(self.shell.do("show running-config interface")):
if regex.match("^interface ve (\d+)", int_vlan_data[0]):
current_vlan = vlans_interface_name_dict.get(regex[0])
if current_vlan:
add_interface_vlan_data(current_vlan, int_vlan_data)
def add_dhcp_relay_server(self, vlan_number, ip_address):
vlan = self._get_vlan(vlan_number, include_vif_data=True)
if ip_address in vlan.dhcp_relay_servers:
raise DhcpRelayServerAlreadyExists(vlan_number=vlan_number, ip_address=ip_address)
with self.config(), self.interface_vlan(vlan):
self.shell.do("ip helper-address {}".format(ip_address))
def remove_dhcp_relay_server(self, vlan_number, ip_address):
vlan = self._get_vlan(vlan_number, include_vif_data=True)
if ip_address not in vlan.dhcp_relay_servers:
raise UnknownDhcpRelayServer(vlan_number=vlan_number, ip_address=ip_address)
with self.config(), self.interface_vlan(vlan):
self.shell.do("no ip helper-address {}".format(ip_address))
def set(self, command, *arguments):
    """Run *command* formatted with *arguments* on the switch shell.

    The command is skipped entirely when any argument is None; the returned
    ResultChecker then wraps None instead of shell output.
    """
    if any(argument is None for argument in arguments):
        return ResultChecker(None)
    return ResultChecker(self.shell.do(command.format(*arguments)))
def _list_vlans(self):
    """Parse every vlan section of the running-config into VlanBrocade objects."""
    sections = split_on_bang(self.shell.do("show running-config vlan | begin vlan"))
    return [parse_vlan(section) for section in sections]
def _get_vlan(self, vlan_number, include_vif_data=False):
    """Fetch a single vlan via "show vlan"; raises UnknownVlan when missing.

    With include_vif_data=True, the running-config of the associated ve
    interface is parsed into the returned VlanBrocade as well.
    """
    result = self._show_vlan(vlan_number)
    if result[0].startswith("Error"):
        raise UnknownVlan(vlan_number)
    vlan = VlanBrocade(vlan_number)
    for line in result:
        # `regex` is the module-level match helper; groups are read by index.
        if regex.match(".*PORT-VLAN \d*, Name ([^,]+),.*", line):
            # "[None]" means unnamed; "DEFAULT-VLAN" is normalized to "default".
            vlan.name = regex[0] if regex[0] != "[None]" else None
            vlan.name = vlan.name if vlan.name != "DEFAULT-VLAN" else "default"
        elif regex.match(".*Associated Virtual Interface Id: (\d+).*", line):
            vlan.vlan_interface_name = regex[0]
            if include_vif_data:
                add_interface_vlan_data(vlan, self.shell.do("show running-config interface ve {}".format(regex[0])))
    return vlan
def _show_vlan(self, vlan_number):
    """Return the raw "show vlan <n>" output from the switch (list of lines)."""
    return self.shell.do("show vlan {}".format(vlan_number))
def _get_vlan_association_removal_operations(self, result):
    """Collect (vlan, state) pairs from "VLAN: <id> <state>" output lines.

    The default vlan (id 1) is skipped; the second capture (association
    state) is lowercased for later command construction.
    """
    operations = []
    for line in result:
        if regex.match("VLAN: (\d*) ([^\s]*)", line):
            # The regex helper unpacks its captured groups on iteration.
            vlan, state = regex
            if int(vlan) > 1:
                operations.append((vlan, state.lower()))
    return operations
def parse_vlan(vlan_data):
    """Build a VlanBrocade from one "vlan ..." running-config section."""
    regex.match("^vlan (\d+).*", vlan_data[0])
    current_vlan = VlanBrocade(int(regex[0]))
    if regex.match("^vlan \d+ name ([^\s]*)", vlan_data[0]):
        # "DEFAULT-VLAN" is normalized to the conventional name "default".
        current_vlan.name = regex[0] if regex[0] != "DEFAULT-VLAN" else "default"
    else:
        current_vlan.name = None
    for line in vlan_data[1:]:
        if regex.match("^\srouter-interface ve (\d+)", line):
            current_vlan.vlan_interface_name = regex[0]
    return current_vlan
def add_interface_vlan_data(target_vlan, int_vlan_data):
    """Parse an "interface ve N" running-config section into *target_vlan*.

    Fills ips, access groups, vrf forwarding, vrrp groups, dhcp relay servers
    and the icmp-redirect flag.  *vrrp_group* tracks the vrrp block currently
    being parsed: a VrrpGroup while inside one, None before any group, and
    False once the indented block has been left.
    """
    vrrp_group = None
    for line in int_vlan_data[1:]:
        # Leaving the indented vrrp block: use False (not None) so the
        # `if vrrp_group:` branch below stops applying.
        if vrrp_group is not None and not line.startswith(" "):
            vrrp_group = False
        if regex.match("^ ip address ([^\s]*)", line):
            target_vlan.ips.append(BrocadeIPNetwork(regex[0], is_secondary=line.endswith("secondary")))
        elif regex.match("^ ip access-group ([^\s]*) ([^\s]*)", line):
            direction = {'in': IN, 'out': OUT}[regex[1]]
            target_vlan.access_groups[direction] = regex[0]
        elif regex.match("^ vrf forwarding ([^\s]*)", line):
            target_vlan.vrf_forwarding = regex[0]
        elif regex.match("^ ip vrrp-extended vrid ([^\s]*)", line):
            # Reuse an existing group object when the vrid was already seen.
            vrrp_group = next((group for group in target_vlan.vrrp_groups if str(group.id) == regex[0]), None)
            if vrrp_group is None:
                vrrp_group = VrrpGroup(id=int(regex[0]))
                target_vlan.vrrp_groups.append(vrrp_group)
        elif regex.match("^ ip-address ([^\s]*)", line):
            vrrp_group.ips.append(IPAddress(regex[0]))
        if vrrp_group:
            if regex.match("^ backup priority ([^\s]*) track-priority ([^\s]*)", line):
                vrrp_group.priority = int(regex[0])
                vrrp_group.track_decrement = int(regex[1])
            elif regex.match("^ hello-interval ([^\s]*)", line):
                vrrp_group.hello_interval = int(regex[0])
            elif regex.match("^ dead-interval ([^\s]*)", line):
                vrrp_group.dead_interval = int(regex[0])
            elif regex.match("^ track-port (.*)", line):
                vrrp_group.track_id = regex[0]
            elif regex.match("^ activate", line):
                # "activate" terminates a vrrp group definition.
                vrrp_group = None
        elif regex.match("^ ip helper-address ([^\s]*)", line):
            target_vlan.dhcp_relay_servers.append(IPAddress(regex[0]))
        elif regex.match("^ no ip redirect", line):
            target_vlan.icmp_redirects = False
def parse_if_ranges(string):
    """Yield each individual interface name from a range expression.

    e.g. "ethe 1/1 to 1/3 ethe 2/5" yields "ethe 1/1", "ethe 1/2",
    "ethe 1/3", "ethe 2/5".
    """
    consumed_string = string.strip()
    while len(consumed_string) > 0:
        if regex.match("^(([^\s]*) ([^\s]*) to ([^\s]*)).*", consumed_string):
            parsed_part, port_type, lower_bound, higher_bound = regex
            lower_values = lower_bound.split("/")
            higher_values = higher_bound.split("/")
            # Only the last path component varies; the prefix is shared.
            for port_id in range(int(lower_values[-1]), int(higher_values[-1]) + 1):
                yield "{} {}/{}".format(port_type, "/".join(lower_values[:-1]), port_id)
        else:
            # Single "type name" pair, no range.
            regex.match("^([^\s]* [^\s]*).*", consumed_string)
            parsed_part = regex[0]
            yield regex[0]
        # Advance past whatever was just consumed.
        consumed_string = consumed_string[len(parsed_part):].strip()
def _to_real_names(if_list):
return [i.replace("ethe", "ethernet") for i in if_list]
def _to_short_name(interface_id):
return interface_id.replace("ethernet", "ethe")
def set_vlans_properties(interface_vlans):
    """Copy tagged/untagged vlan membership onto interface_vlans["object"].

    An untagged vlan becomes access_vlan (no tagged vlans) or
    trunk_native_vlan (with tagged vlans); any tagged vlans switch the
    interface into TRUNK mode.
    """
    iface = interface_vlans["object"]
    tagged = interface_vlans["tagged"]
    untagged = interface_vlans["untagged"]
    if untagged is not None:
        if tagged:
            iface.trunk_native_vlan = untagged
        else:
            iface.access_vlan = untagged
    if tagged:
        iface.port_mode = TRUNK
        iface.trunk_vlans = tagged
def get_interface_vlans_association(interface, vlans):
    """Summarize one interface's vlan membership.

    Returns {"tagged": [vlan ids], "untagged": vlan id or None,
    "object": interface}.  When several vlans claim the interface untagged,
    the last one in *vlans* wins (matching the original accumulation order).
    """
    name = interface.name
    association = {
        "tagged": [vlan["id"] for vlan in vlans if name in vlan["tagged_interface"]],
        "untagged": None,
        "object": interface,
    }
    for vlan in vlans:
        if name in vlan["untagged_interface"]:
            association["untagged"] = vlan["id"]
    return association
def parse_vlan_runningconfig(data):
    """Extract tagged/untagged interface membership from a vlan config section.

    Range expressions are expanded and short names converted to full
    "ethernet ..." names.  The 'id' key is only present when the first line
    matched "vlan <n>".
    """
    vlan = {"tagged_interface": [], "untagged_interface": []}
    if regex.match("^vlan (\d*)", data[0]):
        vlan['id'] = int(regex[0])
    for line in data:
        if regex.match(" untagged (.*)", line):
            for name in _to_real_names(parse_if_ranges(regex[0])):
                vlan["untagged_interface"].append(name)
        if regex.match(" tagged (.*)", line):
            for name in _to_real_names(parse_if_ranges(regex[0])):
                vlan["tagged_interface"].append(name)
    return vlan
def parse_interface(if_data):
    """Build an Interface from a "show interfaces" section.

    Returns None (implicitly) when the first line is not an Ethernet header.
    """
    if regex.match("^\w*Ethernet([^\s]*) is (\w*).*", if_data[0]):
        i = Interface(name="ethernet {}".format(regex[0]), port_mode=ACCESS, shutdown=regex[1] == "disabled")
        for line in if_data:
            if regex.match("Port name is (.*)", line):
                i.description = regex[0]
        return i
class VlanBrocade(Vlan):
    """Vlan enriched with Brocade-specific state (ve interface name and the
    icmp-redirect flag, which defaults to enabled)."""
    def __init__(self, *args, **kwargs):
        # Bug fix: pop the Brocade-only kwarg *before* delegating, so the
        # base Vlan initializer never receives an unexpected
        # 'vlan_interface_name' keyword (matches the pop-then-super pattern
        # used by BrocadeIPNetwork below).
        self.vlan_interface_name = kwargs.pop('vlan_interface_name', None)
        super(VlanBrocade, self).__init__(*args, **kwargs)
        self.icmp_redirects = True
class BrocadeIPNetwork(IPNetwork):
    """IPNetwork carrying Brocade's "secondary" address flag."""
    def __init__(self, *args, **kwargs):
        # Pop the extra kwarg before delegating so IPNetwork never sees it.
        self.is_secondary = kwargs.pop('is_secondary', False)
        super(BrocadeIPNetwork, self).__init__(*args, **kwargs)
class BackwardCompatibleBrocade(Brocade):
    """Deprecation shim over Brocade.

    Accepts legacy interface ids given without the "ethernet " prefix and
    normalizes them (via _add_ethernet, which also emits a
    DeprecationWarning) before delegating every call to the base class.
    """
    def __init__(self, switch_descriptor, shell_factory):
        super(BackwardCompatibleBrocade, self).__init__(switch_descriptor, shell_factory)
        # Per-host logger named after the wrapped Brocade implementation.
        self.logger = logging.getLogger(
            "{module}.{hostname}".format(module=Brocade.__module__,
                                         hostname=self.switch_descriptor.hostname))
    def add_trunk_vlan(self, interface_id, vlan):
        return super(BackwardCompatibleBrocade, self).add_trunk_vlan(_add_ethernet(interface_id), vlan)
    def set_interface_state(self, interface_id, state):
        return super(BackwardCompatibleBrocade, self).set_interface_state(_add_ethernet(interface_id), state)
    def set_trunk_mode(self, interface_id):
        return super(BackwardCompatibleBrocade, self).set_trunk_mode(_add_ethernet(interface_id))
    def set_access_vlan(self, interface_id, vlan):
        return super(BackwardCompatibleBrocade, self).set_access_vlan(_add_ethernet(interface_id), vlan)
    def set_access_mode(self, interface_id):
        return super(BackwardCompatibleBrocade, self).set_access_mode(_add_ethernet(interface_id))
    def remove_trunk_vlan(self, interface_id, vlan):
        super(BackwardCompatibleBrocade, self).remove_trunk_vlan(_add_ethernet(interface_id), vlan)
    def unset_interface_native_vlan(self, interface_id):
        return super(BackwardCompatibleBrocade, self).unset_interface_native_vlan(_add_ethernet(interface_id))
    def unset_interface_access_vlan(self, interface_id):
        return super(BackwardCompatibleBrocade, self).unset_interface_access_vlan(_add_ethernet(interface_id))
    def interface(self, interface_id):
        return super(BackwardCompatibleBrocade, self).interface(_add_ethernet(interface_id))
    def set_interface_native_vlan(self, interface_id, vlan):
        return super(BackwardCompatibleBrocade, self).set_interface_native_vlan(_add_ethernet(interface_id), vlan)
    def add_vrrp_group(self, vlan_number, group_id, ips=None, priority=None, hello_interval=None, dead_interval=None,
                       track_id=None, track_decrement=None):
        # Only track_id is an interface id here; the other args pass through.
        return super(BackwardCompatibleBrocade, self).add_vrrp_group(vlan_number, group_id, ips, priority,
                                                                     hello_interval, dead_interval,
                                                                     _add_ethernet(track_id), track_decrement)
    def reset_interface(self, interface_id):
        return super(BackwardCompatibleBrocade, self).reset_interface(_add_ethernet(interface_id))
def _add_ethernet(interface_id):
if interface_id is not None and re.match("^\d.*", interface_id):
warnings.warn("The brocade interface naming without the \"ethernet\" prefix has been deprecated", DeprecationWarning)
return "ethernet {}".format(interface_id)
return interface_id
| StarcoderdataPython |
9619977 | <reponame>FrederichRiver/neutrino3
from typing import Tuple
import pandas as pd
from libmysql_utils.mysql8 import mysqlHeader, mysqlQuery
from libbasemodel.form import formStockManager
from pandas import DataFrame, Series
from .event import XrdrEvent
"""
1.从MySQL查询数据
2.数据清理
3.生成迭代器
"""
class DataBase(mysqlQuery):
    """
    Base class for engines that query stock data from MySQL over a fixed
    date window.  (Docstrings translated from Chinese.)
    """
    def __init__(self, header: mysqlHeader, from_date: str, end_date: str) -> None:
        """
        input date format
        from_date: 2021-08-01
        end_date: 2021-08-31
        """
        super().__init__(header)
        self.pool = []                 # stock codes registered with this engine
        self.data = DataFrame()        # merged per-stock data (see subclasses)
        self.from_date = from_date
        self.end_date = end_date
        self._stock_list = []          # cache filled by the asset_list property
        self.dataline = DataFrame()    # most recently served data row
        self.prev_date = None
        self.prev_dataline = DataFrame()
        self.factor = {}               # per-stock adjustment factors
    def Config(self, **args):
        """
        Configuration hook; concrete engine subclasses must override this.
        """
        raise NotImplementedError
    def _add_asset(self, stock_code: str):
        """
        param: stock_code is list or str type
        """
        if isinstance(stock_code, str):
            self.pool.append(stock_code)
        elif isinstance(stock_code, list):
            self.pool.extend(stock_code)
    @property
    def asset_list(self) -> list:
        """
        Fetch all active stock codes from the database; used to verify that
        a given stock code actually exists.
        """
        query_stock_code = self.session.query(formStockManager.stock_code).filter_by(flag='t').all()
        df = pd.DataFrame.from_dict(query_stock_code)
        df.columns = ['stock_code']
        self._stock_list = df['stock_code'].tolist()
        # should test if stock list is null
        return self._stock_list
    def isStock(self, stock_code: str) -> bool:
        """
        True when *stock_code* is a known, real stock code.

        NOTE(review): checks the cached _stock_list, which is only populated
        after asset_list has been accessed — confirm callers do that first.
        """
        return stock_code in self._stock_list
class XrdrDataEngine(DataBase):
    """
    Data engine dedicated to correlation computations; provides adjusted
    (ex-rights/ex-dividend) prices only.
    """
    def Config(self, **args):
        """
        Register assets and load their data.

        param: {'asset': [stock_1, stock_2, ...]}
        """
        asset_list = args.get('asset', [])
        for asset in asset_list:
            self._add_asset(asset)
        # NOTE(review): _update() is not defined on DataBase or this class in
        # this module (only StockData defines one) — confirm it is provided
        # elsewhere, otherwise this call raises AttributeError.
        self._update()
    def get_data(self, stock_code: str, start='', end='') -> Series:
        """
        Return the adjusted close-price series for *stock_code*, indexed by
        trade date.  An empty, correctly-named Series is returned when no
        rows match.
        """
        query_column = 'trade_date,close_price,adjust_factor'
        def_column = ['trade_date', f"{stock_code}", "adjust_factor"]
        if start or end:
            df = self.condition_select(stock_code, query_column, f"trade_date BETWEEN '{start}' AND '{end}'")
        else:
            df = self.select_values(stock_code, query_column)
        if df.empty:
            # Bug fix: the original fell through to `DataFrame()[stock_code]`,
            # which raises KeyError on an empty frame.  Return an empty
            # Series with the expected name instead.
            return Series(dtype='float64', name=stock_code)
        df.columns = def_column
        df['trade_date'] = pd.to_datetime(df['trade_date'])
        df.set_index('trade_date', inplace=True)
        # Apply the cumulative adjustment factor to the close price.
        df[stock_code] = df[stock_code] * df['adjust_factor']
        return df[stock_code]
class StockData(DataBase):
    """
    Active data engine
    param: from_date, format "2021-05-25"
    param: end_date, format "2022-05-25"\n
    Provide api:
    1. iter method: by using iter of StockData to get dataline
    2. get: query data for a specific date
    """
    def __str__(self) -> str:
        return f"From {self.from_date} to {self.end_date}"
    """用于初始化"""
    def Config(self, **args):
        """
        param: {'asset': [stock_1, stock_2, ...]}
        """
        asset_list = args.get('asset', [])
        for asset in asset_list:
            self._add_asset(asset)
        self._update()
    def get_data(self, stock_code: str, start='', end='') -> DataFrame:
        """
        Returns three columns per stock: close price, adjusted (XRDR) close
        and previous close.  An empty DataFrame is returned when no rows
        match.  (Docstring translated from Chinese.)
        """
        query_column = 'trade_date,close_price,prev_close_price,adjust_factor'
        def_column = ['trade_date', f"{stock_code}", f"{stock_code}_prev", f"{stock_code}_factor"]
        if start or end:
            df = self.condition_select(stock_code, query_column, f"trade_date BETWEEN '{start}' AND '{end}'")
        else:
            df = self.select_values(stock_code, query_column)
        if not df.empty:
            df.columns = def_column
            df[f"{stock_code}_xrdr"] = df[stock_code] * df[f"{stock_code}_factor"]
            df['trade_date'] = pd.to_datetime(df['trade_date'])
            df.set_index('trade_date', inplace=True)
            result = df[[stock_code, f"{stock_code}_xrdr", f"{stock_code}_prev"]]
            return result
        else:
            df = DataFrame()
            return df
    def _update(self):
        # Pull data for any pooled stock not yet loaded, then drop dates with
        # missing values so every remaining row is complete across stocks.
        for stock in self.pool:
            # NOTE(review): membership is tested against the date index, not
            # the columns — confirm this is intended.
            if stock not in self.data.index:
                df = self.get_data(stock_code=stock, start=self.from_date, end=self.end_date)
                self.data = pd.concat([self.data, df], axis=1)
                self.factor[stock] = 1.0
        self.data.dropna(axis=0, how='any', inplace=True)
    """用于逐日回测"""
    def __iter__(self):
        return self.data.iterrows()
    def get(self, query_date: pd.Timestamp) -> DataFrame:
        """
        Return the data row for *query_date* (day-by-day backtesting);
        returns None implicitly when the date is not in the index.
        """
        if query_date in self.data.index:
            self.prev_date = query_date
            self.prev_dataline = self.dataline
            self.dataline = self.data.loc[query_date]
            # Apply each stock's current adjustment factor to its XRDR price.
            for stock_code in self.pool:
                self.dataline[f"{stock_code}_xrdr"] = self.dataline[f"{stock_code}_xrdr"] * self.factor[stock_code]
            return self.dataline
    def update_factor(self, Xrdr_event: XrdrEvent) -> float:
        """
        Recompute and store the adjustment factor for the stock of an XRDR
        (ex-rights/dividend) event, based on the previous close price.
        """
        stock_id = Xrdr_event.stock_id
        price = self.prev_dataline[f"{stock_id}_prev"]
        self.factor[stock_id] = (1 - Xrdr_event.dividend / (10 * price)) / (1 + Xrdr_event.increase / 10 + Xrdr_event.bonus / 10)
        return self.factor[stock_id]
class EventEngine(DataBase):
    """Loads ex-rights/dividend (XRDR) events per stock for a date window."""
    table_name = 'stock_interest'
    # query data
    # event columns: dividend, bonus shares, share conversion
    # API
    def Config(self, **args):
        """
        param: {'asset': [stock_1, stock_2, ...]}
        """
        asset_list = args.get('asset', [])
        for asset in asset_list:
            self._add_asset(asset)
        self.load()
    def _reset(self):
        """
        Called only by load(); clears previously loaded event data.
        """
        self.data = {}
    def load(self):
        """
        After a reset, reload event data for every stock in the pool.
        """
        self._reset()
        for stock in self.pool:
            df = self.get_data(stock_code=stock, start=self.from_date, end=self.end_date)
            self.data[stock] = df
    def get_data(self, stock_code: str, start='', end='') -> DataFrame:
        """Query XRDR events for one stock, indexed by ex-rights date."""
        query_column = 'float_bonus,float_increase,float_dividend,xrdr_date,char_stock_code'
        if start or end:
            df = self.condition_select(self.table_name, query_column, f"char_stock_code='{stock_code}' AND (xrdr_date BETWEEN '{start}' AND '{end}')")
        else:
            df = self.select_values(self.table_name, query_column)
        if not df.empty:
            def_column = ['bonus', 'increase', 'dividend', 'xrdr_date', 'stock_code']
            df.columns = def_column
            df['xrdr_date'] = pd.to_datetime(df['xrdr_date'])
            df.set_index('xrdr_date', inplace=True)
        else:
            df = DataFrame()
        return df
    # API
    def get(self, query_date: pd.Timestamp) -> list:
        # Annotation corrected: a (possibly empty) list of XrdrEvent is
        # returned, not a Tuple.
        event_list = []
        for stock_code in self.pool:
            if not self.data[stock_code].empty:
                if query_date in self.data[stock_code].index:
                    self.dataline = self.data[stock_code].loc[query_date]
                    event_list.append(XrdrEvent(query_date, self.dataline))
        return event_list
| StarcoderdataPython |
94743 | <filename>scrapy-template/spider/{{class_prefix}}.py
# -*- coding: utf-8 -*-
import scrapy
class {{class_prefix}}Spider(scrapy.Spider):
    """Template spider; the {{...}} placeholders are filled in by the
    project generator before this file becomes valid Python."""
    name = '{{spider_name}}'
    allowed_domains = ['{{spider_name}}']
    # Keep the crawl polite: 2 concurrent requests with a 250 ms delay.
    custom_settings = {
        'CONCURRENT_REQUESTS': 2,
        'DOWNLOAD_DELAY': 0.25
    }
    # Desktop-Chrome user agent sent with every request.
    defaultHeaders = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36'
    }
    def start_requests(self):
        """Seed the crawl with the listing page(s)."""
        urls = [
            'https://xxxx.com',
        ]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parseList, headers=self.defaultHeaders)
    def parseList(self, response):
        """Extract article links from a listing page and follow each one."""
        blocks = response.css('.pic-txt')
        for b in blocks:
            url = 'https:' + b.css('.tit a::attr(href)')[0].extract()
            #yield {'url': url}
            yield scrapy.Request(url=url, callback=self.parseArticle, headers=self.defaultHeaders)
    def parseArticle(self, response):
        """Scrape title, body text and space-joined tags from an article page."""
        yield {
            'title': response.css('.artTit::text')[0].extract(),
            'content': "".join(response.css('.artText *::text').extract()),
            'tag': " ".join(response.css('.artLabel a::text').extract()),
        }
| StarcoderdataPython |
3415347 | <reponame>gustavo-mendel/my-college-projects<gh_stars>1-10
#!/usr/bin/env python3
from __future__ import print_function
import os
import os.path
import tempfile
import subprocess
import time
import signal
import re
import sys
import shutil
# Feature flags and locations for the grading harness.
# `create` appears unused in this module; `log` enables TEST_LOG output below.
create = 0
log = 0
file_locations = os.path.expanduser(os.getcwd())
# Logisim jar is expected three directories above the working directory.
logisim_location = os.path.join(os.getcwd(),"../../../logisim-evolution.jar")
if log:
    new = open('new.out', 'w')
    logfile = open('TEST_LOG','w')
def student_reference_match_unbounded(student_out, reference_out):
    """Compare two open text streams line by line.

    Reads until the reference stream is exhausted; returns False on the
    first differing line, True otherwise.  Extra trailing student output is
    ignored ("unbounded").
    """
    for expected in iter(reference_out.readline, ''):
        if student_out.readline() != expected:
            return False
    return True
def assure_path_exists(path):
    """Ensure the parent directory of *path* exists, creating intermediate
    directories as needed.

    Bug fix: the original called os.makedirs(os.path.dirname(path))
    unconditionally, which raises FileNotFoundError for a bare filename
    (dirname == '').  Paths with no directory component are now a no-op.
    The local was also renamed to avoid shadowing the `dir` builtin.
    """
    parent = os.path.dirname(path)
    if parent and not os.path.exists(parent):
        os.makedirs(parent)
class TestCase(object):
    """Inputs for one grading run.

    circfile  -- Logisim circuit file to evaluate
    outfile   -- path where the student's table output is written
    tracefile -- reference output to diff against
    points    -- points awarded on a full match
    """
    def __init__(self, circfile, outfile, tracefile, points):
        self.circfile = circfile
        self.outfile = outfile
        self.tracefile = tracefile
        self.points = points
class AbsoluteTestCase(TestCase):
    """
    All-or-nothing test case.
    """
    def __call__(self):
        """Run the circuit through Logisim (tty table mode) and diff the
        output against the reference trace.

        Returns (points_awarded, reason).
        """
        # Scratch file; appears unused in the rest of this method.
        output = tempfile.TemporaryFile(mode='r+')
        try:
            stdinf = open('/dev/null')
        except Exception as e:
            try:
                # Windows null device fallback.
                stdinf = open('nul')
            except Exception as e:
                print("The no nul directories. Program will most likely error now.")
        proc = subprocess.Popen(["java","-jar",logisim_location,"-tty","table",self.circfile], stdin=stdinf, stdout=subprocess.PIPE)
        try:
            assure_path_exists(self.outfile)
            outfile = open(self.outfile, "wb")
            student_out = proc.stdout.read()
            outfile.write(student_out)
            outfile.close()
            assure_path_exists(self.outfile)
            outfile = open(self.outfile, "r")
            reference = open(self.tracefile)
            passed = student_reference_match_unbounded(outfile, reference)
        finally:
            try:
                # Logisim may still be running; make sure it is terminated.
                os.kill(proc.pid,signal.SIGTERM)
            except Exception as e:
                pass
        if passed:
            return (self.points,"Matched expected output")
        else:
            return (0,"Did not match expected output")
def test_submission(name,outfile,tests):
    """Run every test case, report per-test results, and return the total
    points received.

    *tests* is an iterable of (description, TestCase) pairs; calling a
    TestCase yields (points_received, reason).  Results are printed to
    stdout, written to *outfile*, and mirrored to the module log when
    enabled.
    """
    # actual submission testing code
    print ("Testing submission")
    total_points = 0
    total_points_received = 0
    tests_passed = 0
    tests_partially_passed = 0
    tests_failed = 0
    test_results = []
    for description,test,points_received,reason in ((description,test) + test() for description,test in tests): # gross
        points = test.points
        assert points_received <= points
        if points_received == points:
            print ("\t%s PASSED test: %s" % (name, description))
            if log:
                print ("\t%s PASSED test: %s" % (name, description), file=logfile)
            total_points += points
            total_points_received += points
            tests_passed += 1
            test_results.append("\tPassed test \"%s\" worth %d points." % (description,points))
        elif points_received > 0:
            print ("\t%s PARTIALLY PASSED test: %s" % (name,description))
            if log:
                print ("\t%s PARTIALLY PASSED test: %s" % (name,description), file=logfile)
            total_points += points
            total_points_received += points_received
            tests_partially_passed += 1
            test_results.append("\tPartially passed test \"%s\" worth %d points (received %d)" % (description, points, points_received))
        else:
            print ("\t%s FAILED test: %s" % (name, description))
            if log:
                print ("\t%s FAILED test: %s" % (name, description), file=logfile)
            total_points += points
            tests_failed += 1
            test_results.append("\tFailed test \"%s\" worth %d points. Reason: %s" % (description, points, reason))
    print ("\tScore for %s: %d/%d (%d/%d tests passed, %d partially)" %\
        (name, total_points_received, total_points, tests_passed,
         tests_passed + tests_failed + tests_partially_passed, tests_partially_passed))
    print ("%s: %d/%d (%d/%d tests passed, %d partially)" %\
        (name, total_points_received, total_points, tests_passed,
         tests_passed + tests_failed + tests_partially_passed, tests_partially_passed), file=outfile)
    if log:
        print ("\n\n%s: %d/%d (%d/%d tests passed, %d partially)" %\
            (name, total_points_received, total_points, tests_passed,
             tests_passed + tests_failed + tests_partially_passed, tests_partially_passed), file=logfile)
    for line in test_results:
        print (line, file=outfile)
        if log:
            print ( line, file=logfile)
    # Bug fix: previously returned `points_received` — the score of the
    # *last* test only.  The caller-relevant value is the accumulated total.
    return total_points_received
def main(tests):
    """Entry point: score the submission named 'ADDI' against *tests*,
    writing the summary to stdout."""
    test_submission('ADDI',sys.stdout,tests)
| StarcoderdataPython |
4919274 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.common import validation
from keystone.common.validation import parameter_types
# JSON-schema property fragment shared by the policy create/update bodies.
_policy_properties = {
    'domain_id': parameter_types.id_string,
    'name': parameter_types.name,
    'description': validation.nullable(parameter_types.description),
    'enabled': parameter_types.boolean,
    'rules': {
        'type': 'object'
    }
}
# Creation requires the identifying fields; unknown extra properties are
# tolerated (additionalProperties: True).
policy_create = {
    'type': 'object',
    'properties': _policy_properties,
    'required': ['domain_id', 'name', 'rules'],
    'additionalProperties': True
}
# Updates may supply any non-empty subset of the policy properties.
policy_update = {
    'type': 'object',
    'properties': _policy_properties,
    'minProperties': 1,
    'additionalProperties': True
}
# JSON-schema property fragment shared by the rule create/update bodies.
_rule_properties = {
    'policy_id': parameter_types.id_string,
    'service': {
        'type': 'string'
    },
    'permission': {
        'type': 'string'
    },
    'condition': {
        'type': 'string'
    },
}
# Unlike policies, rules reject unknown properties on both create and update.
rule_create = {
    'type': 'object',
    'properties': _rule_properties,
    'required': ['policy_id', 'service', 'permission', 'condition'],
    'additionalProperties': False
}
rule_update = {
    'type': 'object',
    'properties': _rule_properties,
    'minProperties': 1,
    'additionalProperties': False
}
| StarcoderdataPython |
9751143 | <reponame>msarilar/simplefix
#! /usr/bin/env python3
import simplefix
# Parse each line of secdef.dat as the body of a FIX 4.2 message, wrapping it
# with a BeginString header and a placeholder checksum so the parser accepts it.
p = simplefix.FixParser()
# Improvement: `with` guarantees the file is closed even if parsing raises;
# the original used a bare open()/close() pair.
with open("secdef.dat") as f:
    for line in f:
        line = line.rstrip('\n')
        p.append_buffer(b'8=FIX.4.2\x01')
        p.append_buffer(line)
        p.append_buffer(b'10=000\x01')
        m = p.get_message()
        print(m)
| StarcoderdataPython |
1618524 | <gh_stars>10-100
from collections import MutableSequence
from celery import Celery,chord,group
from kombu import Queue
import pandas as pd
from sklearn.metrics.pairwise import linear_kernel
from scipy.io import mmread
import pickle
import requests
from bs4 import BeautifulSoup
import time
from celery.result import allow_join_result
# Celery app wired to a local RabbitMQ broker with a Redis result backend.
broker_url = 'amqp://localhost'
app= Celery('tasks',backend='redis://',broker=broker_url)
# Two queues: 'general' (default) for regular tasks and 'crawl' for scraping.
app.conf.task_default_queue = 'general'
app.conf.task_queues=(
    Queue('general',routing_key='tasks.#'),
    Queue('crawl',routing_key='crawl.#'),
)
# TF-IDF artifacts are loaded once at import time and shared by the tasks
# below (sparse document matrix + fitted vectorizer).
Tfidf_matrix = mmread('./model/tfidf.mtx').tocsr()
Tfidf = ""
with open('./model/tfidf.pickle', 'rb') as f:
    Tfidf = pickle.load(f)
@app.task
def merge_text(texts):
    """Collect chord/group results into a plain list (identity merge).

    Idiom fix: replaces the manual append loop with list().
    """
    return list(texts)
@app.task
def crawl(books):
    """Scrape Aladin "comment review" snippets for one book.

    *books* is a (_id, item_id) pair; returns a dict carrying the ids and
    the collected review strings.  Any request/parse error is swallowed and
    printed, leaving the reviews collected so far.
    """
    _id = books[0]
    item_id=books[1]
    comment_review = []
    try:
        is_orderer = 1  # 2 = all reviewers, 1 = verified purchasers only
        url = f"https://www.aladin.co.kr/ucl/shop/product/ajax/GetCommunityListAjax.aspx?ProductItemId={item_id}&itemId={item_id}&pageCount=100&communitytype=CommentReview&nemoType=-1&page=1&startNumber=1&endNumber=10&sort=2&IsOrderer={is_orderer}&BranchType=1&IsAjax=true&pageType=0"
        response = requests.get(url)
        soup = BeautifulSoup(response.text, 'html.parser')
        boxs = soup.find_all(class_="blog_list3")
        for box in boxs:
            try:
                value = box.find("a").get_text(strip=True)
                # Strip the site's spoiler-warning boilerplate from the text.
                comment_review.append(value.replace("이 글에는 스포일러가 포함되어 있습니다. 보시겠습니까?회색 영역을 클릭하면 내용을 확인할 수 있습니다.", ""))
            except Exception as e:
                print(item_id, e)
    except Exception as e:
        print(item_id, e)
    return {"_id":_id,"item_id":item_id,"reviews":comment_review}
@app.task
def find_books(searchKeyword):
    """Rank indexed documents by TF-IDF cosine similarity to the query.

    Returns the indices of the top 20 (or fewer) strictly-positive matches,
    best first, using the module-level Tfidf vectorizer and matrix.
    """
    global Tfidf_matrix, Tfidf
    sentence = searchKeyword
    sentence_vec = Tfidf.transform([sentence])
    cosine_sim = linear_kernel(sentence_vec, Tfidf_matrix)
    # Keep only documents with similarity > 0, sorted descending by score.
    simScore = [x for x in enumerate(cosine_sim[-1]) if x[1] > 0]
    simScore = sorted(simScore, key=lambda x:x[-1], reverse=True)
    simScore = simScore[0:len(simScore)]
    books = [i[0] for i in simScore]
    #print(books[0:10] if len(books)>10 else books)
    result = books[0:20] if len(books)>20 else books
    return result
@app.task
def multi(searchKeyword):
    """
    1. Find similar books by TF-IDF similarity.
    2. (Planned) fan crawling out to n workers as parallel subtasks.
    3. (Planned) split review processing across m workers.
    """
    books = find_books(searchKeyword)
    # Bug fix: an unreachable duplicate `return books` followed this line.
    return books
| StarcoderdataPython |
6579219 | <reponame>HerrX2000/zreader
#!/usr/bin/env python3
import zreader
import ujson as json
# Adjust chunk_size as necessary -- defaults to 16,384 if not specified
reader = zreader.Zreader("reddit_data.zst", chunk_size=8192)
# Read each line from the reader
for line in reader.readlines():
obj = json.loads(line)
print (obj['author'], obj['subreddit'], sep=",")
| StarcoderdataPython |
5088736 | <filename>hash_api/config/settings.py
#!/usr/bin/env python3
import os
class BaseConfig(object):
    """Settings shared by every configuration: API binding, debug flag and
    application secret, all sourced from environment variables."""
    def __init__(self):
        env = os.environ
        # Any casing of "true" enables debug; everything else disables it.
        self.debug = env['HASH_API_DEBUG'].lower() == 'true'
        self.host = env['HASH_API_HOST']
        self.port = int(env['HASH_API_PORT'])
        self.application_secret = env['HASH_APP_SECRET']
class Config(BaseConfig):
    """Full application configuration: adds log file, MongoDB connection and
    JWT settings on top of BaseConfig."""
    def __init__(self):
        super().__init__()
        self.log_file_abs_path = os.environ['HASH_API_LOG']
        self.mongo_host = os.environ['HASH_MONGO_HOST']
        self.mongo_port = int(os.environ['HASH_MONGO_PORT'])
        self.mongo_user = os.environ['HASH_MONGO_USER']
        # Bug fix: this line had been mangled into invalid syntax by
        # credential scrubbing; restored following the HASH_MONGO_* naming
        # convention used by the surrounding settings.
        self.mongo_pass = os.environ['HASH_MONGO_PASS']
        self.mongo_db_name = os.environ['HASH_MONGO_DB']
        self.jwt_secret = os.environ['HASH_JWT_SECRET']
| StarcoderdataPython |
8028514 | <filename>BrowserRefresh.py
import os
import sys
import platform
import sublime
import sublime_plugin
# Fix windows imports
# Normalize this plugin's location and put it (plus the bundled 'win'
# helper package) on sys.path so the per-OS modules below can be imported.
__file__ = os.path.normpath(os.path.abspath(__file__))
__path__ = os.path.dirname(__file__)
if __path__ not in sys.path:
    sys.path.insert(0, __path__)
_pywinauto = os.path.join(__path__ + os.path.sep + 'win')
if _pywinauto not in sys.path:
    sys.path.insert(0, _pywinauto)
class BrowserRefreshCommand(sublime_plugin.TextCommand):
    """Sublime Text command: optionally save the current view, then tell the
    selected browsers to reload (per-OS implementation)."""

    def run(self, args, activate=True,
            browsers=['chrome'], auto_save=True, delay=None):
        # NOTE: the mutable default `browsers` is never modified below.
        _os = platform.system()
        # Auto-save the current view before refreshing.
        if auto_save and self.view and self.view.is_dirty():
            self.view.run_command('save')
        # Detect OS and import the matching refresher implementation.
        if _os == 'Darwin':
            from mac import MacBrowserRefresh
            refresher = MacBrowserRefresh(activate)
        elif _os == 'Windows':
            from win import WinBrowserRefresh
            refresher = WinBrowserRefresh(activate)
        elif _os == 'Linux':
            from linux import LinuxBrowserRefresh
            refresher = LinuxBrowserRefresh(activate)
        else:
            sublime.error_message('Your operating system is not supported')
            # Bug fix: without this return, `refresher` below is unbound and
            # the command dies with a NameError on unsupported platforms.
            return
        # Delay refresh when requested.
        if delay is not None:
            import time
            time.sleep(delay)
        # Actually refresh browsers (OS-specific ones are gated on _os).
        if 'chrome' in browsers:
            refresher.chrome()
        if 'canary' in browsers and _os == 'Darwin':
            refresher.canary()
        if 'yandex' in browsers and _os == 'Darwin':
            refresher.yandex()
        if 'safari' in browsers:
            refresher.safari()
        if 'webkit' in browsers and _os == 'Darwin':
            refresher.webkit()
        if 'firefox' in browsers:
            refresher.firefox()
        if 'firefoxdev' in browsers and _os == 'Darwin':
            refresher.firefox_dev()
        if 'opera' in browsers:
            refresher.opera()
        if 'ie' in browsers and _os == 'Windows':
            refresher.ie()
        if 'iron' in browsers and _os == 'Windows':
            refresher.iron()
        if 'palemoon' in browsers and _os == 'Windows':
            refresher.palemoon()
| StarcoderdataPython |
3439485 | from os import path, mkdir, listdir
from click import echo, argument, option
from steam.enums.common import EType
from . import app, db
from .models import Map, Server, Access, User
from .util import string_to_steamid
@app.cli.group('db')
def database():
    """Database-related commands"""
    # Exposed on the CLI as `flask db <subcommand>`.
    pass
@database.command()
def drop():
    """Drop database tables"""
    # Destructive and immediate: no confirmation prompt.
    db.drop_all()
@database.command('create')
def create_database():
    """Create database tables"""
    # Creates every table known to the SQLAlchemy metadata.
    db.create_all()
@app.cli.group()
def maps():
    """Map-related utilities"""
    pass
@maps.command('path')
def get_upload_path():
    """Get the effective configured upload path"""
    echo(app.config['UPLOAD_DIR'])
@maps.command('create')
def create_uploads():
    """Create the upload directory"""
    try:
        if not path.isdir(app.config['UPLOAD_DIR']):
            # mkdir creates a single level; the parent must already exist.
            mkdir(app.config['UPLOAD_DIR'])
    except IOError as e:
        echo('Could not create directory: ' + str(e))
@maps.command()
def discover():
    """Add pre-existing maps to the database"""
    # Names already present in the DB are skipped.
    existing = list(map(lambda m: m.name, Map.query.all()))
    for name in listdir(app.config['UPLOAD_DIR']):
        full_path = path.join(app.config['UPLOAD_DIR'], name)
        # Only regular, not-yet-known .bsp files are considered.
        if (
            name in existing or
            not path.isfile(full_path) or
            not name.endswith('.bsp')
        ):
            continue
        if name in app.config['BUILTIN']:
            echo('warning: ignoring builtin map ' + name)
            continue
        with open(full_path, 'rb') as file:
            magic_number = file.read(4)
            # Valid Source-engine maps start with the 'VBSP' magic bytes.
            if magic_number == b'VBSP':
                db.session.add(Map(name=name, uploaded=True))
                echo('added ' + name)
            else:
                echo('warning: ignoring invalid BSP ' + name)
    db.session.commit()
@maps.command()
def prune():
    """Remove maps that do not exist on the filesystem"""
    # NOTE: the loop variable shadows the builtin `map`.
    for map in Map.query.all():
        if not path.isfile(map.filename):
            db.session.delete(map)
            echo('pruned ' + map.name)
    db.session.commit()
@app.cli.group()
def user():
    """Manage users"""
    pass
@user.command('create')
@argument('steamid')
@option('--admin/--not-admin', default=False)
def create_user(steamid, admin):
    """Create a user"""
    steamid = string_to_steamid(steamid)
    # Only valid, individual (non-group/clan) steam ids are accepted.
    if not steamid.is_valid() or not steamid.type == EType.Individual:
        echo('Invalid steam ID')
        return 1
    user = User(steamid64=steamid.as_64, admin=admin)
    user.refresh_name()
    # A missing name after refresh_name() is treated as "no such steam user".
    if user.name is not None:
        db.session.add(user)
        db.session.commit()
        echo('added ' + user.name)
    else:
        echo('No such steam user')
        return 1
@user.command('delete')
@argument('steamid64', type=int)
def delete_user(steamid64):
    """Delete a user"""
    user = User.query.get(steamid64)
    if user is None:
        echo('No such user')
        return 1
    db.session.delete(user)
    db.session.commit()
    echo('deleted ' + user.name)
@user.command('list')
def list_users():
    """List all users"""
    # Fixed-width table: 17-char steam id column, then the display name.
    echo('Steam ID'.ljust(17, ' ') + ' Name')
    echo('-' * 79)
    for user in User.query.all():
        echo(str(user.steamid64) + ' ' + user.name)
| StarcoderdataPython |
1727021 | from datetime import datetime, timedelta
def create_expiration_cookie_time():
    """Return a cookie 'expires' timestamp: midnight at the start of the day
    two days from now, in the classic cookie date format
    ("%a, %d-%b-%Y %H:%M:%S GMT")."""
    expiry = (datetime.now() + timedelta(days=2)).replace(hour=0, minute=0, second=0)
    return expiry.strftime("%a, %d-%b-%Y %H:%M:%S GMT")
| StarcoderdataPython |
4964161 | from __future__ import print_function
import sys
import h2o
sys.path.insert(1,"../../../")
from tests import pyunit_utils
from h2o.estimators.isolation_forest import H2OIsolationForestEstimator
#testing default setup of following parameters:
#distribution (available in Deep Learning, XGBoost, GBM):
#stopping_metric (available in: GBM, DRF, Deep Learning, AutoML, XGBoost, Isolation Forest):
#histogram_type (available in: GBM, DRF)
#solver (available in: GLM) already done in hex.glm.GLM.defaultSolver()
#categorical_encoding (available in: GBM, DRF, Deep Learning, K-Means, Aggregator, XGBoost, Isolation Forest)
#fold_assignment (available in: GBM, DRF, Deep Learning, GLM, Naïve-Bayes, K-Means, XGBoost)
def test_isolation_forrest_effective_parameters():
    """Check resolution of 'AUTO' parameters for H2OIsolationForestEstimator.

    With the server property sys.ai.h2o.algos.evaluate_auto_model_parameters
    enabled (the default), 'AUTO' inputs must resolve to the same actual values
    an explicitly-configured model uses; with it disabled they must stay
    'AUTO'. In both cases the trained models must produce identical training
    metrics.
    """
    train2 = h2o.import_file(pyunit_utils.locate("smalldata/anomaly/ecg_discord_train.csv"))

    # if1 relies on AUTO defaults; if2 sets the expected effective values.
    if1 = H2OIsolationForestEstimator(ntrees=7, seed=12, sample_size=5, stopping_rounds=3)
    if1.train(training_frame=train2)

    if2 = H2OIsolationForestEstimator(ntrees=7, seed=12, sample_size=5, stopping_rounds=3, stopping_metric = 'anomaly_score', categorical_encoding="Enum")
    if2.train(training_frame=train2)

    assert if1.parms['stopping_metric']['input_value'] == 'AUTO'
    assert if1.parms['stopping_metric']['actual_value'] == if2.parms['stopping_metric']['actual_value']
    assert if1._model_json['output']['training_metrics']._metric_json['mean_score'] == if2._model_json['output']['training_metrics']._metric_json['mean_score']
    assert if1.parms['categorical_encoding']['input_value'] == 'AUTO'
    assert if1.parms['categorical_encoding']['actual_value'] == if2.parms['categorical_encoding']['actual_value']

    try:
        # Disable server-side resolution of AUTO parameters...
        h2o.rapids("(setproperty \"{}\" \"{}\")".format("sys.ai.h2o.algos.evaluate_auto_model_parameters", "false"))
        if1 = H2OIsolationForestEstimator(ntrees=7, seed=12, sample_size=5, stopping_rounds=3)
        if1.train(training_frame=train2)

        if2 = H2OIsolationForestEstimator(ntrees=7, seed=12, sample_size=5, stopping_rounds=3, stopping_metric = 'anomaly_score', categorical_encoding="Enum")
        if2.train(training_frame=train2)

        assert if1.parms['stopping_metric']['input_value'] == 'AUTO'
        assert if1.parms['stopping_metric']['actual_value'] == 'AUTO'
        assert if1._model_json['output']['training_metrics']._metric_json['mean_score'] == if2._model_json['output']['training_metrics']._metric_json['mean_score']
        assert if1.parms['categorical_encoding']['input_value'] == 'AUTO'
        assert if1.parms['categorical_encoding']['actual_value'] == 'AUTO'
    finally:
        # ...and always restore the default so later tests are unaffected.
        h2o.rapids("(setproperty \"{}\" \"{}\")".format("sys.ai.h2o.algos.evaluate_auto_model_parameters", "true"))


if __name__ == "__main__":
    pyunit_utils.standalone_test(test_isolation_forrest_effective_parameters)
else:
    test_isolation_forrest_effective_parameters()
| StarcoderdataPython |
# Backwards-compatibility shim: the implementation now lives in
# datalad.local.check_dates; importing this module re-exports it and warns.
import warnings
warnings.warn(
    "datalad.plugin.check_dates is deprecated and will be removed in a future "
    "release. "
    "Use the module from its new location datalad.local.check_dates instead.",
    DeprecationWarning)

from datalad.local.check_dates import *
| StarcoderdataPython |
1687314 | <filename>coalescent/scripts/calc_rho.py
#!/usr/bin/env python3
import numpy as np
from scipy import stats
from scipy.spatial.distance import hamming
from skbio import TreeNode, DistanceMatrix, TabularMSA, DNA
from docopt import docopt
import re
def sample_matrix_to_runs(dist, reps=3):
    """Expand each sample of a distance matrix into `reps` replicate runs.

    Every row/column of `dist` is repeated `reps` times and the ids become
    '<sample>-<run>' with run numbers starting at 1.
    """
    expanded = np.repeat(np.repeat(dist.data, reps, axis=1), reps, axis=0)
    replicated = DistanceMatrix(expanded)
    labels = []
    for sample in dist.ids:
        for run in range(reps):
            labels.append('{}-{}'.format(sample, run + 1))
    replicated.ids = labels
    return replicated
def spearman(a, b):
    r"""Return Spearman's :math:`\rho` between *a* and *b*.

    BUG FIX: the original docstring was not a raw string, so ``\rho``
    embedded a literal carriage return (``\r``) in the text.
    """
    # stats.spearmanr returns a result object; .correlation is the rho value.
    return stats.spearmanr(a, b).correlation
def distmat_corr(truthfile, distfile, reps=3, corrstat=spearman):
    """Correlate two condensed distance matrices using `corrstat`.

    The truth matrix is first expanded to `reps` runs per sample, then both
    matrices are aligned on their sorted sample ids before comparison.
    """
    observed = DistanceMatrix.read(distfile)
    expected = sample_matrix_to_runs(DistanceMatrix.read(truthfile), reps)
    obs_ids = sorted(observed.ids)
    exp_ids = sorted(expected.ids)
    # Both matrices must describe exactly the same set of runs.
    assert obs_ids == exp_ids, (obs_ids, exp_ids)
    obs_condensed = observed.filter(obs_ids).condensed_form()
    exp_condensed = expected.filter(obs_ids).condensed_form()
    return corrstat(exp_condensed, obs_condensed)
CLI = '''
USAGE:
calc_rho.py [options] TRUTH OBTAINED
OPTIONS:
-S SEED Seed
-m METRIC Metric
-s SIZE Sketchsize
-c COV Coverage
-v VAR Pi (mean pw dist)
-r REPS Replicate runs per sample
'''
def main():
    """Parse CLI arguments, compute the correlation, print one TSV row."""
    args = docopt(CLI)
    rho = distmat_corr(args["TRUTH"], args["OBTAINED"], int(args["-r"]))
    # Echo the run parameters alongside the statistic for easy aggregation.
    fields = (args['-S'], args['-m'], args['-s'], args['-c'], args['-v'], rho)
    print(*fields, sep='\t')


if __name__ == "__main__":
    main()
| StarcoderdataPython |
11332470 | # -*- coding: utf-8 -*-
import os
import requests
requests.packages.urllib3.disable_warnings()
class Device42BaseException(Exception):
    """Root of the Device42 client exception hierarchy."""
    pass

class Device42BadArgumentError(Exception):
    """Raised for invalid arguments.

    NOTE(review): inherits Exception rather than Device42BaseException, so
    ``except Device42BaseException`` will not catch it -- confirm intent.
    """
    pass

class Device42HTTPError(Device42BaseException):
    """Raised when the Device42 API returns a non-2xx response."""
    pass

class Device42WrongRequest(Device42HTTPError):
    """Raised for requests the API rejects as malformed."""
    pass
class Device42(object):
    """Minimal Device42 REST API client (basic auth, JSON responses)."""

    def __init__(self, endpoint, user, password, **kwargs):
        # endpoint: base URL of the Device42 instance.
        self.base = endpoint
        self.user = user
        self.pwd = password
        # TLS verification is disabled unconditionally (matching the
        # module-level disable_warnings() call).
        self.verify_cert = False
        self.debug = kwargs.get('debug', False)
        self.logger = kwargs.get('logger', None)
        self.base_url = "%s" % self.base
        self.headers = {}

    def _send(self, method, path, data=None):
        """ General method to send requests """
        url = "%s/%s" % (self.base_url, path)
        params = None
        if method == 'GET':
            # GET payloads travel as query parameters, not a request body.
            params = data
            data = None
        resp = requests.request(method, url, data=data, params=params,
                                auth=(self.user, self.pwd),
                                verify=self.verify_cert, headers=self.headers)
        if not resp.ok:
            raise Device42HTTPError("HTTP %s (%s) Error %s: %s\n request was %s" %
                                    (method, path, resp.status_code, resp.text, data))
        retval = resp.json()
        return retval

    def _get(self, path, data=None):
        """GET `path`, sending `data` as query parameters."""
        return self._send("GET", path, data=data)

    def _post(self, path, data):
        """POST `data` to `path` (a trailing slash is appended if missing)."""
        if not path.endswith('/'):
            path += '/'
        return self._send("POST", path, data=data)

    def _put(self, path, data):
        """PUT `data` to `path` (a trailing slash is appended if missing)."""
        if not path.endswith('/'):
            path += '/'
        return self._send("PUT", path, data=data)

    def _delete(self, path):
        """DELETE `path`."""
        return self._send("DELETE", path)

    def _log(self, message, level="DEBUG"):
        # NOTE(review): stdlib logging.Logger.log expects a *numeric* level as
        # the first argument; passing level.upper() (a string) only works for
        # logger objects that accept string levels (e.g. loguru) -- confirm.
        if self.logger:
            self.logger.log(level.upper(), message)

    def get_device_by_name(self, name):
        """Return the device record with the given name."""
        path = "api/1.0/devices/name/%s" % name
        return self._get(path)

    def get_all_devices(self):
        """Return every device, paging through the API 1000 records at a time."""
        path = "api/1.0/devices/all/"
        devices = []
        # First request only fetches total_count so we know how far to page.
        init_data = self._get(path, {'limit': 1, 'offset': 0})
        total_count = init_data['total_count']
        i = 0
        limit = 1000
        while i < total_count:
            devices_data = self._get(path, {'limit': limit, 'offset': i})
            devices = devices + devices_data['Devices']
            i += limit
        return devices

    def doql(self, url, method, query=None):
        """Run a DOQL query via POST, returning parsed JSON rows.

        NOTE(review): the `method` parameter is accepted but never used --
        the request is always a POST.
        """
        path = url
        if query is None:
            query = "SELECT * FROM view_device_v1 order by device_pk"
        data = {"output_type": "json", "query": query}
        result = self._post(path, data)
        return result

    def request(self, source_url, method, model):
        """GET `source_url` and collect all `model` records across pages.

        Returns the accumulated list for GET requests; returns None for any
        other `method` (no other verbs are implemented here).
        """
        models = []
        if method == "GET":
            result = self._get(source_url)
            if model in result:
                models = result[model]
            limit = 0
            total_count = 0
            if "limit" in result:
                limit = result["limit"]
            if "total_count" in result:
                total_count = result["total_count"]
            offset = limit
            # NOTE(review): if the API reports limit == 0 while
            # total_count > 0, offset never advances and this loop spins.
            while offset < total_count:
                result = self._get(source_url, data={"offset":offset, "limit":limit})
                if model in result:
                    models += result[model]
                offset += limit
            return models
| StarcoderdataPython |
3336364 | import pymysql
from pymysql import OperationalError
def test_pymysql_connect_returns_error():
    """Exercise pymysql.connect() with no arguments (expected to fail).

    NOTE(review): both handlers swallow the exception and nothing is asserted,
    so this test also passes if connect() unexpectedly succeeds; the second
    handler catches everything the first does not, including KeyboardInterrupt.
    """
    try:
        connection = pymysql.connect()
    except OperationalError as err:
        pass
    except BaseException as err:
        pass
| StarcoderdataPython |
6596185 | <gh_stars>0
import random
from exceptions import *
# Pool of digits the secret code is drawn from.
symbols = list(range(10))
# Length of the secret code.
difficult = 4
# Codes must consist of distinct symbols.
unique_elements = True
def make_code(symbols, difficult):
    """Return `difficult` distinct symbols drawn uniformly from `symbols`.

    The input list is not mutated.
    """
    # random.sample draws without replacement, which is exactly the manual
    # choose-and-remove loop (with its copy and dead commented-out seeding)
    # that this replaces.
    return random.sample(symbols, int(difficult))
def check(code, try_code):
    """Score a guess: (exact-position matches, right-symbol-wrong-position)."""
    guess = [int(ch) for ch in try_code]
    pairs = list(zip(code, guess))
    exact = sum(1 for secret, guessed in pairs if secret == guessed)
    partial = sum(1 for secret, guessed in pairs
                  if secret != guessed and guessed in code)
    return exact, partial
def check_rules(try_code, difficult):
    """Validate a guess; raises LengthError / UniqueError on rule violations.

    Returns None on success -- callers must rely on the exceptions, not the
    return value.
    """
    if len(try_code) != difficult:
        raise LengthError
    if len(try_code) != len(set(try_code)):
        raise UniqueError
def main():
    """Run the interactive bulls-and-cows loop until the code is guessed."""
    code = make_code(symbols, difficult)
    print('Try to guess secret code for the minimum attempts')
    attempts = 0
    while True:
        print(f'Input {difficult} numbers:')
        inp = input()
        try:
            try_code = [int(i) for i in inp]
        except ValueError:
            print('some symbols not numbers')
            continue
        # BUG FIX: check_rules takes (try_code, difficult) and signals
        # problems by *raising*, not by returning a truthy value. The
        # original called it with one argument (a TypeError at runtime) and
        # tested its always-None return.
        try:
            check_rules(try_code, difficult)
        except (LengthError, UniqueError):
            print('invalid code, try again')
            continue
        b, w = check(code, try_code)
        print(f'bulls: {b}, cows: {w}')
        attempts += 1
        if code == try_code:
            print('Attempts: ', attempts)
            break


if __name__=='__main__':
    main()
| StarcoderdataPython |
3362095 | from fourparts import NoteProgression, ToneRow
from tests.test_structures.test_progression.test_ToneRow.tone_row_samples import TONEROW
import pytest
def test_cases():
    # Each case pairs raw note integers with the expected ToneRow fixture;
    # the second list reuses the same fixture with different raw values.
    return [
        ([9, 2, 11, 4, 5, 7, 6, 8, 1, 10, 3, 0], TONEROW),
        ([21, 2, 23, 4, 5, 7, 6, 32, 1, 10, 39, 60], TONEROW),
    ]
@pytest.mark.parametrize("note_ints, expected", test_cases())
def test_eval(note_ints, expected):
    """ToneRow built from a NoteProgression must equal the fixture."""
    assert (
        ToneRow.create_tone_row(NoteProgression.create_note_progression(note_ints))
        == expected
    )
| StarcoderdataPython |
3202338 | from django.db import models
from students.models import Class, Subject, Teacher, Student
from news.models import BaseAbstractPost
class Homework(models.Model):
    """A homework assignment issued by a teacher to one class for a subject."""
    topic = models.CharField(default='Homework', max_length=50)
    subject = models.ForeignKey(Subject, on_delete=models.CASCADE)
    clazz = models.ForeignKey(Class, on_delete=models.CASCADE)
    # Due date only (no time-of-day component).
    deadline = models.DateField(auto_now=False)
    details = models.TextField(max_length=256, blank=True)
    author = models.ForeignKey(Teacher, null=True, related_name='homeworks', on_delete=models.CASCADE)

    def __str__(self):
        return '{} ({}) - {}'.format(self.topic, self.subject, self.clazz)

    class Meta:
        # Soonest deadline first, then grouped by class and subject.
        ordering = ['deadline', 'clazz', 'subject']
class Submission(BaseAbstractPost):
    """A student's answer to a Homework, with an optional solution link."""
    homework = models.ForeignKey(Homework, related_name='submissions', on_delete=models.CASCADE)
    student = models.ForeignKey(Student, related_name='submissions', on_delete=models.CASCADE)
    content = models.TextField(max_length=2048)
    solution_url = models.URLField(blank=True)
    # Flipped once the submission has been reviewed.
    checked = models.BooleanField(default=False)

    def __str__(self):
        return '{} - {} ({})'.format(self.student, self.homework, self.posted_on)

    class Meta:
        # Newest submissions first.
        ordering = ['-posted_on', '-last_edited_on']
| StarcoderdataPython |
11220201 | import numpy as np
# import tensorboardX as tensorboard
import torch
from torch.utils import tensorboard as tensorboard
from torch.utils.data import DataLoader
from datasets.captioning_dataset import ActivityNetCaptionsDataset
from epoch_loops.captioning_epoch_loops import (greedy_decoder, save_model,
training_loop,
validation_1by1_loop,
validation_next_word_loop)
from loss.label_smoothing import LabelSmoothing
from model.captioning_module import BiModalTransformer, Transformer
from utilities.captioning_utils import average_metrics_in_two_dicts, timer
from utilities.config_constructor import Config
def train_cap(cfg):
    """Train a (bi-modal) transformer captioning model on ActivityNet Captions.

    Trains with early stopping on METEOR (averaged over val_1/val_2), logs to
    TensorBoard when cfg.to_log is set, and saves the best checkpoint.

    NOTE(review): `model` (and `optimizer`) are only bound for the modality /
    optimizer values handled below; any other cfg value raises NameError.
    """
    # doing our best to make it replicable
    torch.manual_seed(0)
    np.random.seed(0)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # preventing PyTorch from allocating memory on the default device (cuda:0) when the desired
    # cuda id for training is not 0.
    torch.cuda.set_device(cfg.device_ids[0])

    exp_name = cfg.curr_time[2:]

    train_dataset = ActivityNetCaptionsDataset(cfg, 'train', get_full_feat=False)
    val_1_dataset = ActivityNetCaptionsDataset(cfg, 'val_1', get_full_feat=False)
    val_2_dataset = ActivityNetCaptionsDataset(cfg, 'val_2', get_full_feat=False)
    # make sure that DataLoader has batch_size = 1!
    train_loader = DataLoader(train_dataset, collate_fn=train_dataset.dont_collate)
    val_1_loader = DataLoader(val_1_dataset, collate_fn=val_1_dataset.dont_collate)
    val_2_loader = DataLoader(val_2_dataset, collate_fn=val_2_dataset.dont_collate)

    if cfg.modality == 'audio_video':
        model = BiModalTransformer(cfg, train_dataset)
    elif cfg.modality in ['video', 'audio']:
        model = Transformer(train_dataset, cfg)

    criterion = LabelSmoothing(cfg.smoothing, train_dataset.pad_idx)

    if cfg.optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), cfg.lr, (cfg.beta1, cfg.beta2), cfg.eps,
                                     weight_decay=cfg.weight_decay)
    elif cfg.optimizer == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(), cfg.lr, cfg.momentum,
                                    weight_decay=cfg.weight_decay)

    if cfg.scheduler == 'reduce_on_plateau':
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, factor=cfg.lr_reduce_factor, patience=cfg.lr_patience
        )
    else:
        scheduler = None

    model.to(torch.device(cfg.device))
    model = torch.nn.DataParallel(model, cfg.device_ids)
    param_num = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f'Total Number of Trainable Parameters: {param_num / 1000000} Mil.')

    if cfg.to_log:
        TBoard = tensorboard.SummaryWriter(log_dir=cfg.log_path)
        TBoard.add_scalar('debug/param_number', param_num, 0)
    else:
        TBoard = None

    # keeping track of the best model
    best_metric = 0
    # "early stopping" thing
    num_epoch_best_metric_unchanged = 0

    for epoch in range(cfg.epoch_num):
        print(f'The best metrict was unchanged for {num_epoch_best_metric_unchanged} epochs.')
        print(f'Expected early stop @ {epoch+cfg.early_stop_after-num_epoch_best_metric_unchanged}')
        print(f'Started @ {cfg.curr_time}; Current timer: {timer(cfg.curr_time)}')

        # stop training if metric hasn't been changed for cfg.early_stop_after epochs
        if num_epoch_best_metric_unchanged == cfg.early_stop_after:
            break

        # train
        training_loop(cfg, model, train_loader, criterion, optimizer, epoch, TBoard)
        # validation (next word)
        val_1_loss = validation_next_word_loop(
            cfg, model, val_1_loader, greedy_decoder, criterion, epoch, TBoard, exp_name
        )
        val_2_loss = validation_next_word_loop(
            cfg, model, val_2_loader, greedy_decoder, criterion, epoch, TBoard, exp_name
        )
        val_avg_loss = (val_1_loss + val_2_loss) / 2

        if scheduler is not None:
            scheduler.step(val_avg_loss)

        # validation (1-by-1 word)
        if epoch >= cfg.one_by_one_starts_at:
            # validation with g.t. proposals
            val_1_metrics = validation_1by1_loop(
                cfg, model, val_1_loader, greedy_decoder, epoch, TBoard
            )
            val_2_metrics = validation_1by1_loop(
                cfg, model, val_2_loader, greedy_decoder, epoch, TBoard
            )

            if cfg.to_log:
                # averaging metrics obtained from val_1 and val_2
                metrics_avg = average_metrics_in_two_dicts(val_1_metrics, val_2_metrics)
                metrics_avg = metrics_avg['Average across tIoUs']

                TBoard.add_scalar('metrics/meteor', metrics_avg['METEOR'] * 100, epoch)
                TBoard.add_scalar('metrics/bleu4', metrics_avg['Bleu_4'] * 100, epoch)
                TBoard.add_scalar('metrics/bleu3', metrics_avg['Bleu_3'] * 100, epoch)
                TBoard.add_scalar('metrics/precision', metrics_avg['Precision'] * 100, epoch)
                TBoard.add_scalar('metrics/recall', metrics_avg['Recall'] * 100, epoch)

                # saving the model if it is better than the best so far
                if best_metric < metrics_avg['METEOR']:
                    best_metric = metrics_avg['METEOR']

                    save_model(cfg, epoch, model, optimizer, val_1_loss, val_2_loss,
                               val_1_metrics, val_2_metrics, train_dataset.trg_voc_size)
                    # reset the early stopping criterion
                    num_epoch_best_metric_unchanged = 0
                else:
                    num_epoch_best_metric_unchanged += 1

    print(f'{cfg.curr_time}')
    print(f'best_metric: {best_metric}')
    if cfg.to_log:
        TBoard.close()
| StarcoderdataPython |
11377425 | <gh_stars>1-10
#coding=utf-8
"""Module for visualizing common curve
The function of this Module is served for visualizing common curve.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
def plot_cphCoef(dfx, coef_col='coef', se_col='se(coef)', c_col='p', name_col=None, ci=0.95,
                 error_bar='hr', xlabel="Name of variable", ylabel="",
                 title="Variable's coefficient of CPH model",
                 figsize=(8, 6), save_fig_as=""):
    """Visualize variables' coefficient in lifelines.CPH model

    Parameters
    ----------
    dfx : pandas.DataFrame
        Object equals to cph.summary.
    coef_col : str
        Name of column indicating coefficient.
    se_col : str
        Name of column indicating standard error.
    c_col: str
        Name of column indicating color (p-value).
    name_col: str
        Name of x-axis's column; when None the index is used.
    ci : float
        Confidence interval, default 0.95.
    error_bar : str
        Type of error bars, 'hr' for asymmetrical error bars (hazard-ratio
        scale), 'log-hr' for symmetrical error bars (coefficient scale).
        NOTE(review): any other value silently produces an empty plot
        (the `else` branch is an unimplemented TODO).
    save_fig_as : str
        When non-empty, path to save the figure as a 600-dpi PNG.

    Returns
    -------
    None
        Plot figure of coefficient.

    Examples
    --------
    >>> plot_cphCoef(cph.summary, 'coef', 'se(coef)', 'p')
    """
    # Work on a deep copy so the caller's summary frame is never mutated.
    df = dfx.copy(deep=True)
    N = len(df)
    if name_col is None:
        name_col = 'name__'
        df[name_col] = df.index
    df['idx'] = range(N)
    df['1 - P-value'] = 1 - df[c_col]
    # Calculate CI: z-score for the two-sided interval times the SE.
    df['CI'] = abs(norm.ppf((1-ci)/2)) * df[se_col]
    # Plot figure
    fig, ax = plt.subplots(figsize=figsize)
    if error_bar == 'log-hr':
        df.plot.scatter(x='idx', y=coef_col, c='1 - P-value',
                        marker='s', s=120, cmap=plt.cm.get_cmap('YlOrRd'), ax=ax)
        ylabel = ('Coefficient' if ylabel == '' else ylabel)
        # Reference line at zero effect on the log-hazard scale.
        ax.axhline(y=0, linestyle='--', color='black', linewidth=1)
        ax.errorbar(df['idx'], df[coef_col], yerr=df['CI'],
                    ecolor='black', elinewidth=0.8, linestyle='')
    elif error_bar == 'hr':
        # calculate asymmetric bars by exponentiating the CI endpoints.
        df['er_lower'] = np.exp(df[coef_col]) - np.exp(df[coef_col] - df['CI'])
        df['er_upper'] = np.exp(df[coef_col] + df['CI']) - np.exp(df[coef_col])
        df[coef_col] = np.exp(df[coef_col])
        df.plot.scatter(x='idx', y=coef_col, c='1 - P-value',
                        marker='s', s=120, cmap=plt.cm.get_cmap('YlOrRd'), ax=ax)
        ylabel = ('Hazard Ratio' if ylabel == '' else ylabel)
        # Reference line at HR == 1 (no effect).
        ax.axhline(y=1, linestyle='--', color='black', linewidth=1)
        ax.errorbar(df['idx'], df[coef_col], yerr=[df['er_lower'].values, df['er_upper'].values],
                    ecolor='black', elinewidth=0.8, linestyle='')
    else:
        # TODO
        pass
    ax.set_xticks(list(df['idx']))
    ax.set_xticklabels(list(df[name_col]), rotation=0)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    plt.show()
    if save_fig_as != "":
        fig.savefig(save_fig_as, format='png', dpi=600, bbox_inches='tight')
    # Drop DataFrame (redundant: `df` would be released on return anyway).
    del df
3506633 | <filename>COE/contents/building/farm.py
from COE.contents.entity_types import EntityTypes
from COE.logic.Player import Player
from .storage_building import StorageBuilding
from .market import Market
class Farm(StorageBuilding):
    """3x3 food-producing storage building, unlocked in age 2 after a Market."""

    def __init__(self, resource, position: tuple, player: Player):
        super().__init__(
            name="Farm",
            hp=480,
            positions=position,
            height=3,
            width=3,
            line_of_sight=1,
            resources=resource,
            max_held=250,
            # BUG FIX: `Market.__class__.__name__` is the *metaclass* name
            # ('type' / 'ABCMeta'), not 'Market'. Use Market.__name__ so the
            # requirement actually refers to the Market building.
            required_building={Market.__name__},
            required_age=2,
            required_researches={},
            wood_required=75,
            stone_required=0,
            construction_time=24,
            melee_armor=0,
            pierce_armor=0,
            entity_type=EntityTypes.GROUND,
            player=player,
        )

    def re_seeding_farm(self):
        """Placeholder for the re-seeding action; returns its display label."""
        return "ReSeeding Farm"
| StarcoderdataPython |
4844749 | from typing import Iterable, Mapping, Sequence, Tuple
# Semantic type aliases for positions and words in a text.
Index = int
Indices = Sequence[Index]
Offset = int
Word = str
Text = Sequence[Word]

# A feature is a (name, value) pair; vectors and windows compose them.
FeatureName = str
FeatureValue = float
Feature = Tuple[FeatureName, FeatureValue]
FeatureVector = Iterable[Feature]
FeatureWindow = Iterable[Tuple[Offset, FeatureVector]]

# Maps vocabulary strings to their integer indices.
StringVocabulary = Mapping[str, Index]
6406665 | import re
from ..compatpatch import ClientCompatPatch
# User channels are addressed as 'user_<steam-of-digits>' (no leading zero).
USER_CHANNEL_ID_RE = r'^user_[1-9]\d+$'


class IGTVEndpointsMixin:
    """For endpoints in ``/igtv/``."""

    def tvchannel(self, channel_id, **kwargs):
        """
        Get channel

        :param channel_id: One of 'for_you', 'chrono_following', 'popular', 'continue_watching'
            (as returned by :meth:`tvguide`) or for a user 'user_12345' where user_id = '12345'
        """
        if (channel_id not in ('for_you', 'chrono_following', 'popular', 'continue_watching')
                and not re.match(USER_CHANNEL_ID_RE, channel_id)):
            raise ValueError(f'Invalid channel_id: {channel_id}')
        endpoint = 'igtv/channel/'
        params = {'id': channel_id}
        params.update(self.authenticated_params)
        if kwargs:
            params.update(kwargs)
        res = self._call_api(endpoint, params=params)
        if self.auto_patch:
            # List comprehension used for its side effect: patch each media
            # item in place.
            [ClientCompatPatch.media(m, drop_incompat_keys=self.drop_incompat_keys)
             for m in res.get('items', [])]
        return res

    def tvguide(self):
        """TV guide to popular, following, suggested channels, etc"""
        res = self._call_api('igtv/tv_guide/')
        if self.auto_patch:
            # Patch every media item in the guide channels and "my channel".
            for c in res.get('channels', []):
                [ClientCompatPatch.media(m, drop_incompat_keys=self.drop_incompat_keys)
                 for m in c.get('items', [])]
            [ClientCompatPatch.media(m, drop_incompat_keys=self.drop_incompat_keys)
             for m in res.get('my_channel', {}).get('items', [])]
        return res

    def search_igtv(self, text):
        """
        Search igtv

        :param text: Search term
        """
        text = text.strip()
        # The second .strip() is redundant (text is already stripped above).
        if not text.strip():
            raise ValueError('Search text cannot be empty')
        res = self._call_api('igtv/search/', query={'query': text})
        if self.auto_patch:
            for r in res.get('results', []):
                [ClientCompatPatch.media(m, drop_incompat_keys=self.drop_incompat_keys)
                 for m in r.get('channel', {}).get('items', [])]
                if r.get('user'):
                    ClientCompatPatch.user(r['user'], drop_incompat_keys=self.drop_incompat_keys)
        return res
| StarcoderdataPython |
9634272 | #import os
#import csv
import argparse
from parlai.utils.io import PathManager
from parlai.core.teachers import register_teacher, DialogTeacher
from parlai.scripts.display_model import DisplayModel
from parlai.scripts.train_model import TrainModel
# CLI: --data points at the semicolon-separated dialog CSV used by YoTeacher.
parser = argparse.ArgumentParser(description="Dataset Information")
parser.add_argument('--data', type=str, default='parlai/tasks/data/yoruba_dialog.csv', help='location of the dataset')
args = parser.parse_args()
@register_teacher("yo_teacher")
class YoTeacher(DialogTeacher):
    """ParlAI dialog teacher for a curated Yoruba dialog CSV.

    The data file is semicolon-separated with a header row; column 0 is an
    episode flag ('true'/'false') and column 1 the utterance. Rows alternate
    prompt / response.
    """

    def __init__(self, opt, shared=None):
        self.datatype = opt['datatype']
        #build(opt) # NOTE: the call to build here
        #suffix = 'train' if opt['datatype'].startswith('train') else 'dev'
        # Data path comes from the module-level --data CLI argument.
        opt['datafile'] = args.data #os.path.join(opt['datapath'], 'yoruba_dialog.csv')
        self.id = 'yodialog'
        super().__init__(opt, shared)

    def setup_data(self, path):
        """Yield (message, new_episode) pairs from the CSV at `path`."""
        # note that path is the value provided by opt['datafile']
        prompt = "" # initialize prompt message
        cnter = 0 # initialize conversation counter
        line_skipper = False # to skip 1st row of curated dailog data with header: line
        new_episode = False
        print('loading: ' + path)
        with PathManager.open(path) as data_file:
            #self.yodialog = csv.reader(data_file, delimiter=',')
            for row in data_file.readlines():
                if line_skipper:
                    row_array = row.rstrip('\n').split(';')
                    #first_item = row_array[1]
                    if cnter == 0:
                        # Even rows are prompts; column 0 flags episode start.
                        prompt = row_array[1]
                        cnter += 1
                        new_episode = True if str.lower(row_array[0]) == 'true' else False
                    else:
                        # Odd rows are the labels paired with the last prompt.
                        yield {"text": prompt, "labels": row_array[1]}, new_episode
                        cnter = 0
                else:
                    line_skipper = True
class DefaultTeacher(YoTeacher):
    """Alias required by ParlAI's task loader; identical to YoTeacher."""
    pass
# Fine-tune the 90M-parameter pretrained generator on the yo_teacher task.
TrainModel.main(
    # similar to before
    task='yo_teacher',
    model='transformer/generator',
    model_file='from_pretrained/model_yo',

    # initialize with a pretrained model
    init_model='zoo:tutorial_transformer_generator/model',
    # zoo:wizard_of_wikipedia/full_dialogue_retrieval_model/model
    # zoo:light/biranker_dialogue/model
    # zoo:pretrained_transformers/poly_model_huge_reddit/model

    # BlenderBot 90M architecture hyper-parameters (must match init_model).
    n_heads=16, n_layers=8, n_positions=512, text_truncate=512,
    label_truncate=128, ffn_size=2048, embedding_size=512,
    activation='gelu', variant='xlm',
    dict_lower=True, dict_tokenizer='bpe',
    dict_file='zoo:tutorial_transformer_generator/model.dict',
    learn_positional_embeddings=True,
    #dropout=0.1,
    #gradient_clip=0.1,
    #lr_scheduler='reduceonplateau',

    # BlenderBot 3B
    # arguments we get from the pretrained model.
    # Unfortunately, these must be looked up separately for each model. variant='xlm',
    # n_heads=32, n_layers=24, n_positions=128, text_truncate=128,
    # label_truncate=128, ffn_size=10240, embedding_size=2560,
    # activation='gelu',
    # dict_lower=True, dict_tokenizer='bpe',
    # dict_file='zoo:blender/reddit_3B/model.dict',
    # learn_positional_embeddings=True,
    # variant='prelayernorm',
    # n_encoder_layers=2,
    # n_decoder_layers=24,
    # delimiter=' ',
    # lr_scheduler='reduceonplateau',
    # model_parallel=True,

    # some training arguments, specific to this fine-tuning
    # use a small learning rate with ADAM optimizer 1e-5,
    lr=1e-05, optimizer='adam',
    warmup_updates=100,
    # early stopping on perplexity
    validation_metric='ppl',
    # train at most 10 minutes, and validate every 0.25 epochs
    max_train_time=120 * 60, validation_every_n_epochs=0.25,

    # depend on your gpu. If you have a V100, this is good
    batchsize=6, fp16=True, fp16_impl='mem_efficient',

    # speeds up validation
    skip_generation=False,

    # helps us cram more examples into our gpu at a time
    dynamic_batching='full',
)

# Show a few sampled model responses on the task after training.
DisplayModel.main(task='yo_teacher', model_file='from_pretrained/model_yo', num_examples=5, skip_generation=False)
| StarcoderdataPython |
3482873 | <filename>bsm/logger.py
import time
import logging
_MAIN_LOGGER_NAME = 'BSM'
def _time_zone(t):
if t.tm_isdst == 1 and time.daylight == 1:
tz_sec = time.altzone
tz_name = time.tzname[1]
else:
tz_sec = time.timezone
tz_name = time.tzname[0]
if tz_sec > 0:
tz_sign = '-'
else:
tz_sign = '+'
tz_offset = '%s%02d%02d' % (tz_sign, abs(tz_sec)//3600, abs(tz_sec//60)%60)
return (tz_offset, tz_name)
class JsubFormatter(logging.Formatter):
    """Formatter whose default timestamp includes milliseconds and a numeric
    time-zone offset (originally added so the offset displays correctly on
    Python 2.x as well)."""

    def formatTime(self, record, datefmt=None):
        """Format record.created.

        With `datefmt` given, behaves like the stock Formatter. Otherwise
        emits 'YYYY-mm-dd HH:MM:SS.mmm +ZZZZ TZNAME'.
        """
        ct = time.localtime(record.created)
        if datefmt:
            return time.strftime(datefmt, ct)
        # FIX: the original also computed an unused `ms` string here.
        t = time.strftime('%Y-%m-%d %H:%M:%S', ct)
        tz_offset, tz_name = _time_zone(ct)
        return '%s.%03d %s %s' % (t, record.msecs, tz_offset, tz_name)
# Verbose format: full timestamp + logger name + 4-char level.
_FORMATTER = JsubFormatter('[%(asctime)s][%(name)s|%(levelname)-4.4s] %(message)s')
#_FORMATTER = logging.Formatter('[%(asctime)s](%(name)s:%(levelname)s) %(message)s', '%Y-%m-%d %H:%M:%S')
# Minimal format used for normal (non-verbose) output.
_FORMATTER_SIMPLE = logging.Formatter('[%(levelname)s] %(message)s')
def create_stream_logger(verbose=False, quiet=False):
    """(Re)initialise the shared BSM logger with a single stream handler.

    `verbose` selects DEBUG level and the detailed formatter; `quiet`
    selects ERROR level; otherwise INFO. Any previously attached handlers
    are discarded.
    """
    logger = logging.getLogger(_MAIN_LOGGER_NAME)

    if verbose:
        level = logging.DEBUG
    elif quiet:
        level = logging.ERROR
    else:
        level = logging.INFO
    logger.setLevel(level)

    handler = logging.StreamHandler()
    handler.setFormatter(_FORMATTER if verbose else _FORMATTER_SIMPLE)

    logger.handlers = []
    logger.addHandler(handler)
def get_logger():
    """Return the shared 'BSM' logger configured by create_stream_logger()."""
    return logging.getLogger(_MAIN_LOGGER_NAME)
| StarcoderdataPython |
11296697 | <gh_stars>1-10
import sys, re
# modified from http://code.activestate.com/recipes/475116/
TERM_ESCAPE = False
class TerminalController:
    """
    A class that can be used to portably generate formatted output to
    a terminal.

    `TerminalController` defines a set of instance variables whose
    values are initialized to the control sequence necessary to
    perform a given action. These can be simply included in normal
    output to the terminal:

        >>> term = TerminalController()
        >>> print 'This is '+term.GREEN+'green'+term.NORMAL

    Alternatively, the `render()` method can used, which replaces
    '${action}' with the string required to perform 'action':

        >>> term = TerminalController()
        >>> print term.render('This is ${GREEN}green${NORMAL}')

    If the terminal doesn't support a given action, then the value of
    the corresponding instance variable will be set to ''. As a
    result, the above code will still work on terminals that do not
    support color, except that their output will not be colored.
    Also, this means that you can test whether the terminal supports a
    given action by simply testing the truth value of the
    corresponding instance variable:

        >>> term = TerminalController()
        >>> if term.CLEAR_SCREEN:
        ...     print 'This terminal supports clearning the screen.'

    Finally, if the width and height of the terminal are known, then
    they will be stored in the `COLS` and `LINES` attributes.
    """
    # NOTE(review): the examples above use Python 2 print statements, and
    # on Python 3 curses.tigetstr/tparm return bytes (see _tigetstr below).

    # Cursor movement:
    BOL = ''             #: Move the cursor to the beginning of the line
    UP = ''              #: Move the cursor up one line
    DOWN = ''            #: Move the cursor down one line
    LEFT = ''            #: Move the cursor left one char
    RIGHT = ''           #: Move the cursor right one char

    # Deletion:
    CLEAR_SCREEN = ''    #: Clear the screen and move to home position
    CLEAR_EOL = ''       #: Clear to the end of the line.
    CLEAR_BOL = ''       #: Clear to the beginning of the line.
    CLEAR_EOS = ''       #: Clear to the end of the screen

    # Output modes:
    BOLD = ''            #: Turn on bold mode
    BLINK = ''           #: Turn on blink mode
    DIM = ''             #: Turn on half-bright mode
    REVERSE = ''         #: Turn on reverse-video mode
    NORMAL = ''          #: Turn off all modes

    # Cursor display:
    HIDE_CURSOR = ''     #: Make the cursor invisible
    SHOW_CURSOR = ''     #: Make the cursor visible

    # Terminal size:
    COLS = None          #: Width of the terminal (None for unknown)
    LINES = None         #: Height of the terminal (None for unknown)

    # Foreground colors:
    BLACK = BLUE = GREEN = CYAN = RED = MAGENTA = YELLOW = WHITE = ''

    # Background colors:
    BG_BLACK = BG_BLUE = BG_GREEN = BG_CYAN = ''
    BG_RED = BG_MAGENTA = BG_YELLOW = BG_WHITE = ''

    # 'ATTRIB=capname' pairs mapping our attribute names to terminfo names.
    _STRING_CAPABILITIES = """
    BOL=cr UP=cuu1 DOWN=cud1 LEFT=cub1 RIGHT=cuf1
    CLEAR_SCREEN=clear CLEAR_EOL=el CLEAR_BOL=el1 CLEAR_EOS=ed BOLD=bold
    BLINK=blink DIM=dim REVERSE=rev UNDERLINE=smul NORMAL=sgr0
    HIDE_CURSOR=cinvis SHOW_CURSOR=cnorm""".split()
    _COLORS = """BLACK BLUE GREEN CYAN RED MAGENTA YELLOW WHITE""".split()
    _ANSICOLORS = "BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE".split()

    def __init__(self, term_stream=sys.stdout, escape=False):
        """
        Create a `TerminalController` and initialize its attributes
        with appropriate values for the current terminal.
        `term_stream` is the stream that will be used for terminal
        output; if this stream is not a tty, then the terminal is
        assumed to be a dumb terminal (i.e., have no capabilities).
        """
        # when printing things out on lines accepting user input control
        # characters must be wrapped in special characters for correct
        # word wrapping, always wrap when escape == True
        self.escape = escape

        # Curses isn't available on all platforms
        try: import curses
        except: return

        # If the stream isn't a tty, then assume it has no capabilities.
        if not term_stream.isatty(): return

        # Check the terminal type. If we fail, then assume that the
        # terminal has no capabilities.
        try: curses.setupterm()
        except: return

        # Look up numeric capabilities.
        self.COLS = curses.tigetnum('cols')
        self.LINES = curses.tigetnum('lines')

        # Look up string capabilities.
        for capability in self._STRING_CAPABILITIES:
            (attrib, cap_name) = capability.split('=')
            setattr(self, attrib, self._tigetstr(cap_name) or '')

        # Colors
        set_fg = self._tigetstr('setf')
        if set_fg:
            for i,color in zip(range(len(self._COLORS)), self._COLORS):
                setattr(self, color, curses.tparm(set_fg, i) or '')
        set_fg_ansi = self._tigetstr('setaf')
        if set_fg_ansi:
            for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
                setattr(self, color, curses.tparm(set_fg_ansi, i) or '')
        set_bg = self._tigetstr('setb')
        if set_bg:
            for i,color in zip(range(len(self._COLORS)), self._COLORS):
                setattr(self, 'BG_'+color, curses.tparm(set_bg, i) or '')
        set_bg_ansi = self._tigetstr('setab')
        if set_bg_ansi:
            for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
                setattr(self, 'BG_'+color, curses.tparm(set_bg_ansi, i) or '')

    def _tigetstr(self, cap_name):
        # String capabilities can include "delays" of the form "$<2>".
        # For any modern terminal, we should be able to just ignore
        # these, so strip them out.
        # NOTE(review): on Python 3 curses.tigetstr returns bytes (or None),
        # so re.sub with a str pattern would raise here -- this code
        # predates that; confirm before relying on it under py3.
        import curses
        cap = curses.tigetstr(cap_name) or ''
        return re.sub(r'\$<\d+>[/*]?', '', cap)

    def render(self, template):
        """
        Replace each $-substitutions in the given template string with
        the corresponding terminal control string (if it's defined) or
        '' (if it's not).
        """
        return re.sub(r'\$\$|\${\w+}', self._render_sub, template)

    def _render_sub(self, match):
        # '$$' is an escaped literal dollar; '${NAME}' maps to the capability
        # attribute of the same name.
        s = match.group()
        if s == '$$':
            return s
        else:
            rendered = getattr(self, s[2:-1])
            if self.escape:
                # \001/\002 tell readline to ignore the enclosed control
                # characters when computing the visible line width.
                rendered = '\001'+rendered+'\002'
            return rendered
# convenience methods
def colorize(str,codes='normal'):
    """Render `str` wrapped in the space-separated capability `codes`.

    NOTE(review): the first parameter shadows the builtin `str`; left
    unchanged because callers pass it positionally.
    """
    term = TerminalController(escape=TERM_ESCAPE)
    outstr = ''.join(['${%s}'%l.upper() for l in codes.split(' ')])
    return term.render(outstr+str+'${NORMAL}')

def normal(st):
    return colorize(st)

# fg colors
def black(st):
    return colorize(st, 'black')

def blue(st):
    return colorize(st, 'blue')

def green(st):
    return colorize(st, 'green')

def cyan(st):
    return colorize(st, 'cyan')

def red(st):
    return colorize(st, 'red')

def magenta(st):
    return colorize(st, 'magenta')

def yellow(st):
    return colorize(st, 'yellow')

def white(st):
    return colorize(st, 'white')

# bg colors
def bg_black(st):
    return colorize(st, 'bg_black')

def bg_blue(st):
    return colorize(st, 'bg_blue')

def bg_green(st):
    return colorize(st, 'bg_green')

def bg_cyan(st):
    return colorize(st, 'bg_cyan')

def bg_red(st):
    return colorize(st, 'bg_red')

def bg_magenta(st):
    return colorize(st, 'bg_magenta')

def bg_yellow(st):
    return colorize(st, 'bg_yellow')

def bg_white(st):
    return colorize(st, 'bg_white')

# styles
def bold(st):
    return colorize(st, 'bold')

def blink(st):
    return colorize(st, 'blink')

def reverse(st):
    return colorize(st, 'reverse')
# convenience logging functions
def info(st, fd=sys.stderr):
    """Write a bold-white INFO line to `fd` (if truthy); return the raw line.

    NOTE(review): TerminalController.CLEAR_EOL here is the *class* attribute,
    which is always '' (capabilities are only ever set on instances) --
    confirm whether a shared instance was intended.
    """
    out_st = 'INFO: %s'%st + TerminalController.CLEAR_EOL + '\n'
    if fd:
        fd.flush()
        fd.write(colorize(out_st, 'bold white'))
    return out_st

def warn(st, fd=sys.stderr):
    """Write a WARN line (magenta tag, yellow text); return the raw line."""
    out_st = 'WARN: %s'%st + TerminalController.CLEAR_EOL + '\n'
    if fd:
        fd.flush()
        fd.write(colorize('WARN: ', 'bold magenta') + colorize(st + TerminalController.CLEAR_EOL + '\n', 'bold yellow'))
    return out_st

def error(st, fd=sys.stderr):
    """Write an ERROR line in bold red; return the raw line."""
    out_st = 'ERROR: %s'%st + TerminalController.CLEAR_EOL + '\n'
    if fd:
        fd.flush()
        fd.write(colorize('ERROR: ', 'bold red') + colorize(st + TerminalController.CLEAR_EOL + '\n', 'bold red'))
    return out_st

def announce(st, fd=sys.stderr):
    """Write a centered '=' banner in bold yellow; return the raw line.

    NOTE(review): max(80, 80 - len(st)) always evaluates to 80, so the
    banner width is fixed.
    """
    out_st = '\n' + (' ' + st + ' ').center(max(80, 80 - len(st)), '=') + TerminalController.CLEAR_EOL + '\n'
    if fd:
        fd.flush()
        fd.write(colorize(out_st, 'bold yellow'))
    return out_st
def test():
    """Demo: print one sample line for every color and style helper."""
    out = sys.stdout.write
    palette = [(blue, 'blue'), (green, 'green'), (cyan, 'cyan'), (red, 'red'),
               (magenta, 'magenta'), (yellow, 'yellow'), (white, 'white')]
    out(normal('Normal colors:\n'))
    # Black needs a white background to be visible.
    out('This is %s text.\n' % bg_white(black('black')))
    for paint, label in palette:
        out('This is %s text.\n' % paint(label))
    out(bold('Bold colors:\n'))
    out('This is bold %s text.\n' % bold(bg_white(black('black'))))
    for paint, label in palette:
        out('This is bold %s text.\n' % bold(paint(label)))
    out(blink('Blinking text\n'))
    out(colorize('Dimmed text\n', 'dim'))
    out(reverse('Reverse text\n'))
TERM_ESCAPE = True
if __name__ == '__main__' :
    # Demo output; previously the announce/info/warn/error calls ran at
    # import time as a side effect of merely importing this module.
    announce('This is announce')
    info('This is info')
    warn('This is warn')
    error('This is error')
    test()
| StarcoderdataPython |
3225697 | """
Copyright 2018 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import fire
from pyhappn.happn import Relations
from pyhappn.happn import User
from pyhappn.settings import TOKEN
class HappnCli(object):
    """Command-line helpers for bulk actions on a Happn account."""
    def _get_user(self):
        """Build an authenticated User bound to the account's first device.

        Factored out of every command: the four public methods previously
        duplicated this setup verbatim.
        """
        user_inst = User(TOKEN)
        device_list = user_inst.get_device_list()
        user_inst.set_device_id(device_list[0]['id'])
        return user_inst
    def like_all(self):
        """Like every recommended profile we have no relation with yet."""
        user_inst = self._get_user()
        limit = 100
        # Page through up to 9800 recommendations in chunks of `limit`.
        for i in range(int(9800 / limit)):
            recs = user_inst.get_recommendations(limit, (i * limit))
            for rec in recs:
                relation = int(rec.get('notifier').get('my_relation'))
                if relation == Relations.none:
                    user_inst.like_user(rec['notifier']['id'])
                    print('Like {}'.format(rec['notifier']['id']))
    def hidden_all(self):
        """Reject (hide) every recommendation we already have a relation with."""
        user_inst = self._get_user()
        while True:
            recs = user_inst.get_recommendations(100)
            if not recs:
                break
            for rec in recs:
                relation = int(rec.get('notifier').get('my_relation'))
                if relation != Relations.none:
                    user_inst.reject_user(rec['notifier']['id'])
                    print('Hidden {}'.format(rec['notifier']['id']))
    def send_message_all_new(self, message):
        """Send *message* to every conversation that has no messages yet."""
        user_inst = self._get_user()
        limit = 20
        idx = 0
        while True:
            offset = idx * limit
            idx += 1
            recs = user_inst.get_conversations(offset, limit)
            if not recs:
                break
            for rec in recs:
                if not rec.get('messages'):
                    msg = {'message': message}
                    user_inst.send_message(rec['id'], msg)
    def send_message_all(self, message):
        """Send *message* once per conversation (deduplicated by id)."""
        user_inst = self._get_user()
        limit = 20
        # NOTE(review): starts at page 70 -- presumably to resume an
        # interrupted run; confirm before relying on full coverage.
        idx = 70
        messages_sent = {}
        while True:
            offset = idx * limit
            idx += 1
            recs = user_inst.get_conversations(offset, limit)
            if not recs:
                break
            for rec in recs:
                if not messages_sent.get(rec['id']):
                    msg = {'message': message}
                    user_inst.send_message(rec['id'], msg)
                    messages_sent.update({rec['id']: 1})
if __name__ == '__main__':
    fire.Fire(HappnCli)
| StarcoderdataPython |
6415949 | import datetime
from django.conf import settings
from django.db import models
from aniMango.bleach_html import bleach_tinymce, bleach_no_tags
class HomeAlert(models.Model):
    """Site-wide alert banner shown on the home page."""
    title = models.CharField(max_length=100)
    content = models.TextField()
    def __str__(self):
        return self.title
    def save(self, *args, **kwargs):
        """Sanitize user-supplied HTML before persisting.

        Fixed to accept and forward save() options (force_insert,
        update_fields, using, ...) which the previous zero-argument
        override silently discarded.
        """
        self.title = bleach_no_tags(self.title)
        self.content = bleach_tinymce(self.content)
        super(HomeAlert, self).save(*args, **kwargs)
    class Meta:
        verbose_name_plural = 'Alerts'
def get_year_choices():
    """Return (year, year) choice tuples from 1997 through the current year."""
    current = datetime.date.today().year
    return [(year, year) for year in range(1997, current + 1)]
class Exec(models.Model):
    """A society exec role held by a staff member for one academic year."""
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.PROTECT,  # Make sure deleting history is harder. If needed, just deactivate accounts -Sorc
        limit_choices_to={'is_staff': True}
    )
    exec_role = models.CharField(max_length=100)
    place_in_list = models.IntegerField(default=100)
    exec_info = models.TextField()
    # NOTE(review): both choices and default are evaluated once at import
    # time, so a long-running process will not pick up a new calendar year
    # until restart -- consider callable defaults (affects migrations).
    academic_year = models.IntegerField(
        "Academic year starting",
        choices=get_year_choices(),
        default=datetime.datetime.now().year
    )
    def __str__(self):
        return '{0!s} - {1!s} ({2!s})'.format(self.academic_year, self.user.member, self.exec_role)
    def save(self, *args, **kwargs):
        # Sanitize user-supplied HTML, then forward all save() options
        # (the previous zero-argument override dropped update_fields etc.).
        self.exec_role = bleach_no_tags(self.exec_role)
        self.exec_info = bleach_tinymce(self.exec_info)
        super(Exec, self).save(*args, **kwargs)
    class Meta:
        verbose_name_plural = 'Exec'
        ordering = ['-academic_year', 'place_in_list']
class HistoryEntry(models.Model):
    """One academic year's write-up for the society history page."""
    title = models.CharField(max_length=200)
    body = models.TextField()
    # NOTE(review): choices/default are evaluated once at import time; a
    # long-running process will not see a new year until restart.
    academic_year = models.IntegerField(
        "Academic year starting",
        choices=get_year_choices(),
        default=datetime.datetime.now().year
    )
    def __str__(self):
        return '{0!s}/{1!s} - ({2!s})'.format(self.academic_year, self.academic_year + 1, self.title)
    def save(self, *args, **kwargs):
        # Sanitize user-supplied HTML, then forward all save() options
        # (the previous zero-argument override dropped update_fields etc.).
        self.title = bleach_no_tags(self.title)
        self.body = bleach_tinymce(self.body)
        super(HistoryEntry, self).save(*args, **kwargs)
    class Meta:
        verbose_name_plural = 'History Entries'
        ordering = ['-academic_year']
| StarcoderdataPython |
6544303 | <gh_stars>0
import unittest
from easylogger import Log
class TestLogger(unittest.TestCase):
    """Integration-style tests for easylogger.Log parent/child behaviour."""
    def setUp(self) -> None:
        pass
    def test_child_log(self):
        """Children inherit the parent's level unless given their own."""
        parent = Log('parent', log_level_str='DEBUG')
        # Regular child - should inherit level
        child_1 = Log(parent, child_name='child_1')
        # Child 2 should have a different log level
        child_2 = Log(parent, child_name='child_2', log_level_str='WARN')
        # Child of a child test
        child_child = Log(child_1, child_name='child^2')
        self.assertTrue(not parent.is_child)
        self.assertTrue(child_1.log_level_int == parent.log_level_int)
        self.assertTrue(child_2.log_level_int != parent.log_level_int)
        child_child.close()
        child_2.close()
        child_1.close()
        parent.close()
    def test_none_log(self):
        """A Log built with no arguments still gets usable string names."""
        log = Log()
        self.assertTrue(isinstance(log.log_name, str))
        self.assertTrue(isinstance(log.name, str))
        log.error('Test')
        log.close()
    def test_orphan(self):
        """Test that handlers are still made in the instance of an orphaned child log"""
        log = Log(None, child_name='child', log_to_file=True)
        log.info('Test')
        # NOTE(review): this block raises the ValueError itself, so it only
        # exercises assertRaises machinery, not any Log behaviour -- was a
        # Log call intended inside the with-block?
        with self.assertRaises(ValueError) as err:
            raise ValueError('Test')
        log.close()
    def test_filehandler(self):
        """Parent owns the handlers; children share its file path."""
        log = Log('test-filehandler', log_to_file=True)
        log2 = Log(log, child_name='child')
        self.assertTrue(log2.log_to_file)
        self.assertTrue(log.log_path == log2.log_path)
        # Parent carries both stream and file handlers; child delegates.
        self.assertTrue(len(log.log_obj.handlers) == 2)
        self.assertTrue(len(log2.log_obj.handlers) == 0)
        log.error('Test exception')
        log2.info('test')
        log3 = Log(log2, child_name='child of child')
        log2.warning('Hello!')
        log3.info('HI!')
        log3.close()
        log2.close()
        log.close()
    def test_nested_logs(self):
        """Build a 10-deep chain of child logs.

        NOTE(review): no assertions here and none of the logs are closed --
        this only checks that construction does not raise.
        """
        log = Log('main', log_to_file=True)
        c_log = None
        for i in range(10):
            if c_log is None:
                c_log = Log(log, child_name=f'child_{i}')
            else:
                c_log = Log(c_log, child_name=f'child_{i}')
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
330466 | # -*- coding: utf-8 -*-
'''
Created on October 17, 2019
@author: <EMAIL>
'''
import pytest
from luna_core.common.Node import Node, CONTAINER_TYPES, ALL_DATA_TYPES
def test_patient_create():
    """A namespaced patient create string carries type, address and props."""
    create_string = Node("patient", "my_patient", properties={"namespace":"my_cohort", "Description":"a patient"}).get_create_str()
    assert "patient:globals" in create_string
    assert "qualified_address: 'my_cohort::my_patient'" in create_string
    assert "name" in create_string
    assert "Description" in create_string
def test_patient_match():
    """Match strings carry the address but omit extra properties."""
    match_string = Node("patient", "my_patient", properties={"namespace":"my_cohort", "Description":"a patient"}).get_match_str()
    assert "patient" in match_string
    assert "qualified_address: 'my_cohort::my_patient'" in match_string
    assert "Description" not in match_string
def test_cohort_create():
    """A cohort is its own namespace: address is 'name::name'."""
    create_string = Node("cohort", "my_cohort", properties={"Description":"a cohort"}).get_create_str()
    assert "cohort:globals" in create_string
    assert "qualified_address: 'my_cohort::my_cohort'" in create_string
    assert "name" in create_string
    assert "Description" in create_string
def test_cohort_match():
    match_string = Node("cohort", "my_cohort", properties={"Description":"a cohort"}).get_match_str()
    assert "cohort" in match_string
    assert "qualified_address: 'my_cohort::my_cohort'" in match_string
    assert "Description" not in match_string
def test_metadata_create():
    """Data-type nodes get a 'type-name' qualified address."""
    properties = {}
    properties['namespace'] = "my_cohort"
    properties['dim'] = 3
    create_string = Node("VolumetricImage", "SCAN-001", properties=properties).get_create_str()
    assert "VolumetricImage:globals" in create_string
    assert "qualified_address: 'my_cohort::VolumetricImage-SCAN-001'" in create_string
    assert "dim" in create_string
def test_metadata_match():
    properties = {}
    properties['namespace'] = "my_cohort"
    properties['dim'] = 3
    match_string = Node("VolumetricImage", "SCAN-002", properties=properties).get_match_str()
    # NOTE(review): the expected substring below is missing its closing
    # quote; it still matches as a prefix but may be a typo.
    assert "qualified_address: 'my_cohort::VolumetricImage-SCAN-002" in match_string
    assert "dim" not in match_string
def test_get_all_properties():
    """get_all_props() includes the implicit 'name' property."""
    properties = {}
    properties['namespace'] = "my_cohort"
    properties['dim'] = 3
    all_props = Node("VolumetricImage", "SCAN-001", properties=properties).get_all_props()
    assert "name" in all_props
def test_patient_no_namespace():
    """Without a namespace the address degenerates to the bare id."""
    node = Node("patient", "pid", properties={})
    assert node.properties['qualified_address'] == 'pid'
def test_cohort_wrong_properties():
    """A cohort constructed without an id raises TypeError."""
    with pytest.raises(TypeError):
        Node("cohort", properties={"Description":"a cohort"})
def test_patient_bad_id():
    """Ids containing ':' are rejected (reserved for address separators)."""
    with pytest.raises(ValueError):
        Node("patient", "my:patient", properties={"namespace":"my_cohort", "Description":"a patient"})
def test_cohort_bad_id():
    with pytest.raises(ValueError):
        Node("cohort", "my:cohort", properties={"Description":"a cohort"})
@pytest.mark.parametrize(('node_type'),CONTAINER_TYPES)
def test_container_types(node_type):
    """Every container type is stored verbatim as the node's 'type'."""
    node = Node(node_type, "my_node")
    assert node.properties["type"] == node_type
# Renamed from test_container_types: the duplicate name shadowed the
# container-type test above at module level, so that test never ran.
@pytest.mark.parametrize(('node_type'),ALL_DATA_TYPES)
def test_all_data_types(node_type):
    """Data-type nodes store their type and a 'type-name' address."""
    node = Node(node_type, "my_node")
    assert node.properties["type"] == node_type
    assert node.properties["qualified_address"] == node_type + '-' + "my_node"
| StarcoderdataPython |
3383199 | """ =====================================================================================
Copyright (c) 2020 <NAME>, <EMAIL>
===================================================================================== """
from utilis_prediction import *
import os
from scipy.io import loadmat, savemat
# Work around the duplicate-OpenMP-runtime abort on some MKL/Keras installs.
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
# Load the features of the field data and the lab data
x_CN = np.loadtxt('data/features_ChineseWell.csv', delimiter=',', skiprows=1)
x_US = np.loadtxt('data/features_USWell.csv', delimiter=',', skiprows=1)
x_Lab = np.loadtxt('data/features_LabData.csv', delimiter=',', skiprows=1)
# load the distribution parameters (mean/std) of the training data
dist_training_data = loadmat('data/dist_trainingData.mat')
# Normalize the field data using the distribution of the training data since they lie well
# within the training data distribution
x_CN_norm = (x_CN - dist_training_data['x_avr']) / dist_training_data['x_std']
x_US_norm = (x_US - dist_training_data['x_avr']) / dist_training_data['x_std']
# load the distribution parameters of the lab data
dist_lab_data = loadmat('data/dist_labData.mat')
# Normalize the lab data using their own distribution since they present significantly different
# distribution in both ranges and patterns from that of the training data
std_lab = dist_lab_data['x_std']
# Guard against division by zero for constant features.
std_lab[std_lab == 0] = 1
x_Lab_norm = (x_Lab - dist_lab_data['x_avr']) / std_lab
print('The feature shape of the selected sublayer of the Chinese well logs: {}'.format(x_CN_norm.shape))
print('The feature shape of the collected lab data: {}'.format(x_Lab_norm.shape))
print('The feature shape of the selected sublayer of the US well logs: {}'.format(x_US_norm.shape))
# load model (trained DNN with layer sizes [64,128,256,64])
model = load_model('ckpt/best_weight[64,128,256,64].hdf5')
# predict
y_CN_norm = model.predict(x_CN_norm, batch_size=None, verbose=1, steps=None)
y_Lab_norm = model.predict(x_Lab_norm, batch_size=None, verbose=1, steps=None)
y_US_norm = model.predict(x_US_norm, batch_size=None, verbose=1, steps=None)
# scale the raw predictions to normal ranges
# benchmark the field data with the distribution of the training data
# benchmark the lab data with their own distribution
y_CN = y_CN_norm * dist_training_data['y_std'] + dist_training_data['y_avr']
y_US = y_US_norm * dist_training_data['y_std'] + dist_training_data['y_avr']
y_Lab = y_Lab_norm * dist_lab_data['y_std'] + dist_lab_data['y_avr']
# calculate anisotropy parameters based on chosen Hudson-Cheng model
# NOTE(review): columns 4, 5 and 0 of the raw feature matrices are fed to
# Hudson_Cheng_model -- presumably velocities and porosity; confirm the
# column layout against the CSV headers.
epsilonCN, gammaCN, deltaCN, crackdCN, c33_dnnCN, c44_dnnCN = Hudson_Cheng_model(
    x_CN[:, 4], x_CN[:, 5], y_CN, x_CN[:, 0])
epsilonUS, gammaUS, deltaUS, crackdUS, c33_dnnUS, c44_dnnUS = Hudson_Cheng_model(
    x_US[:, 4], x_US[:, 5], y_US, x_US[:, 0])
epsilonLab, gammaLab, deltaLab, crackdLab, c33_dnnLab, c44_dnnLab = Hudson_Cheng_model(
    x_Lab[:, 4], x_Lab[:, 5], y_Lab, x_Lab[:, 0])
plot_results(x_CN[:, 4], x_CN[:, 5], c33_dnnCN, c44_dnnCN, 'CN')
plot_results(x_US[:, 4], x_US[:, 5], c33_dnnUS, c44_dnnUS, 'US')
plot_results(x_Lab[:, 4], x_Lab[:, 5], c33_dnnLab, c44_dnnLab, 'Lab')
plt.show()
# save predictions
savemat('pred/predictions_CN.mat',
        {'k0': y_CN[:, 0], 'mu0': y_CN[:, 1], 'alpha': y_CN[:, 2], 'dc': crackdCN,
         'epsilon': epsilonCN, 'gamma': gammaCN, 'delta': deltaCN})
savemat('pred/predictions_US.mat',
        {'k0': y_US[:, 0], 'mu0': y_US[:, 1], 'alpha': y_US[:, 2], 'dc': crackdUS,
         'epsilon': epsilonUS, 'gamma': gammaUS, 'delta': deltaUS})
savemat('pred/predictions_Lab.mat',
        {'k0': y_Lab[:, 0], 'mu0': y_Lab[:, 1], 'alpha': y_Lab[:, 2], 'epsilon': epsilonLab,
         'gamma': gammaLab, 'delta': deltaLab, 'c33': c33_dnnLab, 'c44': c44_dnnLab})
| StarcoderdataPython |
8003266 | # Imports
import json
import torch
from torchvision import datasets, transforms, models
__all__ = ["load_dir", "dataloader", "write_labels"]
def load_dir(path):
    """
    Build the train/valid/test subdirectory paths for an image dataset.
    Input: path as a String, to the parent folder
    Output: three directories, that are subfolders of the parent
    """
    subfolders = ('/train', '/valid', '/test')
    train_dir, valid_dir, test_dir = (path + sub for sub in subfolders)
    return train_dir, valid_dir, test_dir
def dataloader(dir, transforms):
    """
    Loads image data from directories
    Input: a (train, valid, test) directory tuple and a (train, eval)
        transforms pair
    Output: the training dataset plus train/valid/test dataloaders
    Example of usage:
        image, label = next(iter(train_loader))
    """
    train_dir, valid_dir, test_dir = dir
    train_tf, eval_tf = transforms[0], transforms[1]
    # Training uses the augmenting transforms; valid/test use eval ones.
    train_data = datasets.ImageFolder(train_dir, transform=train_tf)
    valid_data = datasets.ImageFolder(valid_dir, transform=eval_tf)
    test_data = datasets.ImageFolder(test_dir, transform=eval_tf)
    # All splits are shuffled; the test split uses smaller batches.
    make_loader = torch.utils.data.DataLoader
    train_loader = make_loader(train_data, batch_size=64, shuffle=True)
    valid_loader = make_loader(valid_data, batch_size=64, shuffle=True)
    test_loader = make_loader(test_data, batch_size=32, shuffle=True)
    return train_data, train_loader, valid_loader, test_loader
def write_labels(path):
    """
    Loads categorical labels for image data from a json file.
    Input: path to json file
    Output: dictionary mapping category ids to names
    """
    # fix: honor the caller-supplied path -- the previous version ignored
    # the 'path' argument and always opened 'cat_to_name.json'.
    with open(path, 'r') as f:
        cat_to_name = json.load(f)
    return cat_to_name
# Top-3 game + winning numbers of the latest draw
def test_top_3_winning_numbers_last_draw(app):
    """End-to-end UI check: the Top-3 winners report shows the winning
    numbers for the last draw, then the app returns to the main page.

    *app* is the page-object fixture; the asserted substring is the
    Russian heading "WINNING NUMBERS" and must stay byte-identical.
    """
    app.ResultAndPrizes.open_page_results_and_prizes()
    app.ResultAndPrizes.click_game_top_3()
    app.ResultAndPrizes.button_get_report_winners()
    assert "ВЫИГРЫШНЫЕ НОМЕРА" in app.ResultAndPrizes.parser_report_text_winners()
    app.ResultAndPrizes.message_id_33_top_3_last_draw()
    app.ResultAndPrizes.message_id_33_top_3_winning_numbers_last_draw()
app.ResultAndPrizes.comeback_main_page() | StarcoderdataPython |
9689769 | #!/usr/bin/env python
# Time-stamp: <2008-02-04 13:20:05 <NAME>>
"""Module Description
Copyright (c) 2007 <NAME> <<EMAIL>>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License (see the file COPYING included with
the distribution).
@status: experimental
@version: $Revision$
@author: <NAME>
@contact: <EMAIL>
"""
# ------------------------------------
# python modules
# ------------------------------------
import os
import sys
import re
from optparse import OptionParser
# ------------------------------------
# constants
# ------------------------------------
# Valid pair distance window: a left/right combination counts only when the
# separation lies strictly inside (MIN_DIST, MAX_DIST).
MIN_DIST = 50
MAX_DIST = 500
# ------------------------------------
# Classes
# ------------------------------------
class Pos:
    """A mapped tag position: chromosome name plus start coordinate."""
    def __init__(self, chr, s):
        self.chr = chr
        self.start = s


class Pair:
    """Left/right mapped positions collected for pair-end tag number *n*."""
    def __init__(self, n):
        self.n = n
        self.left = []
        self.right = []

    def addleft(self, pos):
        self.left.append(pos)

    def addright(self, pos):
        self.right.append(pos)

    def pair(self):
        """Count left/right combinations on the same chromosome whose
        right-minus-left distance falls strictly inside the window."""
        hits = 0
        for right_pos in self.right:
            for left_pos in self.left:
                if left_pos.chr != right_pos.chr:
                    continue
                if MIN_DIST < right_pos.start - left_pos.start < MAX_DIST:
                    hits += 1
        return hits
# ------------------------------------
# Main function
# ------------------------------------
def main():
    """Parse an xMAN mapping file and report the unique pair-mapping ratio.

    A pair counts as uniquely mapped when exactly one left/right position
    combination on the same chromosome falls inside the distance window.
    """
    usage = "usage: %prog [options]"
    description = "Analyze the mapping result from xMAN, report the ratio for unique mapping\nIt's the step #3 after sample_seq.py (#1) and xMAN(#2) of the whole pipeline"
    optparser = OptionParser(version="%prog 0.1",description=description,usage=usage,add_help_option=False)
    optparser.add_option("-h","--help",action="help",help="Show this help message and exit.")
    optparser.add_option("-i","--ifile",dest="ifile",type="string",
                         help="input xMAN mapping file")
    optparser.add_option("-o","--ofile",dest="ofile",type="string",
                         help="output file")
    optparser.add_option("-p","--pair",dest="pair",action="store_true",
                         help="Whether or not to parse the pair-end mapping result")
    (options,args) = optparser.parse_args()
    # Both the input and the output file are mandatory.
    # (fix: this test previously referenced a misspelled 'optiions' name,
    # raising NameError whenever an option was missing)
    if not options.ifile or not options.ofile:
        optparser.print_help()
        sys.exit(1)
    ifhd = open(options.ifile,"r")
    ofhd = open(options.ofile,"w")
    # Column layout of the xMAN output (0-based, tab separated).
    col_tagno = 4
    col_chr = 2
    col_start = 3
    pairs = {}
    for l in ifhd.readlines():
        if l.startswith("#"): continue
        fields = l.split("\t")
        chr = fields[col_chr]
        start = int(fields[col_start])
        tagno = int(fields[col_tagno])
        # Even tag numbers are the right mate; fold them onto the odd
        # (left) tag number so both mates share one Pair record.
        right = False
        if tagno % 2 == 0:
            tagno -= 1
            right = True
        if tagno not in pairs:  # was pairs.has_key(), Python 2 only
            pairs[tagno] = Pair(tagno)
        if chr == "Nomatch":
            continue
        if right:
            pairs[tagno].addright(Pos(chr,start))
        else:
            pairs[tagno].addleft(Pos(chr,start))
    ns = sorted(pairs)  # was keys()+sort(), which breaks on Python 3
    total_unique_pairs = 0
    total_pairs = len(ns)
    for n in ns:
        p = pairs[n].pair()
        ofhd.write( "%d\t%d\n" % (n,p))
        if p == 1:
            total_unique_pairs += 1
    ofhd.write( "total: %d\nmapped: %d\nratio: %.2f%%\n" % (total_pairs,total_unique_pairs,float(total_unique_pairs)/total_pairs*100) )
    ofhd.close()
ofhd.close()
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Was msgl(_("...")): neither name is defined anywhere in this
        # file, which turned Ctrl-C into a NameError. Print directly.
        sys.stderr.write("\n;-) See you!\n")
        sys.exit(0)
| StarcoderdataPython |
378001 | <gh_stars>1-10
import json
import logging
import config as cfg
from modules.const import Keys, DeviceKey, AttrKey
from modules.zabbix.sender import send_to_zabbix
logger = logging.getLogger(__name__)
"""zabbixにDevice LLDデータを送信します。
"""
def send_device_discovery(data):
    """Send a disk low-level-discovery (LLD) payload to Zabbix.

    Builds one discovery row per entry of data["diskDetail"].
    """
    logger.info("Sending device discovery to zabbix")
    discovery_result = [
        {DeviceKey.DISK_KEY: disk[Keys.KEY], DeviceKey.DISK_NAME: disk[Keys.NAME]}
        for disk in data["diskDetail"]
    ]
    logger.debug(json.dumps(discovery_result, indent=2, ensure_ascii=False))
    valueStr = json.dumps({"data": discovery_result})
    # Renamed from 'data', which shadowed the function parameter.
    payload = {
        "request": "sender data",
        "data": [{
            "host": cfg.ZABBIX_HOST,
            "key": DeviceKey.ZBX_KEY,
            "value": f"{valueStr}"
        }],
    }
    send_to_zabbix(payload)
    return None
def send_device_data(data):
    """Send per-disk item values from data["diskDetail"] to Zabbix.

    Bookkeeping fields (id/key/name/interface/smart) are skipped, as are
    None values, since sending None to Zabbix is pointless.
    """
    logger.info("Send data to zabbix")
    skip_fields = [Keys.ID, Keys.KEY, Keys.NAME, Keys.INTERFACE, Keys.SMART]
    results = []
    for disk in data["diskDetail"]:
        for field, value in disk.items():
            if field in skip_fields:
                continue
            if value != None:  # noqa: E711 -- keep the original loose comparison
                results.append({
                    "host": cfg.ZABBIX_HOST,
                    "key": Keys.zabbix_key(field, disk[Keys.KEY]),
                    "value": value
                })
    send_to_zabbix({"request": "sender data", "data": results})
    return None
| StarcoderdataPython |
3438122 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
# Check that the baseline-image test data is installed. This runs at
# package import time so every test module fails fast with a clear
# message instead of obscure per-test errors.
if not os.path.exists(os.path.join(
        os.path.dirname(__file__), 'baseline_images')):
    raise IOError(
        'The baseline image directory does not exist. '
        'This is most likely because the test data is not installed. '
        'You may need to install matplotlib from source to get the '
        'test data.')
| StarcoderdataPython |
3220354 | import defs_query_estacionamento as f
# MySQL connection settings.
# NOTE(review): root with an empty password -- development only, presumably.
NAME = 'root'
SENHA = ''
HOST = 'localhost'
DATABASE = 'shopping_estacionamento'
db, cursor = f.open_db(NAME, SENHA, HOST, DATABASE)
# Seed the 'vagas' (parking spots) table: 4 blocks x 3 floors x 80 spots.
for bloco in range(1, 5):
    for andar in range(1, 4):
        for vaga in range(1, 81):
            # Section letter: two sections per floor, split at spot 25
            # (A/B on floor 1, C/D on floor 2, E/F on floor 3).
            if andar == 1 and vaga <= 25:
                secao = 'A'
            elif andar == 1:
                secao = 'B'
            elif andar == 2 and vaga <= 25:
                secao = 'C'
            elif andar == 2:
                secao = 'D'
            elif andar == 3 and vaga <= 25:
                secao = 'E'
            elif andar == 3:
                secao = 'F'
            # Spots 50-69 are motorcycle spots; spots 70+ are accessible
            # (PCD); everything else is a normal car spot.
            tipo_veiculo_vaga = 'moto' if 50 <= vaga < 70 else 'carro'
            tipo = 'PCD' if vaga >= 70 else 'Normal'
            if db:
                # All interpolated values are generated locally (ints and
                # fixed strings), so the f-string SQL is not user-exposed.
                cursor.execute(f"""insert into vagas(bloco,andar,secao,vaga,tipo,tipo_veiculo_vaga,status_vaga)
                values ('{bloco}','{andar}','{secao}','{vaga}','{tipo}','{tipo_veiculo_vaga}','livre')""")
                db.commit()
db.close()
| StarcoderdataPython |
8073327 | <reponame>eugman/eugeneQuest
from app import app, db
from app.models import *
from app.config import *
from typing import List
from flask import render_template, request, Response
from flask_sqlalchemy import SQLAlchemy
@app.route('/weeklies', methods=['GET', 'POST'])
def weeklies():
    """Render open/completed weeklies; on POST, mark one weekly complete."""
    player = db.session.query(Player).get(1)
    player.messages = ""
    result = request.form
    if result.get("complete"):
        weekly_id = result.get("weekly_id")
        # fix: was db.session.query(weekly), which referenced the local
        # variable being assigned (NameError) instead of the Weekly model.
        weekly = db.session.query(Weekly).get(weekly_id)
        weekly.completed = True
        weekly.completedLast = datetime.datetime.now()
        db.session.commit()
        # NOTE(review): 'points' is not defined in this scope -- this line
        # raises NameError when a weekly is completed. Presumably it should
        # be the weekly's point value; confirm against the Weekly model.
        player.messages += addPoints(db, points)
    openweeklies = getQuests("Open")
    completedweeklies = getQuests("Completed")
    return render_template("weeklies.html", weeklies=openweeklies, completed=completedweeklies, player=player)
def getQuests(status: str = "Open") -> List[Weekly]:
    """Takes in types of quests and returns a list of weeklies."""
    hour = datetime.datetime.now().hour
    # Work context: Mon-Fri (weekday 0-4) between 09:00 and 17:59.
    is_weekday = datetime.datetime.today().weekday() in (0, 1, 2, 3, 4)
    isWork = 1 if is_weekday and 9 <= hour < 18 else -1
    qs = Weekly.query
    # Filter based on the requested status; anything else means "All".
    if status == "Open":
        qs = qs.filter_by(completed=False)
    elif status == "Completed":
        qs = qs.filter_by(completed=True)
    weeklies = qs.all()
    # Keep quests that apply anywhere (0) or match the current context.
    return [w for w in weeklies if w.isWork == 0 or w.isWork == isWork]
| StarcoderdataPython |
1630521 | from rest_framework import viewsets
from serializers import OrderSerializer
from models import *
# Create your views here.
class OrderViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows orders to be browsed and edited
    (ModelViewSet provides list/retrieve/create/update/destroy).
    """
    # All orders are exposed; ModelViewSet derives every action from this.
    queryset = Order.objects.all()
    serializer_class = OrderSerializer
| StarcoderdataPython |
5126506 | <filename>Basic/ratings-counter.py<gh_stars>0
from pyspark.sql import SparkSession
from pyspark.sql.functions import col
import collections
import datetime
# Build (or reuse) a SparkSession for this job.
spark = SparkSession\
    .builder\
    .appName("PythonRatings")\
    .getOrCreate()
print("-------------------------------------#Program Started--------------------------------------")
now = datetime.datetime.now()
print(now.strftime("%Y-%m-%d %H:%M:%S"))
# Read the MovieLens movies file; with no header/schema the columns are
# the string-typed defaults _c0, _c1, _c2.
df = spark.read.csv(r"D:\dataset\ml-20m\movies.csv")
# Leftover from an earlier ratings-count version of this script:
#ratings = lines.map(lambda x: x.split(",")[2])
#result = lines.countByValue()
# NOTE(review): _c0 is sorted as a *string* (so '9' > '10'), and collect()
# pulls the entire dataset onto the driver.
data=df.sort(col("_c0").desc()).collect()
for key in data:
    print(key[0]+" "+key[1]+" "+key[2])
print("-------------------------------------#Program Stopped--------------------------------------")
now = datetime.datetime.now()
print(now.strftime("%Y-%m-%d %H:%M:%S")) | StarcoderdataPython |
11396433 | # Copyright (c) 2020 kamyu. All rights reserved.
#
# Google Code Jam 2013 Round 1B - Problem C. Garbled Email
# https://code.google.com/codejam/contest/2434486/dashboard#s=p2
#
# Time: hash: O(N * L^3), N is the number of words
# , L is the max length of words
# dp: O(S * D * L^4)
# Space: hash: O(N * L^3)
# dp: O(S * D)
#
def garbled_email():
    """Decode one test case (Python 2): read a garbled string S and return
    the minimum number of changed characters so S splits into dictionary
    words, with changed positions at least D apart.

    dp[i][j] = min changes to decode the prefix S[:i], where j in [-D, 0)
    is the (clamped at -D) offset of the most recent changed character
    relative to position i; dp[0][-D] seeds "no change yet".
    """
    S = raw_input().strip()
    dp = [[float("inf") for _ in xrange(D)] for _ in xrange(len(S)+1)]
    dp[0][-D] = 0
    for i in xrange(len(S)):
        for j in xrange(-D, 0):
            # Try every dictionary-length word starting at position i.
            for l in xrange(1, min(L, len(S)-i)+1):
                word = S[i:i+l]
                # no change
                if word in LOOKUP:
                    prev = max(j-l, -D) # merge states
                    dp[i+l][prev] = min(dp[i+l][prev], dp[i][j])
                # one change: '*' marks the changed position, which must be
                # at least D past the previous change (index >= j+D).
                for k in xrange(j+D, l):
                    if word[:k]+'*'+word[k+1:] not in LOOKUP:
                        continue
                    prev = max(k-l, -D) # merge states
                    dp[i+l][prev] = min(dp[i+l][prev], dp[i][j]+1)
                # two changes (since D = 5, and L = 10 in the given dictionary, there is at most 2 changes)
                for k in xrange(j+D, l):
                    for m in xrange(k+D, l):
                        if word[:k]+'*'+word[k+1:m]+'*'+word[m+1:] not in LOOKUP:
                            continue
                        prev = max(m-l, -D) # merge states
                        dp[i+l][prev] = min(dp[i+l][prev], dp[i][j]+2)
    return min(dp[-1])
# D: minimum gap between changed characters; L: longest dictionary word.
D, L = 5, 0
# LOOKUP holds every dictionary word plus every variant with one or two
# positions (>= D apart) wildcarded as '*', enabling O(1) membership
# tests inside the DP above.  (Python 2: xrange, print statement.)
LOOKUP = set()
with open("garbled_email_dictionary.txt") as f:
    for line in f:
        word = line.strip()
        L = max(L, len(word))
        LOOKUP.add(word)
        for j in xrange(len(word)):
            LOOKUP.add(word[:j]+'*'+word[j+1:])
        for j in xrange(len(word)-D):
            for k in xrange(j+D, len(word)):
                LOOKUP.add(word[:j]+'*'+word[j+1:k]+'*'+word[k+1:])
# First stdin line is the case count; each case reads its own line.
for case in xrange(input()):
    print 'Case #%d: %s' % (case+1, garbled_email())
| StarcoderdataPython |
8102425 | from .resnet import *
from .mobilenet import *
from .mnasnet import *
from .hrnet import *
| StarcoderdataPython |
207087 | import cv2
import time
import mediapipe as np
import math
class handDetector():
    """Wrapper around MediaPipe Hands: detect hands in BGR frames and expose
    landmark positions, raised-finger flags and landmark distances.

    NOTE(review): the mediapipe package is imported as 'np' at file level,
    which is easy to confuse with numpy -- worth renaming on a refactor.
    """
    def __init__(self, mode=False, maxhands=2, detectcon=0.5, trackcon=0.5):
        # mode / maxhands / detectcon / trackcon are forwarded verbatim to
        # mediapipe's Hands constructor.
        self.mode=mode
        self.maxhands=maxhands
        self.detectcon=detectcon
        self.trackcon=trackcon
        self.nphands=np.solutions.hands
        # NOTE(review): newer mediapipe releases require keyword arguments
        # here; positional args only work on older versions -- confirm the
        # pinned mediapipe version.
        self.hands=self.nphands.Hands(self.mode,self.maxhands, self.detectcon, self.trackcon)
        self.npDraw= np.solutions.drawing_utils
        # Landmark ids of the five fingertips (thumb..pinky).
        self.tipIds=[4, 8, 12, 16, 20]
    def findhands(self, img, draw=True):
        """Detect hands in BGR image *img*; optionally draw the landmark
        skeleton onto it. Stores raw results on self.results and returns
        the (possibly annotated) image."""
        imgRGB=cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results=self.hands.process(imgRGB)
        if self.results.multi_hand_landmarks:
            for singlehand in self.results.multi_hand_landmarks:
                if draw:
                    self.npDraw.draw_landmarks(img, singlehand, self.nphands.HAND_CONNECTIONS)
        return img
    def findpositions(self, img, handno=0, draw=True):
        """Return ([[id, x_px, y_px], ...], boundary_box) for hand *handno*
        of the most recent findhands() call.

        boundary_box is (xmin, ymin, xmax, ymax) in pixels, or [] when no
        hand was detected. Must be called after findhands().
        """
        xlist=[]
        ylist=[]
        boundarybox=[]
        self.lmlist=[]
        if self.results.multi_hand_landmarks:
            myhand=self.results.multi_hand_landmarks[handno]
            for id, lm in enumerate(myhand.landmark):
                #print(id,lm)
                # Landmarks are normalized to [0,1]; convert to pixels.
                h,w,c= img.shape
                cx, cy= int(lm.x*w), int(lm.y*h)
                xlist.append(cx)
                ylist.append(cy)
                #print(id, cx, cy) #location with id
                self.lmlist.append([id, cx,cy])
                #marking a specific id
                # NOTE(review): 2500 exceeds the 0-255 BGR channel range.
                if draw:
                    cv2.circle(img, (cx,cy), 5, (2500,0,250), cv2.FILLED)
            xmin, xmax=min(xlist), max(xlist)
            ymin, ymax=min(ylist), max(ylist)
            boundarybox= xmin, ymin, xmax, ymax
            #if draw:
                #cv2.rectangle(img, (boundarybox[0]-20, boundarybox[1]-20), (boundarybox[2]+20, boundarybox[3]+20), (0,255,0), 2)
        return self.lmlist, boundarybox
    def fingersup(self):
        """Return five 1/0 flags (thumb..pinky): 1 when the fingertip's y is
        above the joint two landmarks below it (x-comparison for the thumb).
        Requires a prior findpositions() call."""
        fingers=[]
        #thumb
        if self.lmlist[self.tipIds[0]][1] > self.lmlist[self.tipIds[0] - 1] [1]:
            fingers.append(1)
        else:
            fingers.append(0)
        #fingers
        for id in range(1,5):
            if self.lmlist[self.tipIds[id]][2] < self.lmlist[self.tipIds[id] - 2] [2]:
                fingers.append(1)
            else:
                fingers.append(0)
        return fingers
    def finddistance(self, p1, p2, img, draw=True):
        """Pixel distance between landmarks *p1* and *p2*; draws endpoints,
        the connecting line and the midpoint onto *img*. Returns
        (length, img, (x1, y1, x2, y2, cx, cy)).

        NOTE(review): the 'draw' parameter is accepted but never checked.
        """
        x1, y1=self.lmlist[p1][1], self.lmlist[p1][2]
        x2, y2=self.lmlist[p2][1], self.lmlist[p2][2]
        cx, cy= (x1+x2)//2, (y1+y2)//2
        cv2.circle(img, (x1,y1), 15, (255,0,255), cv2.FILLED)
        cv2.circle(img, (x2,y2), 15, (255,0,255), cv2.FILLED)
        cv2.line(img, (x1, y1), (x2,y2), (255,0,255), 2)
        cv2.circle(img, (cx, cy), 15, (255,0,255), cv2.FILLED)
        length=math.hypot(x2-x1, y2-y1)
        #print(length)
        return length, img, (x1,y1, x2,y2, cx,cy)
def main():
    """Webcam demo: draw hand landmarks and FPS until interrupted."""
    ptime = 0
    cap = cv2.VideoCapture(0)
    detector = handDetector()
    while True:
        success, img = cap.read()
        img = detector.findhands(img)
        # fix: findpositions() returns (landmark_list, boundary_box); the
        # old code bound the whole 2-tuple to lmlist, so the emptiness
        # check was always true and lmlist[0] printed the full landmark
        # list instead of the wrist landmark.
        lmlist, bbox = detector.findpositions(img)
        if len(lmlist) != 0:
            print(lmlist[0])
        ctime = time.time()
        fps = 1 / (ctime - ptime)
        ptime = ctime
        cv2.putText(img, str(int(fps)), (5, 100), cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (20, 0, 20), 4)
        cv2.imshow("image", img)
        cv2.waitKey(1)
if __name__=="__main__":
main() | StarcoderdataPython |
1790380 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
from requests import Response
from typing import Any, Optional, Mapping
Headers = Optional[Mapping[str, str]]


class RequestError(Exception):
    """Raised when a request to the server fails (non-success response)."""

    def __init__(self, url: str, headers: Headers, body: Any, response: Response) -> None:
        # Keep the full request/response context for diagnostics.
        self.url = url
        self.headers = headers
        self.body = body
        self.response = response
        self.status_code = response.status_code
        self.text = response.text
        super().__init__(self.message())

    def message(self) -> str:
        return "code {}, body: {}".format(self.status_code, self.text)
class FormatError(Exception):
    """Raised when the server's response does not have the expected format."""

    def __init__(self, url: str, headers: Headers, body: Any, json: str, key: str) -> None:
        self.url = url
        self.headers = headers
        self.body = body
        self.json = json
        self.key = key
        super().__init__(self.message())

    def message(self) -> str:
        # Here 'json' is the module: the constructor parameter of the same
        # name only shadows it inside __init__.
        pretty = json.dumps(self.json, indent=4)
        return "missing key: {}, body: {}".format(self.key, pretty)
| StarcoderdataPython |
5123364 | <reponame>Satyam-Bhalla/Competitive-Coding
# A Dynamic Programming based Python Program for the Egg Dropping Puzzle
# Sentinel "infinity" used when minimizing trial counts.
INT_MAX = 10000000000
def eggDrop(n, k):
    """Minimum number of trials that guarantees finding the critical floor
    with n eggs and k floors (classic egg-dropping DP, O(n * k^2))."""
    # table[i][j] = min trials needed with i eggs and j floors.
    table = [[0] * (k + 1) for _ in range(n + 1)]
    # Base cases: zero floors cost nothing, one floor costs one trial.
    for eggs in range(1, n + 1):
        table[eggs][1] = 1
        table[eggs][0] = 0
    # With a single egg we must scan floor by floor.
    for floors in range(1, k + 1):
        table[1][floors] = floors
    for eggs in range(2, n + 1):
        for floors in range(2, k + 1):
            best = INT_MAX
            for x in range(1, floors + 1):
                # Drop from floor x: if it breaks we have eggs-1 eggs for
                # the x-1 floors below; otherwise eggs for floors-x above.
                candidate = 1 + max(table[eggs - 1][x - 1], table[eggs][floors - x])
                if candidate < best:
                    best = candidate
            table[eggs][floors] = best
    return table[n][k]
# Simple CLI driver: the first line gives the number of test cases; each
# case then supplies "n k" (egg count, floor count) on its own line.
t = int(input())
for _ in range(t):
    n,k = map(int,input().split())
print(eggDrop(n,k)) | StarcoderdataPython |
1997067 | <reponame>WuQianyong/awesome_web
#!/usr/bin/env Python3
# -*- coding: utf-8 -*-
#
# Name : phan_demo
# Features:
# Author : qianyong
# Time : 2017-06-01 16:19
# Version: V0.0.1
#
from selenium import webdriver
import time
# profile_dir = r''
# driver = webdriver.Chrome(executable_path=r'C:\Users\wqy\AppData\Local\Google\Chrome\Application\chromedriver.exe')
# driver = webdriver.Firefox(executable_path=r'D:\Program Files (x86)\Mozilla Firefox\firefox.exe')
# --- PhantomJS scraping demo for tianyancha.com ------------------------------
# Searches for a hard-coded company name and dumps its contact / basic info.
# All user-facing strings are Chinese and are intentionally left untouched.
print('开始启动天眼查')
# Spoof a desktop Chrome user agent so the site serves the normal page.
dcap = {"phantomjs.page.settings.userAgent": (
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36")}
driver = webdriver.PhantomJS(desired_capabilities=dcap)
# driver = webdriver.Firefox(executable_path='D:\\drivers\\geckodriver.exe')
driver.maximize_window()
company_name = '湖州永久电线电缆有限公司'
# Open the Tianyancha home page (retry once if an error page is served).
index_url = 'http://www.tianyancha.com/'
driver.get(index_url)
print('打开 {}'.format( driver.title))
if driver.title == 'Error':
    time.sleep(5)
    driver.get(index_url)
    print(driver.title)
time.sleep(5)
# Type the company name into the live search box.
driver.find_element_by_id("live-search").send_keys(company_name)
print('天眼查搜索 {}'.format(company_name))
time.sleep(3)
# button = driver.find_element_by_class_name('input-group-addon search_button')
# The search button has no stable id/class here, so scan <span> elements
# for the button label text instead.
button_s = driver.find_elements_by_tag_name('span')
for b in button_s:
    if b.text == '天眼一下':
        print('点击')
        b.click()
        break
time.sleep(10)
print('打开 {} 的搜索结果'.format(company_name))
# driver.get(index_url)
# time.sleep(5)
# print(driver.title)
#
#
#
#
# search_url = 'http://www.tianyancha.com/search?key={}&checkFrom=searchBox'.format(company_name)
# print(search_url)
# driver.get(search_url)
# time.sleep(30)
# print(driver.title)
# Search results: scan the result anchors for an exact company-name match.
# NOTE(review): if the result list is empty, the print just below the loop
# raises NameError (search_name/search_url unbound) -- confirm and guard.
search_list = driver.find_elements_by_xpath('//*/a[@class="query_name search-new-color ng-isolate-scope"]')
flag = False
for s_item in search_list:
    records = s_item.find_elements_by_tag_name('span')
    search_url = s_item.get_attribute('href')
    search_name = [record.text for record in records][0]
    print('{} 的 链接是 {}'.format(search_name,search_url))
    if search_name == company_name:
        flag = True
        break
print('{} 的 链接是 {}'.format(search_name, search_url))
if not flag:
    print('不存在 搜索的公司:{}'.format(company_name))
else:
    print('存在 搜索的公司:{}'.format(company_name))
    time.sleep(2)
    driver.get(search_url)
    # driver.get("http://www.tianyancha.com/company/23402373")
    print('sleep 30')
    time.sleep(30)
    print(driver.title)
    # First expand the "details" link so the full business scope is visible.
    try:
        xiang = driver.find_element_by_xpath('//*/a[@ng-show="needFolder"]')
        xiang.click()
        print('已经点击 经营范围 的详细')
        # print(xiang.text)
    except Exception as e:
        # Link absent (presumably already expanded) -- best-effort, ignore.
        # print(e)
        pass
    # Company contact information.
    contact_table = driver.find_elements_by_xpath('//*/div[@class="in-block vertical-top"]')
    contact_table2 = driver.find_elements_by_xpath('//*/div[@class="in-block vertical-top overflow-width mr20"]')
    print(len(contact_table))
    info_list = [contact_table, contact_table2]
    for info_table in info_list:
        for contact_item in info_table:
            item_list = contact_item.find_elements_by_tag_name('span')
            if item_list:
                for item in item_list:
                    print('item ----> {}'.format(item.text))
    time.sleep(1)
    # Basic company information, part 1 (main info table).
    base_table1 = driver.find_element_by_xpath('//*/table[@class="table companyInfo-table text-center f14"]')
    base_content_list = base_table1.find_elements_by_tag_name('td')
    for base_content in base_content_list:
        print('base_info ----- > {} '.format(base_content.text.replace('他的所有公司 >', '')))
    # Basic company information, part 2 (the "base2017" layout block).
    base_div = driver.find_element_by_xpath('//*/div[@class="row b-c-white company-content base2017"]')
    base_content_td_name = base_div.find_elements_by_class_name('c8')
    # base_content_td_content = base_div.find_elements_by_class_name('ng-binding')
    for z in base_content_td_name:
        if z.text:
            print(z.text)
driver.quit()
| StarcoderdataPython |
1831460 | import torch
from torch import LongTensor
from torch.utils.data import DataLoader, TensorDataset
from .Constants import *
def create_vocab(file_list, vocab_num=-1):
    """Build word<->index vocabularies from whitespace-tokenised text files.

    Args:
        file_list: paths of text files; every whitespace token, lower-cased,
            counts as a word occurrence.
        vocab_num: keep only the ``vocab_num`` most frequent words, or -1 to
            keep every distinct word.

    Returns:
        ``(word2index, index2word)`` dicts; indices for the PAD/UNK/BOS/EOS
        special symbols (from Constants) are reserved first.

    Improvements over the original: the manual count-dict + sort is replaced
    by ``collections.Counter`` (``most_common`` sorts by frequency with ties
    in first-seen order, matching the old stable sort), and the ``-1`` path
    no longer uses ``set(corpus)``, whose iteration order was
    nondeterministic across runs -- the vocabulary is now reproducible.
    """
    from collections import Counter

    def read_tokens(path):
        # One flat lower-cased token stream per file.
        with open(path, 'r') as f:
            return [word.lower() for line in f for word in line.strip('\n').split()]

    corpus = []
    for path in file_list:
        corpus.extend(read_tokens(path))

    word2index = {PAD_WORD: PAD, UNK_WORD: UNK, BOS_WORD: BOS, EOS_WORD: EOS}
    index2word = {PAD: PAD_WORD, UNK: UNK_WORD, BOS: BOS_WORD, EOS: EOS_WORD}

    counts = Counter(corpus)
    if vocab_num != -1:
        kept = [word for word, _ in counts.most_common(vocab_num)]
    else:
        # Counter preserves first-occurrence order -> deterministic indices.
        kept = list(counts)

    for word in kept:
        index = len(word2index)
        word2index[word] = index
        index2word[index] = word
    return word2index, index2word
def lang(filelist, word2index, PAD, BOS=None, EOS=None, max_len=None):
    """Load token sequences from text files and encode them as padded
    index lists.

    ``PAD``, ``BOS`` and ``EOS`` are *words* (looked up through
    ``word2index`` like any other token); BOS/EOS are optional wrappers.
    ``max_len`` truncates over-long sequences while keeping their final
    token. Unknown tokens map to the UNK_WORD index.
    """
    sequences = []
    for path in filelist:
        with open(path, 'r') as handle:
            for line in handle:
                sequences.append([tok.lower() for tok in line.strip('\n').split()])

    if max_len is not None:
        for idx, seq in enumerate(sequences):
            if len(seq) > max_len:
                # Keep the last token (typically the end marker) intact.
                sequences[idx] = seq[:max_len - 1] + [seq[-1]]

    longest = max(len(seq) for seq in sequences)

    def encode(seq):
        # Padding is computed from the bare sequence length, *before*
        # BOS/EOS are attached (mirrors the original behaviour).
        padded = list(seq)
        if BOS is not None:
            padded = [BOS] + padded
        if EOS is not None:
            padded = padded + [EOS]
        padded += [PAD] * (longest - len(seq))
        return [word2index[tok] if tok in word2index else word2index[UNK_WORD]
                for tok in padded]

    return [encode(seq) for seq in sequences]
def get_dataloader(source, target=None, src_inputs=None, src_outputs=None,
                   tgt_inputs=None, tgt_outputs=None,
                   batch_size=64, shuffle=False):
    """Wrap sequence data in a DataLoader of LongTensors.

    If all six sequences are provided (and tensor-convertible with matching
    lengths), batches are 6-tuples from a TensorDataset; otherwise batches
    come from ``source`` alone.

    Fixes over the original: the dead ``batch_size = batch_size`` line is
    removed, and the bare ``except:`` is narrowed to the errors the tensor
    conversion / dataset construction can actually raise, so unrelated
    exceptions (e.g. KeyboardInterrupt) propagate.
    """
    source = LongTensor(source)
    try:
        target = LongTensor(target)
        src_inputs = LongTensor(src_inputs)
        src_outputs = LongTensor(src_outputs)
        tgt_inputs = LongTensor(tgt_inputs)
        tgt_outputs = LongTensor(tgt_outputs)
        data = TensorDataset(source, target,
                             src_inputs, src_outputs,
                             tgt_inputs, tgt_outputs)
    except (TypeError, ValueError, RuntimeError, AssertionError):
        # None / malformed auxiliary sequences, or mismatched first
        # dimensions in TensorDataset -> fall back to source-only batches.
        data = source
    return DataLoader(data, batch_size=batch_size, shuffle=shuffle)
def get_pretrain_dataloader(source, tgt_input, tgt_output, batch_size=64, shuffle=None):
    """Bundle pre-training triples (source, decoder input, decoder target)
    into a single DataLoader of LongTensors."""
    tensors = (LongTensor(source), LongTensor(tgt_input), LongTensor(tgt_output))
    dataset = TensorDataset(*tensors)
    return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)
def translate2word(sequence, index2word):
    """Map each sequence of indices back to its sequence of words."""
    return [[index2word[idx] for idx in seq] for seq in sequence]
1729931 | <gh_stars>0
# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer.config_parser import *
# Public API of this module: the two attribute classes and their short
# aliases (defined at the bottom of the file).
__all__ = ['ParamAttr', 'ExtraAttr', 'ParameterAttribute',
           'ExtraLayerAttribute']
class ParameterAttribute(object):
    """
    Parameter Attributes object. To fine-tuning network training process, user
    can set attribute to control training details, such as l1,l2 rate / learning
    rate / how to init param.

    NOTE: IT IS A HIGH LEVEL USER INTERFACE.

    :param name: The parameter name. None to use the default
                 (layer-derived) name, see set_default_parameter_name.
    :type name: basestring or None
    :param is_static: True if this parameter will be fixed while training.
    :type is_static: bool

    :param initial_std: Gauss Random initialization standard deviation.
                        None if not using Gauss Random initialize parameter.
    :type initial_std: float or None
    :param initial_mean:  Gauss Random initialization mean.
                         None if not using Gauss Random initialize parameter.
    :type initial_mean: float or None
    :param initial_max: Uniform initialization max value.
    :type initial_max: float or None
    :param initial_min: Uniform initialization min value.
    :type initial_min: float or None
    :param l1_rate: the l1 regularization factor
    :type l1_rate: float or None
    :param l2_rate: the l2 regularization factor
    :type l2_rate: float or None
    :param learning_rate: The parameter learning rate. None means 1.
                          The learning rate when optimize is LEARNING_RATE =
                          GLOBAL_LEARNING_RATE * PARAMETER_LEARNING_RATE
                          * SCHEDULER_FACTOR.

    :type learning_rate: float or None
    :param momentum: The parameter momentum. None means use global value.
    :type momentum: float or None
    :param sparse_update: Enable sparse update for this parameter. It will
                          enable both local and remote sparse update.
    :type sparse_update: bool
    """

    def __init__(self, name=None, is_static=False, initial_std=None,
                 initial_mean=None, initial_max=None, initial_min=None,
                 l1_rate=None, l2_rate=None, learning_rate=None, momentum=None,
                 sparse_update=False):
        # initialize strategy.
        if is_static:
            # Static parameters are frozen; all init settings are ignored.
            self.attr = {'is_static': True}
        elif initial_std is None and initial_mean is None and initial_max \
                is None and initial_min is None:
            # No explicit init given: let the framework pick ("smart" init).
            self.attr = {'initial_smart': True}
        elif isinstance(initial_std, float) or isinstance(initial_mean, float):
            # Gauss random init.  NOTE(review): the isinstance checks are
            # float-only, so int arguments fall through to the RuntimeError
            # below -- confirm whether that is intended.
            self.attr = dict()
            if initial_std is not None:
                self.attr['initial_std'] = initial_std
            if initial_mean is not None:
                self.attr['initial_mean'] = initial_mean
            self.attr['initial_strategy'] = 0  # Gauss Random
        elif isinstance(initial_max, float) and isinstance(initial_min, float):
            # Uniform random init on [initial_min, initial_max], expressed
            # internally as mean +/- std.
            assert initial_min < initial_max
            initial_mean = (initial_max + initial_min) / 2
            initial_std = initial_mean - initial_min
            self.attr = dict()
            self.attr['initial_mean'] = initial_mean
            self.attr['initial_std'] = initial_std
            self.attr['initial_strategy'] = 1  # Uniform Random
        else:
            raise RuntimeError("Unexpected branch.")

        # Regularization / optimization settings only apply to trainable
        # (non-static) parameters.
        if not is_static and isinstance(l1_rate, float):
            self.attr['decay_rate_l1'] = l1_rate

        if not is_static and isinstance(l2_rate, float):
            self.attr['decay_rate'] = l2_rate

        if not is_static and isinstance(learning_rate, float):
            self.attr['learning_rate'] = learning_rate

        if not is_static and isinstance(momentum, float):
            self.attr['momentum'] = momentum

        if name is not None:
            self.attr['parameter_name'] = name

        if sparse_update:
            # Sparse update enables both local and remote sparse updates.
            self.attr['sparse_update'] = True
            self.attr['sparse_remote_update'] = True

    def set_default_parameter_name(self, name):
        """
        Set default parameter name. If parameter not set, then will use default
        parameter name.

        :param name: default parameter name.
        :type name: basestring
        """
        if 'parameter_name' not in self.attr:
            self.attr['parameter_name'] = name

    @staticmethod
    def to_bias(bias_attr):
        # Convert to the config_parser Bias() helper (star-imported above);
        # any non-ParameterAttribute value (e.g. False/None) disables bias.
        if isinstance(bias_attr, ParameterAttribute):
            return Bias(**bias_attr.attr)
        else:
            return False
class ExtraLayerAttribute(object):
    """High-level, layer-independent attributes (error clipping, dropout).

    Not every layer supports every attribute; :meth:`check` raises
    NotImplementedError when an unsupported attribute was configured.

    :param error_clipping_threshold: Error clipping threshold (> 0).
    :type error_clipping_threshold: float
    :param drop_rate: Dropout rate, i.e. the zero probability of the mask
                      applied to the layer output. See `here
                      <https://www.cs.toronto.edu/~hinton/absps/
                      JMLRdropout.pdf>`_ for details on dropout.
    :type drop_rate: float
    """

    def __init__(self, error_clipping_threshold=None, drop_rate=None):
        # Non-float values (including None) are silently ignored, exactly
        # like the original implementation.
        self.attr = {}
        if isinstance(error_clipping_threshold, float):
            assert error_clipping_threshold > 0
            self.attr["error_clipping_threshold"] = error_clipping_threshold
        if isinstance(drop_rate, float):
            assert drop_rate > 0
            self.attr["drop_rate"] = drop_rate

    def check(self, layer_name):
        # A layer advertises support for attribute <k> via a truthy
        # ``can_<k>`` attribute; anything else is rejected.
        for key in self.attr:
            if not getattr(self, 'can_%s' % key, False):
                raise NotImplementedError(
                    "Layer %s cannot support %s" % (layer_name, key))

    @staticmethod
    def to_kwargs(attr):
        return dict() if attr is None else attr.attr
# Short aliases kept for backward compatibility (both names are exported
# via __all__ above).
ParamAttr = ParameterAttribute
ExtraAttr = ExtraLayerAttribute
| StarcoderdataPython |
9694670 | import c4d
"""
Storing the AutoIK Hardcoded values to be replaced later down the road
so this isn't necessary
"""
"""Face Capture
"""
# facial_morphs = [
# c4d.FACECAPTURE_BLENDSHAPE_LEFTEYE_BLINK,
# c4d.FACECAPTURE_BLENDSHAPE_LEFTEYE_LOOKDOWN,
# c4d.FACECAPTURE_BLENDSHAPE_LEFTEYE_LOOKIN,
# c4d.FACECAPTURE_BLENDSHAPE_LEFTEYE_LOOKOUT,
# c4d.FACECAPTURE_BLENDSHAPE_LEFTEYE_LOOKUP,
# c4d.FACECAPTURE_BLENDSHAPE_LEFTEYE_SQUINT,
# c4d.FACECAPTURE_BLENDSHAPE_LEFTEYE_WIDE,
# c4d.FACECAPTURE_BLENDSHAPE_RIGHTEYE_BLINK,
# c4d.FACECAPTURE_BLENDSHAPE_RIGHTEYE_LOOKDOWN,
# c4d.FACECAPTURE_BLENDSHAPE_RIGHTEYE_LOOKIN,
# c4d.FACECAPTURE_BLENDSHAPE_RIGHTEYE_LOOKOUT,
# c4d.FACECAPTURE_BLENDSHAPE_RIGHTEYE_LOOKUP,
# c4d.FACECAPTURE_BLENDSHAPE_RIGHTEYE_SQUINT,
# c4d.FACECAPTURE_BLENDSHAPE_RIGHTEYE_WIDE,
# c4d.FACECAPTURE_BLENDSHAPE_JAW_FORWARD,
# c4d.FACECAPTURE_BLENDSHAPE_JAW_LEFT,
# c4d.FACECAPTURE_BLENDSHAPE_JAW_RIGHT,
# c4d.FACECAPTURE_BLENDSHAPE_JAW_OPEN,
# c4d.FACECAPTURE_BLENDSHAPE_MOUTH_CLOSE,
# c4d.FACECAPTURE_BLENDSHAPE_MOUTH_FUNNEL,
# c4d.FACECAPTURE_BLENDSHAPE_MOUTH_PUCKER,
# c4d.FACECAPTURE_BLENDSHAPE_MOUTH_LEFT,
# c4d.FACECAPTURE_BLENDSHAPE_MOUTH_RIGHT,
# c4d.FACECAPTURE_BLENDSHAPE_MOUTH_SMILELEFT,
# c4d.FACECAPTURE_BLENDSHAPE_MOUTH_SMILERIGHT,
# c4d.FACECAPTURE_BLENDSHAPE_MOUTH_FROWNLEFT,
# c4d.FACECAPTURE_BLENDSHAPE_MOUTH_FROWNRIGHT,
# c4d.FACECAPTURE_BLENDSHAPE_MOUTH_DIMPLELEFT,
# c4d.FACECAPTURE_BLENDSHAPE_MOUTH_DIMPLERIGHT,
# c4d.FACECAPTURE_BLENDSHAPE_MOUTH_STRETCHLEFT,
# c4d.FACECAPTURE_BLENDSHAPE_MOUTH_STRETCHRIGHT,
# c4d.FACECAPTURE_BLENDSHAPE_MOUTH_ROLLLOWER,
# c4d.FACECAPTURE_BLENDSHAPE_MOUTH_ROLLUPPER,
# c4d.FACECAPTURE_BLENDSHAPE_MOUTH_SHRUGLOWER,
# c4d.FACECAPTURE_BLENDSHAPE_MOUTH_SHRUGUPPER,
# c4d.FACECAPTURE_BLENDSHAPE_MOUTH_PRESSLEFT,
# c4d.FACECAPTURE_BLENDSHAPE_MOUTH_PRESSRIGHT,
# c4d.FACECAPTURE_BLENDSHAPE_MOUTH_LOWERDOWNLEFT,
# c4d.FACECAPTURE_BLENDSHAPE_MOUTH_LOWERDOWNRIGHT,
# c4d.FACECAPTURE_BLENDSHAPE_MOUTH_UPPERUPLEFT,
# c4d.FACECAPTURE_BLENDSHAPE_MOUTH_UPPERUPRIGHT,
# c4d.FACECAPTURE_BLENDSHAPE_BROW_DOWNLEFT,
# c4d.FACECAPTURE_BLENDSHAPE_BROW_DOWNRIGHT,
# c4d.FACECAPTURE_BLENDSHAPE_BROW_INNERUP,
# c4d.FACECAPTURE_BLENDSHAPE_BROW_OUTERUPLEFT,
# c4d.FACECAPTURE_BLENDSHAPE_BROW_OUTERUPRIGHT,
# c4d.FACECAPTURE_BLENDSHAPE_CHEEK_PUFF,
# c4d.FACECAPTURE_BLENDSHAPE_CHEEK_SQUINTLEFT,
# c4d.FACECAPTURE_BLENDSHAPE_CHEEK_SQUINTRIGHT,
# c4d.FACECAPTURE_BLENDSHAPE_NOSE_SNEERLEFT,
# c4d.FACECAPTURE_BLENDSHAPE_NOSE_SNEERRIGHT,
# c4d.FACECAPTURE_BLENDSHAPE_TONGUE_OUT
# ]
""" [ ctrl_joint, joint ]
"""
constraint_joints = [
["Collar", "jCollar"],
["ShldrBend", "jArm"],
["ForearmBend", "jForeArm"],
["Shldr", "jArm"], # Genesis 2
["ForeArm", "jForeArm"], # Genesis 2
["Hand", "jHand"],
["hip", "jPelvis"],
["pelvis", "jPelvis"],
["abdomenLower", "jSpine"],
["abdomenUpper", "jAbdomenUpper"],
["chestLower", "jChest"],
["chestUpper", "jChestUpper"],
["neckLower", "jNeck"],
["abdomen", "jSpine"], # Genesis 2
["chest", "jChest"], # Genesis 2
["neck", "jNeck"], # Genesis 2
["head", "jHead"],
["ThighBend", "jUpLeg"],
["Thigh", "jUpLeg"], # Genesis 2
["Foot", "jFoot"],
["Shin", "jLeg"],
["Toe", "jToes"],
["Index1", "jIndex1"],
["Index2", "jIndex2"],
["Index3", "jIndex3"],
["Mid1", "jMiddle1"],
["Mid2", "jMiddle2"],
["Mid3", "jMiddle3"],
["Ring1", "jRing1"],
["Ring2", "jRing2"],
["Ring3", "jRing3"],
["Pinky1", "jPink1"],
["Pinky2", "jPink2"],
["Pinky3", "jPink3"],
["Thumb1", "jThumb1"],
["Thumb2", "jThumb2"],
["Thumb3", "jThumb3"],
]
# NOTE(review): near-duplicate of constraint_joints above, with one extra
# entry ("SmallToe2_2" -> "jToes_end").  Presumably the two tables serve
# different build phases (constraining vs. rig creation) -- confirm before
# attempting to merge them.
rig_joints = [
    ["Collar", "jCollar"],
    ["ShldrBend", "jArm"],
    ["ForearmBend", "jForeArm"],
    ["Shldr", "jArm"],  # Genesis 2
    ["ForeArm", "jForeArm"],  # Genesis 2
    ["Hand", "jHand"],
    ["hip", "jPelvis"],
    ["pelvis", "jPelvis"],
    ["abdomenLower", "jSpine"],
    ["abdomenUpper", "jAbdomenUpper"],
    ["chestLower", "jChest"],
    ["chestUpper", "jChestUpper"],
    ["neckLower", "jNeck"],
    ["abdomen", "jSpine"],  # Genesis 2
    ["chest", "jChest"],  # Genesis 2
    ["neck", "jNeck"],  # Genesis 2
    ["head", "jHead"],
    ["ThighBend", "jUpLeg"],
    ["Thigh", "jUpLeg"],  # Genesis 2
    ["Foot", "jFoot"],
    ["Shin", "jLeg"],
    ["Toe", "jToes"],
    ["SmallToe2_2", "jToes_end"],
    ["Index1", "jIndex1"],
    ["Index2", "jIndex2"],
    ["Index3", "jIndex3"],
    ["Mid1", "jMiddle1"],
    ["Mid2", "jMiddle2"],
    ["Mid3", "jMiddle3"],
    ["Ring1", "jRing1"],
    ["Ring2", "jRing2"],
    ["Ring3", "jRing3"],
    ["Pinky1", "jPink1"],
    ["Pinky2", "jPink2"],
    ["Pinky3", "jPink3"],
    ["Thumb1", "jThumb1"],
    ["Thumb2", "jThumb2"],
    ["Thumb3", "jThumb3"],
]
""" [ Guide, joint ]
"""
guides_for_rig = [
["Collar", "lCollar", "chest"],
["AbdomenUpper", "abdomenUpper"],
["ChestUpper", "chestUpper"],
["Shoulder", "lShldr"], # Genesis 2
["Elbow", "lForeArm"], # Genesis 2
["Shoulder", "lShldrBend"],
["Elbow", "lForearmBend"],
["Hand", "lHand"],
["Index1", "lIndex1"],
["Index2", "lIndex2"],
["Index3", "lIndex3"],
["Index_end", "lIndex3"],
["Middle1", "lMid1"],
["Middle2", "lMid2"],
["Middle3", "lMid3"],
["Middle_end", "lMid3"],
["Ring1", "lRing1"],
["Ring2", "lRing2"],
["Ring3", "lRing3"],
["Ring_end", "lRing3"],
["Pinky1", "lPinky1"],
["Pinky2", "lPinky2"],
["Pinky3", "lPinky3"],
["Pinky_end", "lPinky3"],
["Thumb1", "lThumb1"],
["Thumb2", "lThumb2"],
["Thumb3", "lThumb3"],
["Thumb_end", "lThumb3"],
["LegUpper", "lThighBend"],
["LegUpper", "lThigh"],
["Knee", "lShin"],
["Foot", "lFoot"],
["Toes", "lToe"],
["Toes_end", "lSmallToe2_2"],
["Toes_end", "lSmallToe2"],
["Pelvis", "hip"],
["Spine_Start", "abdomenLower"],
["Chest_Start", "chestLower"],
["Neck_Start", "neckLower"],
["Neck_End", "head"],
["Spine_Start", "abdomen"],
["Chest_Start", "chest"],
["Neck_Start", "neck"],
["Head_End", "head_end"], # Labeled temp...
]
# Left-side guides that get mirrored to create the right side of the rig.
# NOTE(review): the Thumb entries are ordered end, 2, 3, 1 (unlike the
# other fingers) -- confirm whether the order matters to the mirroring code.
guides_to_mirror = [
    "Pinky_end",
    "Pinky3",
    "Pinky2",
    "Pinky1",
    "Ring_end",
    "Ring3",
    "Ring2",
    "Ring1",
    "Middle_end",
    "Middle3",
    "Middle2",
    "Middle1",
    "Index_end",
    "Index3",
    "Index2",
    "Index1",
    "Thumb_end",
    "Thumb2",
    "Thumb3",
    "Thumb1",
    "Hand",
    "Elbow",
    "Shoulder",
    "Toes_end",
    "Toes",
    "Foot",
    "Knee",
    "LegUpper",
]
""" [ ctrl_joint, Parent, Guide ]
"""
center_joints = [
["jPelvis", "", "Pelvis"],
["jSpine", "jPelvis", "Spine_Start"],
["jAbdomenUpper", "jSpine", "AbdomenUpper"],
["jChest", "jAbdomenUpper", "Chest_Start"],
["jChestUpper", "jChest", "ChestUpper"],
["jNeck", "jChestUpper", "Neck_Start"],
["jHead", "jNeck", "Neck_End"],
["jHeadEnd", "jHead", "Head_End"],
]
arm_joints = [
["jCollar", "jChestUpper", "Collar"],
["jArm", "jCollar", "Shoulder"],
["jForeArm", "jArm", "Elbow"],
["jHand", "jForeArm", "Hand"],
]
leg_joints = [
["jUpLeg", "jPelvis", "LegUpper"],
["jLeg", "jUpLeg", "Knee"],
["jFoot", "jLeg", "Foot"],
["jFoot2", "", "Foot"],
["jToes", "jFoot2", "Toes"],
["jToes_end", "jToes", "Toes_end"],
]
thumb_joints = [
["jThumb1", "", "Thumb1"],
["jThumb2", "jThumb1", "Thumb2"],
["jThumb3", "jThumb2", "Thumb3"],
["jThumb_end", "jThumb3", "Thumb_end"],
]
""" [ ctrl_shape, joint, preset, constraint, parent ]
"""
ik_controls = [
["IK_Foot", "jFoot", "zeroRotInvisible"],
["Toe_Rot", "jToes", "sphereToe"],
["Foot_Roll", "jToes", "cube"],
["IK_Hand", "jHand", "cube"],
["Collar_ctrl", "jCollar", "collar"],
["Foot_Platform", "IK_Foot", "Foot_Platform"],
["ToesEnd", "jToes_end", "none"],
["Pelvis_ctrl", "jPelvis", "pelvis"],
["ForearmTwist_ctrl", "lForearmTwist", "twist"],
["ForearmTwist_ctrl___R", "rForearmTwist", "twist"],
["Spine_ctrl", "jSpine", "spine"],
["AbdomenUpper_ctrl", "jAbdomenUpper", "spine"],
["ChestUpper_ctrl", "jChestUpper", "spine"],
["Foot_PlatformBase", "jFoot", "Foot_PlatformNEW"],
["Foot_PlatformBase___R", "jFoot___R", "Foot_PlatformNEW"],
["Chest_ctrl", "jChest", "spine"],
["Neck_ctrl", "jNeck", "neck"],
["Head_ctrl", "jHead", "head"],
]
ik_tags = [
["jUpLeg", "jFoot", "IK_Foot", "jUpLeg.Pole", "LegUpper", "Negative"],
["jArm", "jHand", "IK_Hand", "jArm.Pole", "Shoulder", ""],
]
daz_controls = [
["IK_Foot", "Foot", "zeroRotInvisible", "None"],
["Toe_Rot", "Toe", "sphereToe", "None"],
["Foot_Roll", "Toe", "cube", "None"],
["IK_Hand", "Hand", "cube", "None"],
["Collar_ctrl", "Collar", "collar", ""],
["Foot_Platform", "IK_Foot", "Foot_Platform", "UPVECTOR"],
["Pelvis_ctrl", "hip", "pelvis"],
["ForearmTwist_ctrl", "lForearmTwist", "twist"],
["ForearmTwist_ctrl___R", "rForearmTwist", "twist"],
["Spine_ctrl", "abdomenLower", "spine"],
["AbdomenUpper_ctrl", "abdomenUppe", "spine"],
["ChestUpper_ctrl", "chestUpper", "spine"],
["Foot_PlatformBase", "Foot", "Foot_PlatformNEW"],
["Chest_ctrl", "chest", "spine"],
["Neck_ctrl", "neck", "neck"],
["Head_ctrl", "head", "head"],
]
daz_tags = [
["Shin", "Foot", "IK_Foot", "Shin.Pole", "ThighBend", "Negative"],
["ShldrBend", "Hand", "IK_Hand", "ShldrBend.Pole", "ForeArm", ""],
]
| StarcoderdataPython |
1769774 | <reponame>CI-WATER/tethysapp-parleys_creek_management
import os
from datetime import datetime
from time import time
from django.http import JsonResponse
from django.shortcuts import render, redirect
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from xlrd import open_workbook, xldate_as_tuple
from itertools import izip
from ..model import ManagementScenario, SessionMaker
from ..model import (LITTLE_DELL_VOLUME,
LITTLE_DELL_RELEASE,
LITTLE_DELL_SPILL,
MOUNTAIN_DELL_VOLUME,
MOUNTAIN_DELL_RELEASE,
MOUNTAIN_DELL_SPILL,
DELL_CREEK_INFLOW,
LAMBS_CREEK_INFLOW,
RELIABILITY)
from ..lib.goldsim import runLittleDellGoldSim
from ..lib import get_package_name, CKAN_ENGINE
def jobs(request):
    """Render the paginated list of the current user's management scenarios.

    Scenarios are ordered most-recently-updated first, ten per page; the
    ``page`` query parameter selects the page (defaulting to the first and
    clamping to the last).

    Fixes over the original: the unused ``pager_format`` string is removed,
    and the EmptyPage fallback now uses ``paginator.num_pages`` -- the old
    ``paginator.page(len(scenarios_list))`` re-raised EmptyPage whenever
    the item count exceeded the page count.
    """
    user_id = request.user.id
    session = SessionMaker()
    # user_id is stored as a string in the scenario table, hence str().
    scenarios_list = session.query(ManagementScenario.id,
                                   ManagementScenario.name,
                                   ManagementScenario.description,
                                   ManagementScenario.last_updated,
                                   ManagementScenario.job_status,
                                   ManagementScenario.percentage,
                                   ManagementScenario.results_link). \
        filter(ManagementScenario.user_id == str(user_id)). \
        order_by(ManagementScenario.last_updated.desc()). \
        all()

    page_number = request.GET.get('page')
    paginator = Paginator(scenarios_list, 10)
    try:
        scenarios = paginator.page(page_number)
    except PageNotAnInteger:
        # Missing/invalid page parameter: deliver the first page.
        scenarios = paginator.page(1)
    except EmptyPage:
        # Page number out of range: deliver the last page.
        scenarios = paginator.page(paginator.num_pages)

    context = {'scenarios': scenarios,
               'paginator': paginator,
               'statuses': ('pending', 'success', 'error'),
               'nav': 'scenarios'}

    return render(request, 'parleys_creek_management/jobs/jobs.html', context)
def delete(request, scenario_id):
    """Delete one of the current user's scenarios, then return to the list.

    Fix over the original: the lookup is now scoped to ``request.user``
    (consistent with jobs/run/status), so a user can no longer delete
    another user's scenario by guessing its id.
    """
    session = SessionMaker()
    scenario = session.query(ManagementScenario). \
        filter(ManagementScenario.user_id == str(request.user.id)). \
        filter(ManagementScenario.id == scenario_id). \
        one()

    session.delete(scenario)
    session.commit()

    return redirect('parleys_creek_management:jobs')
def status(request, scenario_id):
    """Return JSON job status for one scenario: its state, completion
    percentage, and (once finished) a link to the results view.

    Fixes over the original: the SQLAlchemy session is now closed (it
    leaked; run() closes its own), and the >= 100 comparison is guarded
    against a None percentage.
    """
    user_id = str(request.user.id)
    session = SessionMaker()
    try:
        scenario = session.query(ManagementScenario).get(scenario_id)

        job_status = None
        percentage = None
        link = None
        # Only expose status for scenarios owned by the requesting user.
        if scenario and scenario.user_id == user_id:
            job_status = scenario.job_status
            percentage = scenario.percentage
            link = reverse('parleys_creek_management:results_view',
                           kwargs={'scenario_id': scenario_id,
                                   'plot_name': 'little-dell-volume'})
    finally:
        session.close()

    # The results link is only meaningful once the job has completed.
    finished = percentage is not None and percentage >= 100
    json_response = {'status': job_status,
                     'percentage': percentage,
                     'link': link if finished else None}

    return JsonResponse(json_response)
def run(request, scenario_id):
    """
    Run the GoldSim model for one of the current user's scenarios.

    Walks the scenario through its lifecycle (processing -> downloading
    results -> processing results), uploads the result workbook to CKAN,
    caches the parsed series in the database, and returns a JSON summary.
    ``scenario.percentage`` is advanced at each stage so the status
    endpoint can report progress.

    BUGFIX: the response-assembly branches at the end were inverted -- the
    'message' key used to be attached only when error_message was EMPTY.
    """
    user_id = str(request.user.id)

    # Get a session; the query is scoped to the requesting user.
    session = SessionMaker()
    scenario = session.query(ManagementScenario). \
        filter(ManagementScenario.user_id == user_id). \
        filter(ManagementScenario.id == scenario_id). \
        one()

    scenario.job_status = 'processing'
    scenario.percentage = 0
    session.commit()

    # Get arguments for the web service
    arguments = scenario.get_web_service_inputs()

    # Unique output path inside the app workspace for this run's workbook.
    workspace_dir = os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'workspace')
    unique_file_name = request.user.username + datetime.now().strftime('%Y%d%m%H%M%S') + '.xls'
    out_path = os.path.join(workspace_dir, unique_file_name)

    scenario.percentage = 25
    session.commit()

    # Poll every `frequency` seconds for up to 3 minutes.
    # NOTE(review): the original comment claimed a 10-minute timeout, but
    # the code allows 3 -- confirm the intended limit.
    timeout = time() + 3 * 60  # seconds
    frequency = 3  # seconds

    # If the polling loop times out the job is reported as an error.
    job_status = 'error'
    error_message = ''

    # Start execution
    execution = runLittleDellGoldSim(arguments, out_path)

    # Check status until completion or time-out.
    while not execution.isComplete():
        if time() >= timeout:
            # kill request
            break
        execution.checkStatus(sleepSecs=frequency)

    if execution.isSucceded():
        # Update status in db
        scenario.job_status = 'downloading results'
        scenario.percentage = 50
        session.commit()

        # Get results
        execution.getOutput(out_path)
        job_status = 'success'

        # Get package name from app.ini
        package_name = get_package_name()

        result = {'success': False}

        # Push the result workbook to the CKAN dataset (best-effort).
        try:
            resource_name = scenario.name
            description = '{0} \<Created by {1} on {2}\>'.format(scenario.description, request.user.username,
                                                                 datetime.now().strftime('%B, %d %Y @ %H:%M'))
            result = CKAN_ENGINE.create_resource(dataset_id=package_name, file=out_path, name=resource_name,
                                                 format='xls', model='PCMT-GOLDSIM', description=description)
        except Exception as e:
            error_message = 'PCMT RUN WARNING: {0}'.format(e.message)
            job_status = 'error'
            print(error_message)

        # Get link of the uploaded resource.
        if result['success']:
            results_link = result['result']['url']
        else:
            error_message = 'PCMT RUN WARNING: Job execution failed.'
            results_link = None
            job_status = 'error'
            print(error_message)

        # Parse results into Python structures and cache them for plotting.
        scenario.job_status = 'processing results'
        scenario.percentage = 75
        session.commit()

        try:
            parsed_results = parse_results(out_path)
            scenario.set_results(parsed_results)
        except Exception as e:
            error_message = 'PCMT RUN WARNING: {0}'.format(e.message)
            job_status = 'error'
            print(error_message)

        # Delete the temp file in the workspace (best-effort).
        try:
            os.remove(out_path)
        except Exception as e:
            error_message = 'PCMT RUN WARNING: {0}'.format(e.message)
            print(error_message)

        # Remember where the published results live.
        scenario.results_link = results_link

    # Update status in db (both the success and the failure path).
    scenario.job_status = job_status
    scenario.percentage = 100
    session.commit()

    results_link = scenario.results_link

    # Assemble the response; include 'message' exactly when there is one.
    if error_message != '':
        json_response = {'status': job_status, 'link': results_link, 'message': error_message}
    else:
        json_response = {'status': job_status, 'link': results_link}

    session.close()

    return JsonResponse(json_response)
# Every data sheet carries three header rows above the value columns.
_HEADER_ROWS = 3


def _sheet_series(sheet, datemode, num_columns):
    """Return one [datetime, value] series per data column (1..num_columns),
    all sharing the decimal-date time column 0 converted via xldate."""
    times = [datetime(*xldate_as_tuple(stamp, datemode))
             for stamp in sheet.col_values(0)[_HEADER_ROWS:]]
    return [[list(pair) for pair in izip(times, sheet.col_values(col)[_HEADER_ROWS:])]
            for col in range(1, num_columns + 1)]


def _series_dict(title, y_axis_title, y_axis_units, series):
    """Chart-ready wrapper in the shape the results views expect."""
    return {'title': title,
            'subtitle': '',
            'y_axis_title': y_axis_title,
            'y_axis_units': y_axis_units,
            'series': series}


def parse_results(filename):
    """
    Parse the GoldSim results workbook into plain Python structures keyed
    by the series constants from the model module, plus the scalar
    RELIABILITY value.

    Refactor of the original: the three near-identical sheet-parsing
    sections are folded into _sheet_series/_series_dict, and the stray
    debug ``print results[RELIABILITY]`` is removed.
    """
    workbook = open_workbook(filename)

    # Index-based handles as fallbacks, then prefer lookup by sheet name
    # so the parser survives sheet reordering.
    little_dell = workbook.sheet_by_index(0)
    mountain_dell = workbook.sheet_by_index(1)
    inflows = workbook.sheet_by_index(2)
    reliability = workbook.sheet_by_index(3)
    for sheet_index in range(workbook.nsheets):
        sheet = workbook.sheet_by_index(sheet_index)
        sheet_name = sheet.name
        if sheet_name == 'Little Dell':
            little_dell = sheet
        elif sheet_name == 'Mountain Dell':
            mountain_dell = sheet
        elif sheet_name == 'Lambs and Dell Creeks':
            inflows = sheet
        elif sheet_name == 'Reliability':
            reliability = sheet

    results = dict()

    # Reservoir sheets: columns are time, volume, release, spill.
    ld_volume, ld_release, ld_spill = _sheet_series(little_dell, workbook.datemode, 3)
    results[LITTLE_DELL_VOLUME] = _series_dict('Little Dell Volume', 'Volume', 'kaf', ld_volume)
    results[LITTLE_DELL_RELEASE] = _series_dict('Little Dell Release', 'Flowrate', 'af/d', ld_release)
    results[LITTLE_DELL_SPILL] = _series_dict('Little Dell Spills', 'Flowrate', 'af/d', ld_spill)

    md_volume, md_release, md_spill = _sheet_series(mountain_dell, workbook.datemode, 3)
    results[MOUNTAIN_DELL_VOLUME] = _series_dict('Mountain Dell Volume', 'Volume', 'kaf', md_volume)
    results[MOUNTAIN_DELL_RELEASE] = _series_dict('Mountain Dell Release', 'Flowrate', 'af/d', md_release)
    results[MOUNTAIN_DELL_SPILL] = _series_dict('Mountain Dell Spills', 'Flowrate', 'af/d', md_spill)

    # Inflow sheet: columns are time, Dell Creek, Lambs Creek.
    dell_creek, lambs_creek = _sheet_series(inflows, workbook.datemode, 2)
    results[DELL_CREEK_INFLOW] = _series_dict('Dell Creek Inflow', 'Flowrate', 'cfs', dell_creek)
    results[LAMBS_CREEK_INFLOW] = _series_dict('Lambs Creek Inflow', 'Flowrate', 'cfs', lambs_creek)

    # Reliability is a single scalar cell in the fourth sheet.
    results[RELIABILITY] = reliability.cell_value(3, 6)

    return results
3538563 | from six import text_type, binary_type, integer_types
from openapi_core.schema.schemas.enums import SchemaFormat, SchemaType
from openapi_core.schema.schemas.exceptions import (
InvalidSchemaValue, InvalidCustomFormatSchemaValue,
OpenAPISchemaError, MultipleOneOfSchema, NoOneOfSchema,
InvalidSchemaProperty,
UnmarshallerStrictTypeError,
)
from openapi_core.schema.schemas.util import (
forcebool, format_date, format_datetime, format_byte, format_uuid,
format_number,
)
class StrictUnmarshaller(object):
    """Pass-through unmarshaller that can enforce exact Python types.

    Subclasses declare the accepted types in ``STRICT_TYPES``; an empty
    tuple (the default) disables the check entirely.
    """

    STRICT_TYPES = ()

    def __call__(self, value, type_format=SchemaFormat.NONE, strict=True):
        enforce = strict and bool(self.STRICT_TYPES)
        if enforce and not isinstance(value, self.STRICT_TYPES):
            raise UnmarshallerStrictTypeError(value, self.STRICT_TYPES)
        return value
class PrimitiveTypeUnmarshaller(StrictUnmarshaller):
    """Unmarshal a primitive value according to its schema format.

    Known ``SchemaFormat`` members resolve through the class ``FORMATTERS``
    table; any other format name falls back to the user-supplied
    ``custom_formatters`` mapping.
    """

    FORMATTERS = {
        SchemaFormat.NONE: lambda x: x,
    }

    def __init__(self, custom_formatters=None):
        self.custom_formatters = {} if custom_formatters is None else custom_formatters

    def __call__(self, value, type_format=SchemaFormat.NONE, strict=True):
        value = super(PrimitiveTypeUnmarshaller, self).__call__(
            value, type_format=type_format, strict=strict)
        formatter = self._resolve_formatter(type_format)
        if formatter is None:
            raise InvalidSchemaValue(
                "Unsupported format {type} unmarshalling "
                "for value {value}",
                value, type_format)
        try:
            return formatter(value)
        except ValueError as exc:
            raise InvalidCustomFormatSchemaValue(
                "Failed to format value {value} to format {type}: {exception}",
                value, type_format, exc)

    def _resolve_formatter(self, type_format):
        # An unknown format is not an error here: it may be served by a
        # custom formatter, and a miss simply yields ``None``.
        try:
            schema_format = SchemaFormat(type_format)
        except ValueError:
            return self.custom_formatters.get(type_format)
        return self.get_formatters().get(schema_format)

    def get_formatters(self):
        return self.FORMATTERS
class StringUnmarshaller(PrimitiveTypeUnmarshaller):
    """Unmarshaller for ``type: string`` schemas."""

    # In strict mode, only real text/bytes values are accepted.
    STRICT_TYPES = (text_type, binary_type)

    # Each supported string format maps to the callable that coerces
    # (and, for date/uuid/byte formats, validates) the raw value.
    FORMATTERS = {
        SchemaFormat.NONE: text_type,
        SchemaFormat.PASSWORD: text_type,
        SchemaFormat.DATE: format_date,
        SchemaFormat.DATETIME: format_datetime,
        SchemaFormat.BINARY: binary_type,
        SchemaFormat.UUID: format_uuid,
        SchemaFormat.BYTE: format_byte,
    }
class IntegerUnmarshaller(PrimitiveTypeUnmarshaller):
    """Unmarshaller for ``type: integer`` schemas."""

    # ``integer_types`` covers int (and long on Python 2) via six.
    STRICT_TYPES = integer_types

    # int32/int64 are both plain Python ints; the distinction is schema-level.
    FORMATTERS = {
        SchemaFormat.NONE: int,
        SchemaFormat.INT32: int,
        SchemaFormat.INT64: int,
    }
class NumberUnmarshaller(PrimitiveTypeUnmarshaller):
    """Unmarshaller for ``type: number`` schemas."""

    # Numbers may arrive as floats or ints.
    STRICT_TYPES = (float, ) + integer_types

    FORMATTERS = {
        SchemaFormat.NONE: format_number,
        SchemaFormat.FLOAT: float,
        SchemaFormat.DOUBLE: float,
    }
class BooleanUnmarshaller(PrimitiveTypeUnmarshaller):
    """Unmarshaller for ``type: boolean`` schemas."""

    STRICT_TYPES = (bool, )

    # Coercion goes through the shared ``forcebool`` helper.
    FORMATTERS = {
        SchemaFormat.NONE: forcebool,
    }
| StarcoderdataPython |
3462345 | <gh_stars>0
from django.db import models
class Video(models.Model):
    """A video stored as a display title plus its raw embed markup."""

    title = models.CharField("Title", max_length=250)  # human-readable name
    embed_code = models.TextField("Embed Code")  # raw HTML embed snippet

    def __str__(self):
        # Shown wherever Django renders the object (admin lists, shell).
        return self.title
| StarcoderdataPython |
84377 | #!/usr/bin/python3
# -*- coding: utf8 -*-
# Copyright (c) 2021 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Quantum Operation
"""
from typing import List, Union, Optional, Callable, TYPE_CHECKING
import numpy
from Quanlse.QPlatform import Error
if TYPE_CHECKING:
from Quanlse.QOperation.FixedGate import FixedGateOP
from Quanlse.QOperation.RotationGate import RotationGateOP
from Quanlse.QRegPool import QRegStorage
# Type alias for gate functions that act on one or more quantum registers
# and return nothing. The original spelled this as
# ``Callable[[*'QRegStorage'], None]``, which star-unpacks the *string*
# into its individual characters and produces a nonsense signature;
# ``Callable[..., None]`` expresses the intended variadic contract.
OperationFunc = Callable[..., None]
# Rotation angles are plain floats in this build.
RotationArgument = float
class QOperation:
    """
    Base class for quantum operations (gates).

    Holds the operation's name, the number of qubits it acts on, and an
    optional matrix representation.
    """

    def __init__(self, name: Optional[str] = None, bits: Optional[int] = None,
                 matrix: Optional[numpy.ndarray] = None) -> None:
        """
        Constructor for QOperation class.

        :param name: human-readable gate name
        :param bits: number of qubits the operation acts on
        :param matrix: matrix representation of the gate, if already known
        """
        self.name = name
        self.bits = bits
        self._matrix = matrix

    def getMatrix(self) -> numpy.ndarray:
        """
        Return the gate's matrix as a numpy ndarray.

        Dispatches on the subclass name; the concrete gate classes are only
        imported under TYPE_CHECKING, so isinstance checks are unavailable
        here. Rotation gates build their matrix lazily via ``generateMatrix``.

        :return: returned matrix in ndarray
        """
        if self.__class__.__name__ == 'FixedGateOP':
            return self._matrix
        elif self.__class__.__name__ == 'RotationGateOP':
            # Lazily computed and cached: rotation matrices depend on angles.
            if self._matrix is None:
                self._matrix = self.generateMatrix()
            return self._matrix
        elif self.__class__.__name__ == 'CustomizedGateOP':
            return self._matrix
        else:
            raise Error.ArgumentError(f'{self.__class__.__name__} do not have matrix!')

    def _op(self, qRegList: List['QRegStorage']) -> None:
        """
        Append this operation to the circuit of the registers' environment.

        Validates that all registers share one environment, that the register
        count matches ``self.bits``, and that no register is repeated.

        :param qRegList: quantum register list
        :return: None
        """
        env = qRegList[0].env
        # All registers must live in the same environment.
        for qReg in qRegList:
            if qReg.env != env:
                raise Error.ArgumentError('QReg must belong to the same env!')
        if env.__class__.__name__ == 'QProcedure':
            raise Error.ArgumentError('QProcedure should not be operated!')
        if self.bits is not None and self.bits != len(
                qRegList):  # bits is None for ops without a fixed arity (Barrier / QProcedure)
            raise Error.ArgumentError('The number of QReg must match the setting!')
        if len(qRegList) <= 0:
            raise Error.ArgumentError('Must have QReg in operation!')
        if len(qRegList) != len(set(qReg for qReg in qRegList)):
            raise Error.ArgumentError('QReg of operators in circuit are not repeatable!')
        # Record the operation as a new line in the environment's circuit.
        circuitLine = CircuitLine()
        circuitLine.data = self
        circuitLine.qRegIndexList = [qReg.index for qReg in qRegList]
        env.circuit.append(circuitLine)
env.circuit.append(circuitLine)
# Union of the concrete gate types a CircuitLine may carry; spelled with
# forward references because the gate modules are imported only for
# type checking.
Operation = Union[
    'FixedGateOP', 'RotationGateOP']
class CircuitLine:
    """
    One gate application in the quantum circuit model.

    It pairs a quantum operation with the indices of the qubits
    (quantum registers) the operation acts on.
    """

    # The original annotated these attributes as ``None`` (the value, not a
    # type) with trailing ``# type:`` comments; proper Optional annotations
    # keep static checkers useful without changing runtime behavior.
    data: Optional["Operation"]
    qRegIndexList: Optional[List[int]]

    def __init__(self, data: Optional["Operation"] = None,
                 qRegIndexList: Optional[List[int]] = None):
        r"""
        Initialize a quantum gate instance.

        :param data: a Quanlse.QOperation.Operation instance,
            the quantum gate to be applied.
        :param qRegIndexList: a list of qubit indices.
            If `gate` is a single-qubit
            gate, then `qubits` still be a List of the form `[i]`
        """
        self.data = data
        self.qRegIndexList = qRegIndexList
| StarcoderdataPython |
3451073 | <filename>problem.py
import os
import numpy as np
import pandas as pd
import rampwf as rw
from rampwf.score_types.base import BaseScoreType
from sklearn.model_selection import ShuffleSplit
from sklearn.metrics import mean_squared_error
problem_title = 'Prediction of suicide rates'
# Column the estimator must predict; dropped from the feature frame.
_target_column_name = 'rate-total'
# A type (class) which will be used to create wrapper objects for y_pred
Predictions = rw.prediction_types.make_regression()
# An object implementing the workflow
workflow = rw.workflows.Estimator()
class MSE(BaseScoreType):
    """Mean squared error score type (lower is better, bounded below by 0)."""

    is_lower_the_better = True
    minimum = 0.0
    maximum = float('inf')

    def __init__(self, name='mse', precision=5):
        self.name = name
        self.precision = precision

    def __call__(self, y_true, y_pred):
        """Return the MSE between the true and predicted targets."""
        return mean_squared_error(y_true, y_pred)
# Scores reported by ramp; MSE is the single official metric.
score_types = [
    MSE(name='mse', precision=5),
]
def get_cv(X, y):
    """Yield 8 shuffled 80/20 train/test index splits with a fixed seed."""
    splitter = ShuffleSplit(n_splits=8, test_size=0.20, random_state=42)
    return splitter.split(X, y)
def _read_data(path, f_name):
    """Load ``<path>/data/<f_name>`` and split it into (features, target).

    Returns a tuple ``(X_df, y_array)`` where the target column has been
    dropped from the feature frame.
    """
    frame = pd.read_csv(os.path.join(path, 'data', f_name))
    return frame.drop(_target_column_name, axis=1), frame[_target_column_name].values
def get_train_data(path='.'):
    """Return the training split as ``(X_df, y_array)``."""
    return _read_data(path, 'train.csv')
def get_test_data(path='.'):
    """Return the test split as ``(X_df, y_array)``."""
    return _read_data(path, 'test.csv')
| StarcoderdataPython |
# Tiny interview script: ask three questions and echo each answer back.
# Fix: the original printed the lead-in phrase ("This is", "It is", ...)
# BEFORE reading input() and then discarded every answer, so the echoes
# never contained the user's reply. Grammar fixed in two prompts as well.

print("What is your name?")
name = input()
print("This is", name)

print("How old are you?")
age = input()
print("It is", age)

print("Where do you live?")  # was: "Where are you live?"
city = input()
print("(S)he lives in", city)  # was: "(S)he live in"
| StarcoderdataPython |
# Bootstrap setuptools via the legacy ez_setup shim when it is missing.
try:
    from setuptools import setup, find_packages
except ImportError:
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup, find_packages

# Packaging metadata and install configuration for the Pylons app `hola`.
setup(
    name='hola',
    version='0.1',
    description='',
    author='',
    author_email='',
    url='',
    # Runtime dependencies.
    install_requires=[
        "Pylons>=1.0.1rc1",
        "SQLAlchemy>=0.5",
    ],
    setup_requires=["PasteScript>=1.6.3"],
    packages=find_packages(exclude=['ez_setup']),
    include_package_data=True,
    test_suite='nose.collector',
    # Ship compiled translation catalogs with the package.
    package_data={'hola': ['i18n/*/LC_MESSAGES/*.mo']},
    #message_extractors={'hola': [
    # ('**.py', 'python', None),
    # ('templates/**.mako', 'mako', {'input_encoding': 'utf-8'}),
    # ('public/**', 'ignore', None)]},
    zip_safe=False,
    paster_plugins=['PasteScript', 'Pylons'],
    # Paste entry points: how `paster serve` builds and installs the app.
    entry_points="""
    [paste.app_factory]
    main = hola.config.middleware:make_app
    [paste.app_install]
    main = pylons.util:PylonsInstaller
    """,
)
| StarcoderdataPython |
5082871 | <reponame>H0oxy/sportcars
from django.contrib.auth.views import LoginView
from rest_framework.viewsets import ModelViewSet
from authapp.forms import MyAuthForm
from authapp.models import UserProfile
from authapp.serializers import UserProfileSerializer
class UserViewSet(ModelViewSet):
    """DRF viewset exposing CRUD endpoints for all user profiles."""

    queryset = UserProfile.objects.all()
    serializer_class = UserProfileSerializer
class MyLogin(LoginView):
    """Login page wired to the project's custom authentication form."""

    template_name = 'authapp/login.html'
    form_class = MyAuthForm
| StarcoderdataPython |
6554195 | <reponame>mkirby1995/DS-Unit-3-Sprint-2-SQL-and-Databases
import sqlite3

# Demo walkthrough: create a small table, insert three rows, then answer
# three counting questions about the data.
# NOTE(review): the database path is machine-specific (one developer's
# desktop) -- parameterize it before reusing this script elsewhere.
# NOTE(review): CREATE TABLE is not idempotent, so a rerun against the
# same file fails; confirm whether IF NOT EXISTS is acceptable here.
conn = sqlite3.connect("""/Users/mattkirby/Desktop/demo_data.sqlite3""")
curs = conn.cursor()

create_table = """CREATE TABLE demo(
s VARCHAR(5),
x INT,
y INT,
PRIMARY KEY(s)
);"""
curs.execute(create_table)

insert_values = """INSERT INTO demo (s, x, y)
VALUES
('g', 3, 9),
('v', 5, 7),
('f', 8, 7);"""
curs.execute(insert_values)
conn.commit()

# 1. Count how many rows you have - it should be 3!
count_query = """SELECT COUNT(x)
FROM demo;"""
count_result = curs.execute(count_query)
print('1. ')
print('There are', count_result.fetchall()[0][0], 'rows in the table.')
print('\n')

# 2. How many rows are there where both x and y are at least 5?
rows_query = """SELECT COUNT(*)
FROM demo
WHERE x >= 5 AND y >= 5;"""
rows_result = curs.execute(rows_query)
print('2. ')
print('There are', rows_result.fetchall()[0][0],
      'rows where both x and y are at least 5.')
print('\n')

# 3. How many unique values of y are there
# (hint - COUNT() can accept a keyword DISTINCT)?
y_query = """SELECT COUNT(DISTINCT y)
FROM demo;"""
y_result = curs.execute(y_query)
print('3. ')
print('There are', y_result.fetchall()[0][0], 'unique values of y.')
print('\n')

# Fix: the original leaked both the cursor and the connection.
curs.close()
conn.close()
| StarcoderdataPython |
177634 | <filename>genericclient_base/__init__.py
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from . import exceptions, utils
from .response import ParsedResponse # noqa
from .routes import DetailRoute, ListRoute
_version = "1.4.2"
# NOTE(review): __version__ is conventionally a string; here it is exposed
# as a tuple of ints, e.g. (1, 4, 2) -- confirm downstream expectations.
__version__ = VERSION = tuple(map(int, _version.split('.')))

# Re-export the exception types at package level for caller convenience.
AmbiguousComparison = exceptions.AmbiguousComparison
MultipleResourcesFound = exceptions.MultipleResourcesFound
ResourceNotFound = exceptions.ResourceNotFound
HTTPError = exceptions.HTTPError
NotAuthenticatedError = exceptions.NotAuthenticatedError
BadRequestError = exceptions.BadRequestError
UnknownPK = exceptions.UnknownPK
class BaseResource(object):
    """A single remote resource tied to an endpoint.

    Attribute reads/writes are proxied to ``payload`` (the resource's body)
    via ``__getattr__``/``__setattr__``; only the names listed in
    ``whitelist`` are stored as real instance attributes.
    """

    # Attribute names that bypass payload proxying in __setattr__.
    whitelist = (
        '__class__',
        '_endpoint',
        'payload',
        'response',
        'save',
        'delete',
        '_urljoin',
    )

    def __init__(self, endpoint, response=None, **kwargs):
        self._endpoint = endpoint
        self.payload = kwargs  # remaining kwargs become the resource body
        self.response = response
        super(BaseResource, self).__init__()

    def __setattr__(self, name, value):
        """Route non-whitelisted attribute writes into ``payload``."""
        if name == 'whitelist' or name in self.whitelist:
            return super(BaseResource, self).__setattr__(name, value)
        # Assigning another resource stores its primary key, not the object.
        if isinstance(value, self.__class__) and hasattr(value, 'pk'):
            value = value.pk
        self.payload[name] = value

    def __getattr__(self, name):
        """Serve missing attributes from ``payload`` or raise AttributeError."""
        if name not in self.payload:
            raise AttributeError("{} on endpoint `{}` has not attribute '{}'".format(
                self.__class__.__name__,
                self._endpoint.name,
                name,
            ))
        return self.payload[name]

    def __repr__(self):
        return '<{0} `{1}` {2}: {3}>'.format(
            self.__class__.__name__, self._endpoint.name, self.pk_name, self.pk,
        )

    def __eq__(self, other):
        """Compare by payload; same pk with different payloads is an error."""
        if self.payload != other.payload and self.pk == other.pk:
            raise AmbiguousComparison(
                "Payloads are different, but {}:{} is the same.".format(
                    self.pk_name, self.pk
                )
            )
        return self.payload == other.payload

    @property
    def pk_name(self):
        """Name of the primary-key field ('id' or 'uuid'), or None."""
        pk_name = None
        if 'id' in self.payload:
            pk_name = 'id'
        elif 'uuid' in self.payload:
            pk_name = 'uuid'
        return pk_name

    @property
    def pk(self):
        """Primary-key value taken from the payload, or None."""
        if self.pk_name is not None:
            return self.payload.get(self.pk_name)
        return None

    def _urljoin(self, *parts):
        # Build a URL below this resource's endpoint, honoring trailing-slash
        # configuration.
        return utils.urljoin(self._endpoint.url, parts, self._endpoint.trail)

    def save(self):
        """Persist the resource; refresh ``payload`` from the server response.

        With a pk: PUT to the detail URL, falling back to PATCH when the
        server answers with BadRequestError. Without a pk: POST to the
        endpoint's list URL. Returns self.
        """
        if self.pk is not None:
            url = self._urljoin(self.pk)
            try:
                response = self._endpoint.request('put', url, json=self.payload)
            except exceptions.BadRequestError:
                # Retry as a partial update when the full PUT is rejected.
                response = self._endpoint.request('patch', url, json=self.payload)
        else:
            response = self._endpoint.request('post', self._endpoint.url, json=self.payload)
        self.payload = response.data
        return self

    def delete(self):
        """Issue a DELETE for this resource's detail URL."""
        url = self._urljoin(self.pk)
        self._endpoint.request('delete', url)
class BaseResourceSet(list):
    """A list of resources that also keeps the originating HTTP response."""

    def __init__(self, response, items):
        super(BaseResourceSet, self).__init__(items)
        self.response = response
class BaseEndpoint(object):
    """One REST collection (e.g. ``/users/``) on a generic API client.

    Subclasses must implement :meth:`request`; everything else builds URLs
    and translates HTTP responses into resources and exceptions.
    """

    # Hook points so subclasses can swap resource/route implementations.
    resource_set_class = BaseResourceSet
    resource_class = BaseResource
    detail_route_class = DetailRoute
    list_route_class = ListRoute

    def __init__(self, api, name):
        self.api = api
        self.name = name
        self.trail = self.api.trailing_slash
        self.url = utils.urljoin(self.api.url, [name], self.trail)
        super(BaseEndpoint, self).__init__()

    def __call__(self, _method='post', **kwargs):
        """Return a detail route when kwargs identify a resource, else a list route."""
        if kwargs:
            return self.detail_route_class(self, _method, **kwargs)
        else:
            return self.list_route_class(self, _method)

    def __repr__(self):
        return "<{} `{}`>".format(self.__class__.__name__, self.url)

    def _urljoin(self, *parts):
        return utils.urljoin(self.url, parts, self.trail)

    def filter(self, **kwargs):
        """GET the collection filtered by kwargs; returns a resource set.

        When the client has an ``autopaginate`` callable, it drives the
        request loop and yields the combined results. Note the per-item
        resources are built without a ``response`` (it defaults to None).
        """
        params = kwargs.copy()
        if self.api.autopaginate is not None:
            response, results = self.api.autopaginate(self, params)
        else:
            response = self.request('get', self.url, params=params)
            results = response.data
        return self.resource_set_class(response, [self.resource_class(self, **result) for result in results])

    def all(self):
        """Return every resource in the collection (unfiltered filter())."""
        return self.filter()

    def get(self, **kwargs):
        """Fetch exactly one resource.

        If kwargs contain a recognizable pk, GET the detail URL; otherwise
        GET the list URL with kwargs as query params. Raises
        ResourceNotFound on 404/empty results and MultipleResourcesFound
        when a list response has more than one item.
        """
        try:
            pk = utils.find_pk(kwargs)
            url = self._urljoin(pk)
            response = self.request('get', url)
        except exceptions.UnknownPK:
            # No pk in kwargs: fall back to filtering the list endpoint.
            url = self.url
            response = self.request('get', url, params=kwargs)
        if response.status_code == 404:
            raise exceptions.ResourceNotFound("No `{}` found for {}".format(self.name, kwargs))
        result = response.data
        if isinstance(result, list):
            if len(result) == 0:
                raise exceptions.ResourceNotFound("No `{}` found for {}".format(self.name, kwargs))
            if len(result) > 1:
                raise exceptions.MultipleResourcesFound("Found {} `{}` for {}".format(len(result), self.name, kwargs))
            return self.resource_class(self, response, **result[0])
        return self.resource_class(self, response, **result)

    def create(self, payload):
        """POST a new resource; anything but 201 raises HTTPError."""
        response = self.request('post', self.url, json=payload)
        if response.status_code != 201:
            raise exceptions.HTTPError(response)
        return self.resource_class(self, response, **response.data)

    def get_or_create(self, **kwargs):
        """Return the matching resource, creating it (with ``defaults``) if absent."""
        defaults = kwargs.pop('defaults', {})
        try:
            resource = self.get(**kwargs)
            return resource
        except exceptions.ResourceNotFound:
            params = {k: v for k, v in kwargs.items()}
            params.update(defaults)
            return self.create(params)

    def create_or_update(self, payload):
        """Update when the payload carries a pk ('id'/'uuid'), else create."""
        if 'id' in payload or 'uuid' in payload:
            return self.resource_class(self, response=None, **payload).save()
        return self.create(payload)

    def delete(self, pk):
        """DELETE the resource with the given pk; expects 204, maps 404."""
        url = self._urljoin(pk)
        response = self.request('delete', url)
        if response.status_code == 404:
            raise exceptions.ResourceNotFound("No `{}` found for pk {}".format(self.name, pk))
        if response.status_code != 204:
            raise exceptions.HTTPError(response)
        return None

    def request(self, method, url, *args, **kwargs):
        # Must return an instance of ``genericclient_base.response.ParsedResponse``.
        # Use ``self.api.hydrate_data`` to parse the response's body.
        raise NotImplementedError
class BaseGenericClient(object):
    """Entry point for a REST API: attribute/item access yields endpoints.

    Subclasses must provide :meth:`make_session` and :meth:`hydrate_data`.
    """

    endpoint_class = BaseEndpoint
    endpoint_classes = {}

    # Exception types re-exported on the client for caller convenience.
    MultipleResourcesFound = MultipleResourcesFound
    ResourceNotFound = ResourceNotFound
    HTTPError = HTTPError
    NotAuthenticatedError = NotAuthenticatedError
    BadRequestError = BadRequestError
    UnknownPK = UnknownPK

    def __init__(self, url, auth=None, session=None, trailing_slash=False, autopaginate=None):
        # The base URL always carries a trailing slash so urljoin behaves.
        self.url = url if url.endswith('/') else '{}/'.format(url)
        self.auth = auth
        self.trailing_slash = trailing_slash
        self.autopaginate = autopaginate
        self._session = session
        super(BaseGenericClient, self).__init__()

    def hydrate_data(self, response):
        raise NotImplementedError

    def make_session(self):
        raise NotImplementedError

    def get_or_create_session(self):
        # Sessions are created lazily on first use and then cached.
        if self._session is None:
            self._session = self.make_session()
        return self._session

    @property
    def session(self):
        return self.get_or_create_session()

    @property
    def host(self):
        """Network location (host[:port]) of the configured base URL."""
        return urlparse(self.url).netloc

    def _endpoint_for(self, name):
        # Named endpoints may have a dedicated class; fall back to the default.
        endpoint_cls = self.endpoint_classes.get(name, self.endpoint_class)
        return endpoint_cls(self, name)

    def __getattr__(self, name):
        return self._endpoint_for(name)

    def __getitem__(self, item):
        return self._endpoint_for(item)
| StarcoderdataPython |
5083591 | from starlette import responses, status
from endpoints import healthcheck
def test_database_status_without_exception():
    """A healthy session reports UP with HTTP 200."""
    response = responses.Response()
    result = healthcheck.database(response, MockSession())
    assert result['status'] == 'UP'
    assert response.status_code == status.HTTP_200_OK
def test_database_status_with_exception():
    """A failing session reports DOWN with HTTP 503."""
    response = responses.Response()
    result = healthcheck.database(response, MockSession(True))
    assert result['status'] == 'DOWN'
    assert status.HTTP_503_SERVICE_UNAVAILABLE == response.status_code
class MockSession:
    """Stand-in for a DB session whose execute() can be forced to fail."""

    def __init__(self, should_fail=False):
        self.should_fail = should_fail

    def execute(self, sql):
        """Return None on success; raise when configured to fail."""
        if not self.should_fail:
            return None
        raise Exception('execute failed intentionally')
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.