index | blob_id | code |
|---|---|---|
983,400 | f7c07126b03a5551c8a511784ef4853840d2c660 | # Problem description:
# We need to draw a closed figure on a 2D grid. There are two operations:
# 1. Draw the diagonal of a 1*1 cell.
# 2. Draw one edge of a 1*1 cell.
# You can perform only one operation per minute, and you must draw a polygon whose area is at least q. What is the minimum time needed?
# Input
# The first line contains the number of test cases T. (1<=T<=300)
# Each of the next T lines contains a positive integer q, the minimum area the polygon must cover (1<=q<=10^9).
# Output
# For each q, output one line with the minimum number of minutes needed.
# Sample input
# 5
# 1
# 2
# 3
# 4
# 5
# Sample output
# 4
# 4
# 6
# 6
# 7 |
983,401 | 1f6f2bd69f67c5ef6169c3d226af94c200328b6d | from functools import lru_cache
class Solution:
def countPalindromicSubsequences(self, S: str) -> int:
@lru_cache(None)
        def dfs(S, start, end):
            # counts distinct palindromic subsequences within S[start:end]
            cnt = 0
            for ch in 'abcd':
                i = S.find(ch, start, end)   # first occurrence of ch
                j = S.rfind(ch, start, end)  # last occurrence of ch
                if i != -1:                  # rfind cannot miss once find hits
                    # i == j contributes ch alone; i < j contributes ch+ch
                    # plus ch<inner>ch for every inner palindrome
                    cnt += dfs(S, i + 1, j) + (2 if i != j else 1)
            return cnt
return dfs(S, 0, len(S)) % (10**9 + 7)
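
# Quick check against the LeetCode 730 sample: "bccb" has 6 distinct
# palindromic subsequences ('b', 'c', 'bb', 'cc', 'bcb', 'bccb').
if __name__ == "__main__":
    print(Solution().countPalindromicSubsequences("bccb"))  # expected: 6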
|
983,402 | 2814894703ec98304052e14ff93a14009889fe07 | #!/usr/bin/env python
'''Burst Balloons (LeetCode 312)'''
# pylint: disable = I0011, E0401, C0103, C0321
class Solution(object):
    '''Interval dynamic-programming solution'''
    def maxCoins(self, nums):
        '''Return the maximum coins obtainable by bursting all balloons'''
        nums = [1] + nums + [1]
        n = len(nums)
        ans = [[0] * n for _ in range(n)]
        for interval in range(2, n):
            for start in range(n - interval):
                end = start + interval
                # mid is the last balloon burst within the open interval (start, end)
                ans[start][end] = max(nums[start] * nums[mid] * nums[end] + ans[start][mid] + ans[mid][end] for mid in range(start + 1, end))
return ans[0][n-1]
def main():
'''main function'''
_solution = Solution()
inp = [[3,1,5,8]]
for i in inp:
print(_solution.maxCoins(i))
if __name__ == "__main__":
main()
|
983,403 | 7624ae8481092c06efbbdcbc381ac62c19037e7d | import discord
from hogwarts import SortingHat
def get_embed(house,title):
desc = get_bold(house)
color = SortingHat.getColor(house)
embed = discord.Embed(title=title,description=desc,color=color)
url = SortingHat.getHouseIcon(house)
embed.set_image(url=url)
return embed
def get_bold(house):
return "**"+house+"**"
def make_embed(message):
return discord.Embed(title="",description=message) |
983,404 | 248bb9e8111087e333c4ea3e8d68e38a2709d51a | class Solution(object):
def increasingTriplet(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
        # dp[j] = length of the longest increasing subsequence ending at j
        dp = [1] * len(nums)
for i in range(0, len(nums)):
for j in range(i+1, len(nums)):
if nums[i] < nums[j]:
dp[j] = max(dp[j], dp[i] + 1)
if dp[j] >= 3:
return True
return False
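
# The DP above is O(n^2). A common O(n)-time / O(1)-space alternative (shown
# here as a standalone sketch, separate from the class above) tracks the
# smallest tail seen so far and the smallest tail that already has a smaller
# element before it:
def increasing_triplet_greedy(nums):
    first = second = float('inf')
    for n in nums:
        if n <= first:
            first = n        # new smallest value
        elif n <= second:
            second = n       # smallest value preceded by something smaller
        else:
            return True      # n exceeds both, so an increasing triplet exists
    return False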
|
983,405 | 2f17a8ab3ebf818573063ffcc431e09153bc87a5 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-03-06 01:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('about', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='about',
name='biodata',
field=models.CharField(max_length=100),
),
]
|
983,406 | 0626d31a2cc99300610d23d86ed6624dcfef3630 | from django.db import models
from login_app.models import User
# Create your models here.
class ItemManager(models.Manager):
def validator_fields(self, postData):
errors = {}
if ( len(postData['article'].strip()) < 3 ):
errors['article_len'] = 'Nombre del artículo debe tener al menos 3 caracteres'
return errors
class Item(models.Model):
article = models.CharField(max_length=128, blank=False, null=False)
creator = models.ForeignKey(User, related_name='myitems', on_delete=models.CASCADE)
    wisher = models.ManyToManyField(User, related_name='mywhyshes', blank=True)  # null=True has no effect on ManyToManyField
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
objects = ItemManager()
def __repr__(self) -> str:
return self.article
def __str__(self) -> str:
return self.article
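
# Usage sketch (hypothetical, e.g. inside a view); assumes postData is a
# dict-like object such as request.POST containing an 'article' key:
#
# errors = Item.objects.validator_fields(request.POST)
# if errors:
#     ...  # re-render the form with the error messages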
|
983,407 | 366d4e8eb053e2a5897d08f9bed34bf838e12886 | import re
from . import exceptions
class Validator:
message = 'Must be {value}, (is {input})'
value = None
def __init__(self, value, message=None):
self.value = value
if message is not None:
self.message = message
    cmp = lambda self, x, y: x != y  # value inequality, not identity
key = lambda self, x: x
def __call__(self, input):
key = self.key(input)
if self.cmp(key, self.value):
raise exceptions.Invalid(
self.message.format(value=self.value, input=input, key=key))
class Min(Validator):
message = "Can't be less than {value}, (is {input})"
cmp = lambda self, x, y: x < y
class Max(Validator):
message = "Can't be more than {value}, (is {input})"
cmp = lambda self, x, y: x > y
class Length(Validator):
message = "Must be {value} long, (of {input} is {key})"
cmp = lambda self, x, y: x != y
key = lambda self, x: len(x)
class MinLength(Min, Length):
message = "Can't be shorter than {value}, (of {input} is {key})"
class MaxLength(Max, Length):
message = "Can't be longer than {value}, ({input} is {key})"
class Regex(Validator):
    message = "Does not match pattern"
    # the inherited cmp would test plain equality; treat self.value as a regex instead
    cmp = lambda self, x, y: re.search(y, x) is None
def run_validators(validators, value):
messages = []
for validator in validators:
try:
validator(value)
except exceptions.Invalid as e:
messages.append(str(e)) #TODO: proper error messages
if messages:
raise exceptions.Invalid(messages)
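
# Minimal usage sketch (left as a comment because this module relies on a
# relative import); assumes exceptions.Invalid is an Exception subclass:
#
# checks = [MinLength(2), MaxLength(5)]
# run_validators(checks, "abc")   # passes silently
# run_validators(checks, "a")     # raises exceptions.Invalid with one message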
|
983,408 | 00d513411b99945d431740b0ebc91543dcf60a86 | #!/usr/bin/env python
#coding:utf-8
import json
import sys
import util.walk as walk
import util.util as util
import os.path
import os
import subprocess
import tempfile
from threading import Thread
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty # python 3.x
import logging
try:
import argparse
except ImportError:
    logging.error('please use python version >= 2.7')
exit(-1)
import binascii
mswindows = (sys.platform == "win32")
len2 = 16
exe_name = 'xdeltatool.exe'
input_folder = ''
src_folders = []
control_json = 'control.json'
ARMORY_META = 'armory_meta.json'
#calculate a file's md5; returns (32-hex-digit md5 string, crc32)
#on error, returns (None, None)
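#e.g. a file whose entire content is "a" yields
#('0CC175B9C0F1B6A831C399E269772661', 0xE8B7BE43)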
def calc_md5(file_name1):
def str_convert(s2):
assert(len(s2) == len2)
rt = ''
for each in s2:
c1 = ord(each)
s3 = '%02X'%(c1)
rt += s3
return rt
import md5,os.path
f1 = None
prev = 0
try:
if os.path.getsize(file_name1) == 0:#zero size file
return None,None
m1 = md5.new()
f1 = open(file_name1,'rb')#read mode,binary mode(for windows)
len1 = 1024*32#buffer size
while 1:
tmp_buffer = f1.read(len1)
m1.update(tmp_buffer)
prev = binascii.crc32(tmp_buffer, prev)
if len(tmp_buffer) != len1:#file end
break
s1 = m1.digest()
return str_convert(s1),prev & 0xFFFFFFFF#to 16
except Exception as e:#Exception
logging.error('calc md5 of %s failed:%s'%(file_name1,str(e)))
return None,None
finally:
if f1 is not None:f1.close()
#use xdeltatool.exe to encode or decode
#code == 1: encode; any other value: decode
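#e.g. xdelta_code('new.bin','patch.delta',src_name='old.bin',code=1) writes a
#delta that rebuilds new.bin from old.bin (file names are illustrative only)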
def xdelta_code(input_name,output_name,src_name = None,code = 1):#src could be none
import subprocess
import sys
module_path = os.path.dirname(__file__)
parent_path = \
os.path.abspath(os.path.join(module_path, os.path.pardir))
tmp_name = os.path.join(parent_path,exe_name)
logging.debug('tmp_name is ' + tmp_name)
args = []
args.append(tmp_name)
if not code == 1:
args.append('-d')
else:
args.append('-e')
args.append('-v')
if src_name is not None:
args.append('-s')
args.append(src_name)
args.append(input_name)
args.append(output_name)
logging.debug(args)
try:
pipe = subprocess.Popen(args,bufsize = 0,\
stdout = sys.stdout,\
stderr = subprocess.STDOUT)
except Exception as e:
logging.error(str(e))
return False
while 1:
if pipe.returncode is None:
pipe.poll()
else:
break
if not 0 == pipe.returncode:
logging.error('xdelta error')
return False
return True
#win version
if mswindows:
def soft_link_file(origin,link):
def valid_str(str1):
str1 = str1.replace('"',r'\"')
return str1
import subprocess
origin = valid_str(origin)
link = valid_str(link)
origin = '"' + origin + '"'
link = '"' + link + '"'
args = 'ln -s' + ' ' + origin + ' ' + link
#args = ['ln','-s',origin,link]
args1 = 'rm ' + link
pipe1 = subprocess.Popen(args1,bufsize = 4096,stdout = subprocess.PIPE,\
            stderr = subprocess.PIPE)#remove any existing link quietly
pipe1.communicate()
pipe = subprocess.Popen(args,bufsize = 4096,stdout = subprocess.PIPE,\
            stderr = subprocess.PIPE)#create the soft link quietly
out,err = pipe.communicate()
if 0 == pipe.returncode:
return True
else:#error
logging.warning(err)
logging.debug('origin is ' + origin)
logging.debug('link is ' + link)
return False
else:
def soft_link_file(origin,link):
import subprocess
import logging
args = ["ln","-s",origin,link]
pipe = subprocess.Popen(args,bufsize = 4096,stdout = subprocess.PIPE,\
            stderr = subprocess.PIPE)#create the soft link quietly
out,err = pipe.communicate()
if 0 == pipe.returncode:
return True
else:#error
logging.error(err)
logging.error('origin is ' + origin)
logging.error('link is ' + link)
return False
#url:full svn url(chunk or branch)
#version(not revision):folder name
def check_out_version(url,folder):
import subprocess
args = ['svn','co','--force',url,folder]
try:
pipe = subprocess.Popen(args,bufsize = 0,\
stdout = sys.stdout,\
stderr = subprocess.STDOUT)
except Exception as e:
logging.error(str(e))
return False
while 1:
if pipe.returncode is None:
pipe.poll()
else:
break
if not 0 == pipe.returncode:
        logging.error('check out of %s failed with code %s'%(url,pipe.returncode))
return False
return True
#def export_version(url,folder):
# import subprocess
# args = ['svn','export','-q','--force',url,folder]
# pipe = subprocess.Popen(args,bufsize = 4096,stdout = subprocess.PIPE,\
# stderr = subprocess.PIPE)#check out reversion queitly
# out,err = pipe.communicate()
# if 0 == pipe.returncode:
# return True
# else:#error
# logging.error('export of %s failed:%s'%(url,err))
# return False
def diff_between_urls(url1,url2): # r1 -> r2
import subprocess
args = ['svn','di','--summarize']
old = ''.join(['--old=',url1])
new = ''.join(['--new=',url2])
args.append(old)
args.append(new)
rv = ''
try:
pipe = subprocess.Popen(args,bufsize = 4096,stdout = subprocess.PIPE,\
stderr = subprocess.PIPE)
except Exception as e:
logging.error(str(e))
return None
while 1:
s = pipe.stdout.read()
if s:
rv += s
if pipe.returncode is None:
pipe.poll()
else:
break
if not 0 == pipe.returncode:
return None
return rv
def combine_url_at_rev(url,revision):
return ''.join([url,'@',revision])
#def get_svn_file_size(url,username,password):
# import urllib2, base64
# class HeadRequest(urllib2.Request):
# def get_method(self):
# return "HEAD"
#
# request = HeadRequest(url)
# base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n','')
# request.add_header("Authorization", "Basic %s" % base64string)
# try:
# result = urllib2.urlopen(request)
# size = result.headers.get('content-length',None)
# #seems svn apache don't give content length header when file's size is 0
# if size is None:
# return 0
# return size
# except:
# return None
# finally:
# result.close()
def get_modified_from_svn_diff_line(s1):
pattern = r"\s*M\s+(.+)$"
return get_url_from_svn_line(s1,pattern)
def get_deleted_from_svn_diff_line(s1):
pattern = r"\s*D\s+(.+)$"
return get_url_from_svn_line(s1,pattern)
def get_added_from_svn_diff_line(s1):
pattern = r"\s*A\s+(.+)$"
return get_url_from_svn_line(s1,pattern)
def get_url_from_svn_line(s1,pattern):
import re
if 0 == len(s1):
return None
o1 = re.search(pattern,s1)
if o1 is not None:
rv = o1.group(1)
if rv.endswith('\r'):
rv = rv[:-1]
return rv
return None
def unicode_to_utf8(o1):
if isinstance(o1,unicode):
return o1.encode('utf-8')
return o1
def my_quote_plus(url):
import urllib
return urllib.quote_plus(url).replace('+','%20')
def quote_path(path):
l1 = path.split('/')
l2 = []
for each in l1:
l2.append(my_quote_plus(each))
return '/'.join(l2)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--logname',help='set log file name')
parser.add_argument('-j','--diffjson',help='diffs.json file name',\
required=True)
parser.add_argument('--verbose',help='print extra info',\
action='store_true')
    parser.add_argument('--outputfolder',help='set the output folder')
args = parser.parse_args()
if args.outputfolder is None:
args.outputfolder = '../.portal'#parent folder
return args
def main(slave = False,logname = None,diffjson = '',verbose = False,\
outputfolder = '../.portal'):
if slave:
args_logname = logname
args_diffjson = diffjson
args_verbose = verbose
args_outputfolder = outputfolder
else:
args = get_args()
args_logname = args.logname
args_diffjson = args.diffjson
args_verbose = args.verbose
args_outputfolder = args.outputfolder
svn_user = 'db.zhang'
svn_pwd = 'zkf123456'
latest_diffs = {}
if args_verbose:
level = logging.DEBUG
else:
level = logging.INFO
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',\
datefmt='%m/%d/%Y %I:%M:%S %p',\
level=level,
filename=args_logname)
all_versions = {}
svn_url = util.get_game_svn_url('.')
def load_armory_meta(svn_url):
tmpfile = tempfile.mktemp()
if not util.export_svn_file_to_local(svn_url + '/' + ARMORY_META,tmpfile):
return None
armory_meta_obj = None
f = None
try:
f = open(tmpfile,'rb')
armory_meta_obj = json.load(f)
except Exception as e:
logging.debug(str(e))
finally:
if f is not None:f.close()
return armory_meta_obj
armory_meta_obj = load_armory_meta(svn_url)
if armory_meta_obj is None:
logging.error('could not load '+ ARMORY_META)
return -1
config_obj = armory_meta_obj['config']
versions = config_obj['versions']
version1 = 1
no_delta = False
    if not versions.has_key('main'):
        logging.error('must have a main version')
        return -1
    if len(versions['main']) == 1:
no_delta = True
if not version1 in versions['main']:
versions['main'].append(version1)
newest_num = str(max(versions['main']))
game_name = unicode_to_utf8(config_obj['game_name'])
root_url = unicode_to_utf8(config_obj['svn_url'])
armories_url = unicode_to_utf8(config_obj['armories_url'])
package_mirrors = unicode_to_utf8(config_obj['package_mirrors'])
base_mirrors = unicode_to_utf8(config_obj['base_mirrors'])
patch_mirrors = unicode_to_utf8(config_obj['patch_mirrors'])
appid = config_obj['appid']
delta_folder = ''.join([appid,'/','base','/',newest_num])
first_folder = ''.join([appid,'/','base','/','1'])
tmp_folder = ''.join([appid,'/','.',appid])
delta_folder = os.path.join(args_outputfolder,delta_folder)
first_folder = os.path.join(args_outputfolder,first_folder)
tmp_folder = os.path.join(args_outputfolder,tmp_folder)
newest_version = 'main',newest_num
for each in versions.keys():
if versions[each] is None:
continue
for num in versions[each]:
str_num = str(num)
each = str(each)
if (each,str_num) in all_versions:
continue
#r_num = util.find_revision(log,game_name,each,str_num)
r_num = util.find_revision_from_armory_meta(armory_meta_obj,\
str_num,each)
if r_num is None:
logging.error('failed to find revision %s of',str_num)
return -1
all_versions[each,str_num] = r_num
#main 1st version num
#first_num = util.find_revision(log,game_name,'main','1')
first_num = util.find_revision_from_armory_meta(armory_meta_obj,'1','main')
if first_num is None:
logging.error('failed to find revision %s of',1)
return -1
#try:
# util.simple_remove_dir(tmp_folder)
#except:
# pass
#util.simple_make_dirs(tmp_folder)
nv_url = combine_url_at_rev(util.combine_trunk_url(svn_url,''),\
all_versions[newest_version])
input_folder = delta_folder
#try:
# util.simple_remove_dir(input_folder)
#except:
# pass
#input_folder = tmp_folder + '/' + 'main' + all_versions[newest_version]
#if not check_out_version(nv_url,input_folder):
# return -1
first_url = combine_url_at_rev(util.combine_trunk_url(svn_url,''),first_num)
#always check out 1st version
if not util.simple_path_exists(first_folder):
logging.info('copy first version from local')
util.simple_copy_folder('./trunk',first_folder)
if not check_out_version(first_url,first_folder):
return -1
d_to_xdelta = {}
def download_lastest_file(path):
rel_name = os.path.join(input_folder,path)
if util.simple_path_exists(rel_name):
return
folder_name,base_name = os.path.split(rel_name)
util.simple_make_dirs(folder_name)
tmp_http_url = util.convert_svnurl_to_httpurl(nv_url,root_url)#svn_url is root path
if not tmp_http_url.endswith('/'):
tmp_http_url += '/'
download_url = tmp_http_url + quote_path(path)
logging.info('latest: ' + download_url)
logging.debug(nv_url)
nv_svn_url = combine_url_at_rev(util.combine_trunk_url(svn_url,'') + \
quote_path(path),\
all_versions[newest_version])
#logging.info(nv_svn_url)
#logging.info(rel_name)
#logging.info(util.simple_path_exists(rel_name))
#util.export_svn_file_to_local(nv_svn_url,rel_name)
rv = util.download_svn_file(download_url,rel_name,svn_user,svn_pwd)#download old file to folder
if not rv:#retry
rv = util.download_svn_file(download_url,rel_name,svn_user,svn_pwd)#download old file to folder
lastest_changed = []
for each in all_versions.keys():
if each == newest_version:
continue
tmp_revision = all_versions[each]
if each[0] == 'main':
base_url = util.combine_trunk_url(svn_url,'')
else:
base_url = util.combine_branch_url(svn_url,each[0],'')
tmp_url = combine_url_at_rev(base_url,tmp_revision)
s1 = diff_between_urls(tmp_url,nv_url)
logging.debug(s1)
if s1 is not None:
diff_to_newest_key = each[1] if each[0] == 'main' else None
if diff_to_newest_key is not None:
latest_diffs[diff_to_newest_key] = {}
latest_diffs[diff_to_newest_key]['to_modify'] = {}
latest_diffs[diff_to_newest_key]['to_delete'] = []
latest_diffs[diff_to_newest_key]['to_add'] = []
logging.info(''.join([each[0],each[1],'->',\
newest_version[0],newest_version[1]]))
l1 = s1.split('\n')
for diff in l1:
m1 = get_modified_from_svn_diff_line(diff)
logging.debug('m1 is ' + str(m1))
if m1 is not None:
attr = util.get_svn_url_attr(combine_url_at_rev(m1,all_versions[newest_version]))
assert(attr is not None)
#print 'attr is ',attr
if util.FOLDER == attr:
continue #on windows,blind to directory,just pass
if not m1.startswith(base_url):
assert(0)
m1 = m1.replace(base_url,'')
if not d_to_xdelta.has_key(each):
d_to_xdelta[each] = []
tmp_http_url = util.convert_svnurl_to_httpurl(tmp_url,root_url)#svn_url is root path
if not tmp_http_url.endswith('/'):
tmp_http_url += '/'
#d_to_xdelta[each].append((m1,tmp_http_url + m1))
rel_name = os.path.join(tmp_folder + '/' + each[0] + each[1]+'/',m1)
folder_name,base_name = os.path.split(rel_name)
util.simple_make_dirs(folder_name)
m1 = m1.decode(util.simple_get_encoding()).encode('utf-8')
if diff_to_newest_key is not None:
latest_diffs[diff_to_newest_key]['to_modify'][m1] = None
download_url = tmp_http_url + quote_path(m1)
tmp_svn_url = combine_url_at_rev(base_url + quote_path(m1),\
tmp_revision)
#logging.info(tmp_svn_url)
#logging.info(rel_name)
#util.export_svn_file_to_local(tmp_svn_url,rel_name)
logging.info(download_url)
rv = util.download_svn_file(download_url,rel_name,svn_user,svn_pwd)#download old file to folder
if not rv:#retry
rv = util.download_svn_file(download_url,rel_name,svn_user,svn_pwd)#download old file to folder
#also download the related latest version file
download_lastest_file(m1)
m1 = m1.replace('\\','/')
if m1 not in lastest_changed:
lastest_changed.append(m1)
m2 = get_added_from_svn_diff_line(diff)
if m2 is not None:
attr = util.get_svn_url_attr(combine_url_at_rev(m2,all_versions[newest_version]))
assert(attr is not None)
if util.FOLDER == attr:
continue
if diff_to_newest_key is not None:
m2 = m2.replace(base_url,'')
m2 = m2.decode(util.simple_get_encoding())
latest_diffs[diff_to_newest_key]['to_add'].append(m2)
download_lastest_file(m2)
m2 = m2.replace('\\','/')
if m2 not in lastest_changed:
lastest_changed.append(m2)
m3 = get_deleted_from_svn_diff_line(diff)
if m3 is not None:
if diff_to_newest_key is not None:
m3 = m3.replace(base_url,'')
m3 = m3.decode(util.simple_get_encoding())
latest_diffs[diff_to_newest_key]['to_delete'].append(m3)
src_folders = []
for each in d_to_xdelta.keys():
version_name = each[1] if each[0] == 'main' else each[0] + '_' + each[1]
t1 = each[0] + each[1],version_name
src_folders.append(t1)
logging.info(d_to_xdelta.keys())
latest_diffs[newest_version[1]] = []
latest_diffs['latest'] = newest_version[1]
def make_diffs(file_name):
if not file_name.find('.svn/') == -1:#ignore svn folder
return
coding = util.simple_get_encoding()
entry1 = {}
file_name = file_name.replace('\\','/')
if file_name not in lastest_changed:
return
abs_input_name = input_folder + '/' + file_name
entry1['size'] = util.simple_getsize(abs_input_name)#zero size input file
entry1['name'] = file_name.decode(coding)
if entry1['size'] != 0:
entry1['hash'],entry1['crc32']= calc_md5(abs_input_name)
srcs = []
for each,version_name in src_folders:#each source folder,try to get delta
src_file1 = tmp_folder + '/' + each + '/' + file_name
if util.simple_path_exists(src_file1):#src exists
if util.simple_getsize(src_file1) != 0:#zero size file not having md5,skip it
src_md5 = calc_md5(src_file1)[0]
#if md5 already exist
#or the same with input file,continue
output_name = ''.join([file_name,'-',version_name,'-',\
newest_version[1],'.delta'])
if src_md5 not in srcs and entry1['hash'] != src_md5:
if no_delta:
continue
logging.info('encoding...')
logging.info('input:' + abs_input_name)
logging.info('src:' + src_file1)
logging.info('output:' + output_name)
xdelta_code(abs_input_name,\
delta_folder + '/' + output_name,\
src_file1,1)#encode,generate a xdelta file
xdelta_size = util.simple_getsize(delta_folder +\
'/' + output_name)
#print each,type(each)
xdelta_dict = {}
#name should be a unicode object
xdelta_dict['name'] = output_name.decode(coding)
xdelta_dict['size'] = xdelta_size
xdelta_dict['hash'] = src_md5
srcs.append(xdelta_dict)
if each.startswith('main'):
latest_diffs[each[4:]]['to_modify'][file_name.decode(coding).encode('utf-8')] = xdelta_dict
if len(srcs):
entry1['deltas'] = srcs
latest_diffs[newest_version[1]].append(entry1)
latest_diffs['delta_folder'] = delta_folder
latest_diffs['appid'] = appid
walk.walkutil(input_folder,None,make_diffs)
def replace_with_doubledot(path):
import re
return re.sub(r'[^/]+','..',path)
base_folder = ''.join([appid,'/','base'])
base_folder = os.path.join(args_outputfolder,base_folder).replace('\\','/')
nv_list = util.get_svn_url_list(nv_url,True)
for sth in nv_list:
input_path = os.path.join(input_folder,sth).replace('\\','/')
if util.simple_path_exists(input_path) and \
sth.replace('\\','/') in lastest_changed:
continue
else:
first_path = os.path.join(first_folder,sth).replace('\\','/')
#files not change,make a soft link to 1st version file
if util.simple_path_exists(first_path):
if os.path.isfile(first_path):
tmp_path = first_path.replace(base_folder + '/1/','',1)
folder_name,base_name = os.path.split(tmp_path)
parent_folder_name = os.path.join(replace_with_doubledot(folder_name),'../1')
folder_name = os.path.join(parent_folder_name,folder_name)
tmp_path = os.path.join(folder_name,base_name).replace('\\','/')
soft_link_file(tmp_path,input_path)
#update diffs.json
coding = util.simple_get_encoding()
entry1 = {}
entry1['size'] = util.simple_getsize(first_path)#zero size input file
entry1['name'] = sth.decode(coding)
if entry1['size'] != 0:
entry1['hash'],entry1['crc32'] = calc_md5(first_path)
latest_diffs[newest_version[1]].append(entry1)
else:
util.simple_make_dirs(input_path)
else:
pass
#assert(0)
tmp = sth
tmp = tmp.decode(util.simple_get_encoding())
download_lastest_file(tmp)
diffjson_path = os.path.join(args_outputfolder,args_diffjson)
f1 = open(diffjson_path,'w+b')#new json file
json.dump(latest_diffs,f1,indent=4)#dump json
f1.close()
control_obj = {}
control_obj['root'] = util.simle_join_path(armories_url,appid)
control_obj['package_mirrors'] = []
control_obj['patch_mirrors'] = []
control_obj['base_mirrors'] = []
for each in package_mirrors:
control_obj['package_mirrors'].append(util.simle_join_path(each,appid))
for each in patch_mirrors:
control_obj['patch_mirrors'].append(util.simle_join_path(each,appid))
for each in base_mirrors:
control_obj['base_mirrors'].append(util.simle_join_path(each,appid))
control_obj['latest'] = latest_diffs['latest']
control_obj['base'] = {}
control_obj['base'][newest_version[1]] = latest_diffs[newest_version[1]]
for each in control_obj['base'][newest_version[1]]:
if each.has_key('crc32'):
each.pop('crc32')
control_json_path = \
os.path.join(args_outputfolder,appid + '/' + control_json)
f1 = open(control_json_path,'w+b')#new json file
json.dump(control_obj,f1,indent=4)#dump json
f1.close()
logging.info('armory generation successful')
if __name__ == "__main__":#main entry
main()
|
983,409 | 60f5d1126a35d552e2515013c293a985f5cc6669 | import json
from core.drf.exceptions import ConflictException
from django import forms
from django.db import connection, transaction, IntegrityError
from django.core.paginator import Paginator
from django.utils.crypto import get_random_string
from rest_framework import viewsets, mixins, status
from rest_framework.decorators import action, link
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response as DRFResponse
from rest_framework.filters import OrderingFilter, DjangoFilterBackend
from rest_framework_extensions.mixins import DetailSerializerMixin
from core.utils import format_patch
from core.drf.mixins import NestedGenericModelMixin, JsonPatchViewSetMixin, FormActionMixin
from core.drf.pagination import RelativeUrlPaginationSerializer
from legalaid.permissions import IsManagerOrMePermission
from cla_eventlog import event_registry
from cla_auth.models import AccessAttempt
from .serializers import (
CategorySerializerBase,
MatterTypeSerializerBase,
MediaCodeSerializerBase,
PersonalDetailsSerializerFull,
ThirdPartyDetailsSerializerBase,
AdaptationDetailsSerializerBase,
CaseSerializerBase,
FeedbackSerializerBase,
CaseNotesHistorySerializerBase,
CSVUploadSerializerBase,
EODDetailsSerializerBase,
ContactResearchMethodSerializerBase,
)
from cla_provider.models import Feedback, CSVUpload
from .models import (
Case,
Category,
EligibilityCheck,
MatterType,
MediaCode,
PersonalDetails,
ThirdPartyDetails,
AdaptationDetails,
CaseNotesHistory,
EODDetails,
ContactResearchMethod,
)
class CaseFormActionMixin(FormActionMixin):
"""
This is for backward compatibility
"""
FORM_ACTION_OBJ_PARAM = "case"
class PasswordResetForm(forms.Form):
new_password = forms.CharField(required=True, min_length=10)
old_password = forms.CharField(required=False)
def __init__(self, case=None, *args, **kwargs):
self.action_user = kwargs.pop("action_user")
self.reset_user = kwargs.pop("reset_user")
super(PasswordResetForm, self).__init__(*args, **kwargs)
if self.action_user == self.reset_user:
self.fields["old_password"].required = True
def clean_old_password(self):
old_password = self.cleaned_data["old_password"]
if self.action_user == self.reset_user:
# changing own password
if not self.reset_user.check_password(old_password):
raise PermissionDenied({"__all__": ["Old password doesn't match."]})
return old_password
def save(self, _):
new_password = self.cleaned_data["new_password"]
self.reset_user.set_password(new_password)
self.reset_user.save()
class BaseUserViewSet(
mixins.RetrieveModelMixin,
mixins.ListModelMixin,
mixins.CreateModelMixin,
CaseFormActionMixin,
viewsets.GenericViewSet,
):
permission_classes = (IsManagerOrMePermission,)
me_lookup_url_kwargs = "me"
lookup_field = "user__username"
def get_queryset(self):
qs = super(BaseUserViewSet, self).get_queryset()
return qs.filter(user__is_active=True)
def get_logged_in_user_model(self):
raise NotImplementedError()
def get_object(self, *args, **kwargs):
"""
Lock the object every time it's requested
"""
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
lookup = self.kwargs.get(lookup_url_kwarg, None)
# for now, you can only access to the user/me/ object, for security
# reasons. We'll probably change this in the future to allow service
# managers to add/update/delete users from their area.
# logged_in_user_model = self.get_logged_in_user_model()
if lookup == self.me_lookup_url_kwargs:
self.kwargs[lookup_url_kwarg] = self.request.user.username
obj = super(BaseUserViewSet, self).get_object(*args, **kwargs)
self.check_object_permissions(self.request, obj)
return obj
@action()
def password_reset(self, request, *args, **kwargs):
user = self.get_object().user
try:
return self._form_action(
request, PasswordResetForm, no_body=True, form_kwargs={"action_user": request.user, "reset_user": user}
)
except PermissionDenied as pd:
return DRFResponse(pd.detail, status=status.HTTP_403_FORBIDDEN)
@action()
def reset_lockout(self, request, *args, **kwargs):
logged_in_user_model = self.get_logged_in_user_model()
if not logged_in_user_model.is_manager:
raise PermissionDenied()
user = self.get_object().user
AccessAttempt.objects.delete_for_username(user.username)
return DRFResponse(status=status.HTTP_204_NO_CONTENT)
def list(self, request, *args, **kwargs):
if not self.get_logged_in_user_model().is_manager:
raise PermissionDenied()
return super(BaseUserViewSet, self).list(request, *args, **kwargs)
@transaction.atomic
def create(self, request, *args, **kwargs):
obj = super(BaseUserViewSet, self).create(request, *args, **kwargs)
self.check_object_permissions(request, obj)
return obj
class BaseCategoryViewSet(viewsets.ReadOnlyModelViewSet):
model = Category
serializer_class = CategorySerializerBase
lookup_field = "code"
class BaseEligibilityCheckViewSet(JsonPatchViewSetMixin, viewsets.GenericViewSet):
model = EligibilityCheck
lookup_field = "reference"
@link()
def validate(self, request, **kwargs):
obj = self.get_object()
return DRFResponse(obj.validate())
@action()
def is_eligible(self, request, *args, **kwargs):
obj = self.get_object()
response, ec, reasons = obj.get_eligibility_state()
return DRFResponse({"is_eligible": response})
def get_means_test_event_kwargs(self, kwargs):
return kwargs
def get_request_user(self):
return self.request.user
def create_means_test_log(self, obj, created):
try:
obj.case
except Case.DoesNotExist:
return
user = self.get_request_user()
means_test_event = event_registry.get_event("means_test")()
status = "changed" if not created else "created"
kwargs = {"created_by": user, "status": status, "context": {"state": obj.state}}
kwargs = self.get_means_test_event_kwargs(kwargs)
means_test_event.process(obj.case, **kwargs)
def post_save(self, obj, created=False, **kwargs):
super(BaseEligibilityCheckViewSet, self).post_save(obj, created=created)
self.create_means_test_log(obj, created=created)
return obj
class BaseNestedEligibilityCheckViewSet(NestedGenericModelMixin, BaseEligibilityCheckViewSet):
PARENT_FIELD = "eligibility_check"
def get_means_test_event_kwargs(self, kwargs):
patch = self.jsonpatch
kwargs.update({"patch": json.dumps(patch), "notes": format_patch(patch["forwards"])})
return kwargs
class BaseMatterTypeViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet):
model = MatterType
serializer_class = MatterTypeSerializerBase
filter_backends = (DjangoFilterBackend,)
filter_fields = ("level", "category__code")
class BaseMediaCodeViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet):
model = MediaCode
serializer_class = MediaCodeSerializerBase
filter_backends = (DjangoFilterBackend,)
filter_fields = ("name", "group__name")
class BaseContactResearchMethodViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet):
model = ContactResearchMethod
serializer_class = ContactResearchMethodSerializerBase
filter_backends = (DjangoFilterBackend,)
filter_fields = ("method",)
class FullPersonalDetailsViewSet(
mixins.CreateModelMixin,
mixins.UpdateModelMixin,
mixins.RetrieveModelMixin,
NestedGenericModelMixin,
viewsets.GenericViewSet,
):
model = PersonalDetails
serializer_class = PersonalDetailsSerializerFull
lookup_field = "reference"
PARENT_FIELD = "personal_details"
class BaseThirdPartyDetailsViewSet(
mixins.CreateModelMixin,
mixins.UpdateModelMixin,
mixins.RetrieveModelMixin,
NestedGenericModelMixin,
viewsets.GenericViewSet,
):
model = ThirdPartyDetails
serializer_class = ThirdPartyDetailsSerializerBase
lookup_field = "reference"
PARENT_FIELD = "thirdparty_details"
class BaseAdaptationDetailsViewSet(
mixins.CreateModelMixin,
mixins.UpdateModelMixin,
mixins.RetrieveModelMixin,
NestedGenericModelMixin,
viewsets.GenericViewSet,
):
model = AdaptationDetails
serializer_class = AdaptationDetailsSerializerBase
lookup_field = "reference"
PARENT_FIELD = "adaptation_details"
class BaseAdaptationDetailsMetadataViewSet(mixins.CreateModelMixin, viewsets.GenericViewSet):
model = AdaptationDetails
serializer_class = AdaptationDetailsSerializerBase
def create(self, request, *args, **kwargs):
self.http_method_not_allowed(request)
class BaseEODDetailsViewSet(
mixins.CreateModelMixin,
mixins.UpdateModelMixin,
mixins.RetrieveModelMixin,
NestedGenericModelMixin,
viewsets.GenericViewSet,
):
model = EODDetails
serializer_class = EODDetailsSerializerBase
lookup_field = "reference"
PARENT_FIELD = "eod_details"
def pre_save(self, obj):
# delete all existing EOD categories and use those from request as replacement set
if isinstance(obj, EODDetails) and obj.pk:
obj.categories.all().delete()
obj.case = Case.objects.get(reference=self.kwargs.get("case_reference"))
super(BaseEODDetailsViewSet, self).pre_save(obj)
def post_save(self, obj, created=False):
return super(BaseEODDetailsViewSet, self).post_save(obj, False)
class BaseCaseOrderingFilter(OrderingFilter):
default_modified = "modified"
def filter_queryset(self, request, queryset, view):
ordering = self.get_ordering(request)
if not ordering:
ordering = self.get_default_ordering(view)
if isinstance(ordering, basestring):
if "," in ordering:
ordering = ordering.split(",")
else:
ordering = [ordering]
if "requires_action_at" not in ordering:
ordering.append("requires_action_at")
if "modified" not in ordering:
ordering.append(self.default_modified)
ordering = self.remove_invalid_fields(queryset, ordering, view)
return queryset.order_by(*ordering)
class AscCaseOrderingFilter(BaseCaseOrderingFilter):
default_modified = "modified"
class DescCaseOrderingFilter(BaseCaseOrderingFilter):
default_modified = "-modified"
class BaseCaseLogMixin(object):
def get_log_notes(self, obj):
raise NotImplementedError()
def get_log_context(self, obj):
context = {}
if obj.eligibility_check:
context["eligibility_state"] = obj.eligibility_check.state
return context
def post_save(self, obj, created=False):
super(BaseCaseLogMixin, self).post_save(obj, created=created)
if created:
event = event_registry.get_event("case")()
event.process(
obj,
status="created",
created_by=obj.created_by,
notes=self.get_log_notes(obj),
context=self.get_log_context(obj),
)
class FullCaseViewSet(
DetailSerializerMixin,
mixins.UpdateModelMixin,
mixins.RetrieveModelMixin,
mixins.ListModelMixin,
CaseFormActionMixin,
viewsets.GenericViewSet,
):
model = Case
lookup_field = "reference"
    lookup_regex = r"[A-Z\d]{2}-\d{4}-\d{4}"
serializer_class = CaseSerializerBase
pagination_serializer_class = RelativeUrlPaginationSerializer
filter_backends = (AscCaseOrderingFilter,)
ordering_fields = (
"modified",
"personal_details__full_name",
"requires_action_at",
"personal_details__date_of_birth",
"personal_details__postcode",
"eligibility_check__category__name",
"priority",
"null_priority",
"flagged_with_eod",
"organisation__name",
)
ordering = ["-priority"]
paginate_by = 20
paginate_by_param = "page_size"
max_paginate_by = 100
FLAGGED_WITH_EOD_SQL = """
SELECT COUNT(id) > 0 FROM legalaid_eoddetails
WHERE legalaid_case.id = legalaid_eoddetails.case_id
AND (
(
legalaid_eoddetails.notes IS NOT NULL
AND length(legalaid_eoddetails.notes) > 0
) OR (
SELECT COUNT(id) > 0
FROM legalaid_eoddetailscategory
WHERE legalaid_eoddetailscategory.eod_details_id=legalaid_eoddetails.id
)
)
"""
def get_search_terms(self):
"""
Search terms are set by a ?search=... query parameter,
and may be comma and/or whitespace delimited.
"""
params = self.request.QUERY_PARAMS.get("search", "")
return params.replace(",", " ").split()
def get_temporary_view_name(self):
return "case_search_view_{}".format(get_random_string())
def list(self, request, *args, **kwargs):
try:
return super(FullCaseViewSet, self).list(request, *args, **kwargs)
finally:
if hasattr(request, "temp_view_name"):
try:
with connection.cursor() as cursor:
cursor.execute('DROP VIEW "{view_name}"'.format(view_name=self.request.temp_view_name))
except Exception:
pass # whatever, it won't survive session end
def filter_queryset(self, queryset):
queryset = super(FullCaseViewSet, self).filter_queryset(queryset)
search_terms = self.get_search_terms()
if not search_terms:
return queryset
select_sql = """
(SELECT c.id FROM legalaid_case c
LEFT OUTER JOIN legalaid_personaldetails pd ON
c.personal_details_id=pd.id WHERE {where_clause})
"""
case_where_sql = """
UPPER(c.reference::text) LIKE UPPER(%s)
OR UPPER(c.laa_reference::text) LIKE UPPER(%s)
OR UPPER(c.search_field::text) LIKE UPPER(%s)
"""
personal_details_where_sql = """
UPPER(pd.full_name::text) LIKE UPPER(%s)
OR UPPER(pd.postcode::text) LIKE UPPER(%s)
OR UPPER(pd.street::text) LIKE UPPER(%s)
OR UPPER(pd.search_field::text) LIKE UPPER(%s)
"""
number_of_placeholders = 7
unions = []
params = []
for search_term in search_terms:
unions.append(
"({})".format(
" UNION ".join(
[
select_sql.format(where_clause=case_where_sql),
select_sql.format(where_clause=personal_details_where_sql),
]
)
)
)
for _ in range(number_of_placeholders):
params.append(u"%{}%".format(search_term))
subquery = " INTERSECT ".join(unions)
self.request.temp_view_name = self.get_temporary_view_name()
create_view_sql = 'CREATE TEMPORARY VIEW "{view_name}" AS {query}'.format(
view_name=self.request.temp_view_name, query=subquery
)
with connection.cursor() as cursor:
cursor.execute(create_view_sql, params)
return queryset.extra(
tables=[self.request.temp_view_name],
where=['"legalaid_case"."id"="{}"."id"'.format(self.request.temp_view_name)],
)
def get_queryset(self, **kwargs):
qs = super(FullCaseViewSet, self).get_queryset(**kwargs)
person_ref_param = self.request.QUERY_PARAMS.get("person_ref", None)
dashboard_param = self.request.QUERY_PARAMS.get("dashboard", None)
if person_ref_param:
qs = qs.filter(personal_details__reference=person_ref_param)
elif dashboard_param:
qs = self.get_dashboard_qs(qs)
qs = qs.extra(
select={
"null_priority": """CASE
WHEN legalaid_case.outcome_code IS NULL THEN 1
ELSE 0
END""",
"priority": """CASE legalaid_case.outcome_code
WHEN 'PCB' THEN 10
WHEN 'REF-EXT' THEN 8
WHEN 'IRCB' THEN 7
WHEN 'MIS' THEN 6
WHEN 'COI' THEN 5
WHEN 'CB1' THEN 4
WHEN 'CB2' THEN 3
WHEN 'CB3' THEN 2
ELSE 1
END""",
"rejected": """CASE
WHEN legalaid_case.outcome_code IN (
'COI', 'MIS')
THEN 1
ELSE 0
END""",
"flagged_with_eod": self.FLAGGED_WITH_EOD_SQL,
}
)
return qs
def get_dashboard_qs(self, qs):
return qs
def retrieve(self, request, *args, **kwargs):
resp = super(FullCaseViewSet, self).retrieve(request, *args, **kwargs)
event = event_registry.get_event("case")()
event.process(self.object, status="viewed", created_by=request.user, notes="Case viewed")
return resp
def pre_save(self, obj):
super(FullCaseViewSet, self).pre_save(obj)
if obj.pk:
if "notes" in obj.changed_fields:
cnh = CaseNotesHistory(case=obj)
cnh.operator_notes = obj.notes
cnh.created_by = self.request.user
cnh.save()
if "provider_notes" in obj.changed_fields:
cpnh = CaseNotesHistory(case=obj)
cpnh.provider_notes = obj.provider_notes
cpnh.created_by = self.request.user
cpnh.save()
if "complaint_flag" in obj.changed_fields:
event = event_registry.get_event("case")()
event.process(
obj,
status="complaint_flag_toggled",
created_by=self.request.user,
notes="Complaint flag toggled: %s" % obj.complaint_flag,
)
# if we want to add more checks on changed fields then we should
# probably refactor this method to look at a list on the view
# called 'action_on_changed_fields' and enumerate that and perform
# the appropriate thing instead of adding more stuff here
class BaseFeedbackViewSet(
NestedGenericModelMixin,
mixins.ListModelMixin,
mixins.UpdateModelMixin,
mixins.RetrieveModelMixin,
viewsets.GenericViewSet,
):
model = Feedback
serializer_class = FeedbackSerializerBase
PARENT_FIELD = "provider_feedback"
lookup_field = "reference"
class BaseCSVUploadReadOnlyViewSet(
DetailSerializerMixin, mixins.ListModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet
):
model = CSVUpload
serializer_class = CSVUploadSerializerBase
serializer_detail_class = CSVUploadSerializerBase
filter_backends = (OrderingFilter,)
class BaseCSVUploadViewSet(
DetailSerializerMixin,
mixins.ListModelMixin,
mixins.CreateModelMixin,
mixins.UpdateModelMixin,
mixins.RetrieveModelMixin,
viewsets.GenericViewSet,
):
model = CSVUpload
serializer_class = CSVUploadSerializerBase
serializer_detail_class = CSVUploadSerializerBase
filter_backends = (OrderingFilter,)
def create(self, request, *args, **kwargs):
try:
return super(BaseCSVUploadViewSet, self).create(request, *args, **kwargs)
except IntegrityError:
raise ConflictException("Upload already exists for given month. Try overwriting.")
def update(self, request, *args, **kwargs):
if request.method.upper() == u"PATCH":
# Don't allow PATCH because they should DELETE+POST or PUT
return self.http_method_not_allowed(request, *args, **kwargs)
return super(BaseCSVUploadViewSet, self).update(request, *args, **kwargs)
class PaginatorWithExtraItem(Paginator):
"""
Same as the Paginator but it will return one more item than expected.
Used for endpoints that need to diff elements.
"""
extra_num = 1
def page(self, number):
"""
Returns a Page object for the given 1-based page number.
"""
number = self.validate_number(number)
bottom = (number - 1) * self.per_page
top = bottom + (self.per_page + self.extra_num)
if top + self.orphans >= self.count:
top = self.count
return self._get_page(self.object_list[bottom:top], number, self)
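
# Example: with per_page=5 and extra_num=1, page(2) returns object_list[5:11]
# (six items, assuming more than 11 items exist); the trailing extra item lets
# the client diff the last item on the page against the one that follows it.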
class BaseCaseNotesHistoryViewSet(NestedGenericModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet):
PARENT_FIELD = "casenoteshistory_set"
lookup_field = "reference"
serializer_class = CaseNotesHistorySerializerBase
model = CaseNotesHistory
pagination_serializer_class = RelativeUrlPaginationSerializer
paginate_by = 5
paginate_by_param = "page_size"
max_paginate_by = 100
@property
def paginator_class(self):
"""
If with_extra query param is provided, the endpoint will return
n+1 elements so that the frontend can build the diff from the
current+prev element.
"""
if self.request.QUERY_PARAMS.get("with_extra", False):
return PaginatorWithExtraItem
return Paginator
def get_queryset(self, **kwargs):
qs = super(BaseCaseNotesHistoryViewSet, self).get_queryset(**kwargs)
type_param = self.request.QUERY_PARAMS.get("type", None)
summary = self.request.QUERY_PARAMS.get("summary", False)
if type_param == "operator":
qs = qs.filter(provider_notes__isnull=True)
elif type_param == "cla_provider":
qs = qs.filter(operator_notes__isnull=True)
if summary == "true":
qs = qs.filter(include_in_summary=True)
return qs
|
983,410 | 8e0a4fd83c0a9679e2466e1b7f0a62ce88a152c6 | __author__ = 'Sergey'
import os
import io
from setuptools import setup, find_packages
from nose_ittr.ittr_multiplier import __version__
here = os.path.abspath(os.path.dirname(__file__))
# workaround for bug in python <= v2.7.3 when using nose.collector
# see http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing
except ImportError:
pass
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
with io.open(filename, encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
long_description = read('README.rst', 'CHANGES.txt')
setup(
name='nose_ittr',
version=__version__,
url='https://github.com/groveco/nose-ittr',
license='Apache Software License',
author='Sergey Ragatsky',
author_email = "serg@taykey.com",
tests_require=['nose'],
    install_requires=[],
description='nose expansion for supporting parametrized testing',
long_description=long_description,
packages=find_packages(),
include_package_data=True,
platforms='any',
keywords='nose nosetest docstring metaclass parametrized testing plugin',
test_suite='nose.collector',
zip_safe=False,
classifiers = [
'Programming Language :: Python',
'Development Status :: 4 - Beta',
'Natural Language :: English',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Testing',
],
entry_points={
'nose.plugins.0.10': [
'setup_ittr = nose_ittr.setup_ittr:SetupIttr'
]
},
)
|
983,411 | 906a3ce9824f1a1001268ae50ecaf9c11bb7dd0c | from django.urls import re_path
from . import views
app_name = 'Auth'
urlpatterns = [
re_path(r'^$', views.auth, name='auth'),
]
|
983,412 | 359d75ed5af41eda40467785c84ffdbccc5db3a5 | # Read a value in kg (kilograms), then compute and print the equivalent in g (grams).
# Input
kg = int(input('Digite o valor em quilogramas(kg): '))
# Processing
g = kg * 1000
# Output
print(f'{kg} quilogramas equivale a {g} gramas')
|
983,413 | ff68cd5900217888dd13dc1d94b48955270124a3 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""sample smiles from latent space"""
import rdkit
import argparse
import paddle
from src.vocab import Vocab, get_vocab
from src.utils import load_json_config
from src.jtnn_vae import JTNNVAE
lg = rdkit.RDLogger.logger()
lg.setLevel(rdkit.RDLogger.CRITICAL)
parser = argparse.ArgumentParser()
parser.add_argument('--nsample', type=int, required=True)
parser.add_argument('--vocab', required=True)
parser.add_argument('--model', required=True)
parser.add_argument('--output', required=True)
parser.add_argument('--config', required=True)
args = parser.parse_args()
config = load_json_config(args.config)
vocab = get_vocab(args.vocab)
vocab = Vocab(vocab)
model = JTNNVAE(vocab, config['hidden_size'], config['latent_size'],
config['depthT'], config['depthG'])
train_model_params = paddle.load(args.model)
model.set_state_dict(train_model_params)
model.eval()
res = []
for i in range(args.nsample):
smi = model.sample_prior()
print(i, smi)
res.append(smi)
with open(args.output, 'w') as f:
for smi in res:
f.write(smi + '\n')
|
983,414 | 1c07ac1c4fb1f78c57e6d8cf3fe7334abb1267ab |
from direct.actor.Actor import Actor
from direct.interval.IntervalGlobal import LerpPosInterval,Func,Sequence
from panda3d.core import ConfigVariableDouble,TextureStage,TransparencyAttrib
from screen.gaming.gentity import GEntity
from tools import dist3
class GUnit(GEntity):
def __init__(self,conf):
GEntity.__init__(self,conf)
self.p3dobject.reparentTo(self.gmap.units_node)
self.p3dobject.setTransparency(TransparencyAttrib.MAlpha)
        #to be made conditional for non-pickable units (e.g. bonus NPCs)
self.p3dobject.setTag('GUnit-pickable','1')
self.p3dobject.setPos(self.gmap.root.find('**/tile_'+str(conf['tileid'])),0,0,0)
#supposedly already a float, but will screw up if not, so just making sure.
self.move_speed=float(conf['move_speed'])
self.path=[]
self.popout_when_move_over=False
self.pid=conf['pid']
#highlight
self.ts_highlighted=TextureStage('ts_highlighted')
self.ts_highlighted.setMode(TextureStage.MDecal)
self.ts_highlighted.setSort(2)
#highlight
self.ts_selected=TextureStage('ts_selected')
self.ts_selected.setMode(TextureStage.MDecal)
self.ts_selected.setSort(3)
@staticmethod
def load_resources():
GUnit.textures={ 'highlighted':loader.loadTexture('data/models/highlighted.tex.png'),
'selected':loader.loadTexture('data/models/selected.tex.png'),
}
    def dispose(self):
        '''cleanup method'''
        GEntity.dispose(self)
        if hasattr(self,'popout_sequence'):
            self.popout_sequence.finish()
            del self.popout_sequence
def add_path(self,data):
'''
adds tile to pass by.
'''
#check for data completeness
if not 'path' in data:
out('WARNING in GUnit.add_path: incomplete data:\n'+str(data))
return
elif not isinstance(data['path'],list):
out('WARNING in GUnit.add_path: invalid data:\n'+str(data))
return
#data considered valid
self.path.extend([self.instances[eid] for eid in data['path']])
if not self.update_move in update_list:
update_list.append(self.update_move)
#out('GUnit.add_path:'+str(data))
def finish_move_to(self,data):
'''triggered by server side unit, to indicate the need to popout at end of move.'''
out('GUnit.finish_move_to()'+str(data))
if self.update_move in update_list:
self.popout_when_move_over=True
else:
self.popout()
def popout(self):
'''sets up the popout animation at end of unit's mission'''
scale=self.p3dobject.scaleInterval(.5,(.1,.1,.1))
finish=Func(lambda:dispose_list.append(self))
self.popout_sequence=Sequence(scale,finish)
self.popout_sequence.start()
def set_highlighted(self):
self.p3dobject.setTexture(self.ts_highlighted,self.textures['highlighted'])
def unset_highlighted(self):
self.p3dobject.clearTexture(self.ts_highlighted)
def set_selected(self):
self.p3dobject.setTexture(self.ts_selected,self.textures['selected'])
def unset_selected(self):
self.p3dobject.clearTexture(self.ts_selected)
def update_move(self):
        '''called every frame while a move is in progress.'''
if len(self.path)==0:
out('WARNING in GUnit.update_move: path is empty, but method still called. removing it.')
update_list.remove(self.update_move)
return
if not hasattr(self,'move_interval'):
#start moving
#first 3 args=model,duration,pos, the duration=1/... is relative to server side tile side size
self.move_interval=LerpPosInterval(self.p3dobject,
(1/(self.move_speed*ConfigVariableDouble('clock-frame-rate').getValue())),
self.path[0].p3dobject.getPos(),
name='interval_unit_move_'+str(self.eid)
)
self.p3dobject.lookAt(self.path[0].p3dobject.getPos())
self.p3dobject.loop('run')
self.move_interval.start()
else:
#is move ~over ?
#t=self.move_interval.getT()
#d=self.move_interval.getDuration()
#d=d-t
d=dist3(self.p3dobject,self.path[0].p3dobject)
#out('client '+str(t*100./d)+'%')
#arrived
if d<self.move_speed:
#out('client '+str(self.path[0].eid)+'@'+str(self.frame_no))
self.p3dobject.setPos(self.path[0].p3dobject,0,0,0)
self.path.pop(0)
if len(self.path)==0:
self.p3dobject.stop()
self.move_interval.finish()
del self.move_interval
update_list.remove(self.update_move)
if self.popout_when_move_over:
self.popout()
else:
#first 3 args=model,duration,pos
self.move_interval.finish()
self.move_interval=LerpPosInterval(self.p3dobject,
(1/(self.move_speed*ConfigVariableDouble('clock-frame-rate').getValue())),
self.path[0].p3dobject.getPos(),
name='interval_unit_move_'+str(self.eid)
)
self.p3dobject.lookAt(self.path[0].p3dobject.getPos())
self.move_interval.start()
class GH_Sprinter(GUnit):
def __init__(self,conf):
self.p3dobject=Actor('data/models/units/v_sprinter.egg',
{'run':'data/models/units/v_sprinter-run.egg',
}
)
GUnit.__init__(self,conf)
self.p3dobject.setName('GH_Sprinter_'+str(self.eid))
class GV_Sprinter(GUnit):
def __init__(self,conf):
self.p3dobject=Actor('data/models/units/v_sprinter.egg',
{'run':'data/models/units/v_sprinter-run.egg',
}
)
GUnit.__init__(self,conf)
|
983,415 | 0f98901f113a5f96bceb642bf4855a019d519725 | # https://leetcode-cn.com/problems/shortest-unsorted-continuous-subarray/
class Solution:
def findUnsortedSubarray(self, nums) -> int:
        tmp = sorted(nums)
        start = 0
        end = len(nums) - 1
        # skip the longest prefix that already matches the sorted order
        while start <= end:
            if nums[start] != tmp[start]:
                break
            start += 1
        # skip the longest suffix that already matches the sorted order
        while end >= 0:
            if nums[end] != tmp[end]:
                break
            end -= 1
        # if the array was already sorted, the pointers crossed and this is 0
        return max(0, end - start + 1)
if __name__ == "__main__":
pass
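    # Example (LeetCode 581): sorting nums[1:6] = [6, 4, 8, 10, 9] sorts the
    # whole array, so the answer is 5:
    # print(Solution().findUnsortedSubarray([2, 6, 4, 8, 10, 9, 15]))  # -> 5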
|
983,416 | 1b1b6b517b101898259c5e19c2465792fcbc46ba | # @Time : 2017/10/4 下午1:58
# @Author : user_info
import json
class Protocol(object):
@staticmethod
def json_put(filename, filesize, status="200", total=0, available=0):
msg_dic = {
"action": "put",
"status": status,
"filename": filename,
"filesize": filesize,
"total": total,
"available": available
}
return json.dumps(msg_dic)
@staticmethod
def json_auth(account=None, password=None, status="200", total=0, available=0):
msg_dic = {
"action": "auth",
"account": account,
"password": password,
"status": status,
"total": total,
"available": available
}
return json.dumps(msg_dic)
@staticmethod
def json_get(filename, filesize, status="200"):
msg_dic = {
"action": "get",
"status": status,
"filename": filename,
"filesize": filesize
}
return json.dumps(msg_dic)
@staticmethod
def json_ls(resultsize=0, status="200"):
msg_dic = {
"action": "ls",
"status": status,
"resultsize": resultsize
}
return json.dumps(msg_dic)
@staticmethod
def json_pwd(status="200"):
msg_dic = {
"action": "pwd",
"status": status
}
return json.dumps(msg_dic)
@staticmethod
def json_mkdir(cmd, status="200"):
msg_dic = {
"action": "mkdir",
"status": status,
"cmd": cmd
}
return json.dumps(msg_dic)
@staticmethod
def json_cd(dir_name, status="200"):
msg_dic = {
"action": "cd",
"status": status,
"dirname": dir_name
}
return json.dumps(msg_dic)
@staticmethod
def json_rm(filename=None, status="200", total=0, available=0):
msg_dic = {
"action": "rm",
"status": status,
"filename": filename,
"total": total,
"available": available
}
        return json.dumps(msg_dic)
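
# Usage sketch: every helper simply serializes a response dict to a JSON string.
if __name__ == "__main__":
    print(Protocol.json_put("report.pdf", 2048))  # {"action": "put", ...}
|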
983,417 | 5b327bdac4b8d502891feea93b6306dd73a42962 | listLen = int(input('Enter the length of list : '))
myList = []
for i in range(0,listLen):
currNum = int(input(f'Enter num at index {i} : '))
myList.append(currNum)
total = 0  # avoid shadowing the built-in sum()
for num in myList:
    total += num
print(myList)
print(f'Sum of all elements of the list : {total}') |
983,418 | 76bf2ef7eb70ade6b757eb1caef4e9575fa99f1a | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import tensorflow as tf
from tflib.data.dataset import batch_dataset, Dataset
_N_CPU = multiprocessing.cpu_count()
def disk_image_batch_dataset(img_paths, batch_size, labels=None, prefetch_batch=_N_CPU + 1, drop_remainder=True, filter=None,
map_func=None, num_threads=_N_CPU, shuffle=True, buffer_size=4096, repeat=-1):
"""Disk image batch dataset.
This function is suitable for jpg and png files
    img_paths: string list or 1-D tensor, each of which is an image path
labels: label list/tuple_of_list or tensor/tuple_of_tensor, each of which is a corresponding label
"""
if labels is None:
dataset = tf.data.Dataset.from_tensor_slices(img_paths)
elif isinstance(labels, tuple):
dataset = tf.data.Dataset.from_tensor_slices((img_paths,) + tuple(labels))
else:
dataset = tf.data.Dataset.from_tensor_slices((img_paths, labels))
def parse_func(path, *label):
img = tf.read_file(path)
img = tf.image.decode_png(img, 3)
return (img,) + label
if map_func:
def map_func_(*args):
return map_func(*parse_func(*args))
else:
map_func_ = parse_func
# dataset = dataset.map(parse_func, num_parallel_calls=num_threads) is slower
dataset = batch_dataset(dataset, batch_size, prefetch_batch, drop_remainder, filter,
map_func_, num_threads, shuffle, buffer_size, repeat)
return dataset
class DiskImageData(Dataset):
"""DiskImageData.
This function is suitable for jpg and png files
    img_paths: string list or 1-D tensor, each of which is an image path
labels: label list or tensor, each of which is a corresponding label
"""
def __init__(self, img_paths, batch_size, labels=None, prefetch_batch=_N_CPU + 1, drop_remainder=True, filter=None,
map_func=None, num_threads=_N_CPU, shuffle=True, buffer_size=4096, repeat=-1, sess=None):
super(DiskImageData, self).__init__()
dataset = disk_image_batch_dataset(img_paths, batch_size, labels, prefetch_batch, drop_remainder, filter,
map_func, num_threads, shuffle, buffer_size, repeat)
self._bulid(dataset, sess)
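
# Hypothetical usage sketch (TF1 style): how batches are consumed depends on
# tflib.data.dataset.Dataset, so only the entry point is shown; the paths and
# labels below are placeholders.
#
# paths = ['imgs/0001.png', 'imgs/0002.png']
# labels = [0, 1]
# data = DiskImageData(paths, batch_size=2, labels=labels, shuffle=False)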
|
983,419 | f10bc67192b489d4d352796630f3c608070e28b8 | #!/bin/env python2.7
# Mailmon v2.0 by Kyle Claisse
# This script reads in an email in text form and executes certain functions based on the content of the message.
#
# Currently requires a working MTA and the mail command on the box this bot runs on (I use Heirloom Mail v12.4)
# Probably should rewrite to use the smtplib python library but I never needed to. Would be trivial to rewrite to
# allow using third-party SMTP servers like gmail. Something for version 2.1 I guess.
import email
import re
from time import strftime
import os
from subprocess import check_output as sp_co
from difflib import SequenceMatcher as sMatcher
#######################
#### OPTIONS START ####
#######################
#This is the email address the autoresponder bot will use for the From: address in its return email.
FROM_ADDRESS = "mailmon@example.com"
#This is the path where the mail.txt is being output to. See readme for details.
path = "/home/mailmon/data"
#Approved email addresses. Only approved addresses can use this bot. Just add your approved emails to this list like in the example.
#If you want to allow anyone to use this bot, just add ALLOW_ALL.
authorized_emails = []
authorized_emails.append("authorized_person@example.com")
authorized_emails.append("another_authorized_person@example.com")
#This is the list of commands. If you don't add your command to this list it won't appear in any help messages.
cmdlist = []
cmdlist.append("uptime")
cmdlist.append("whos-logged-in")
#When an unknown command is received it will try to match the unknown command against the command list.
#This controls the sensitivity of the matcher, a higher number is more restrictive matching.
#0.75 is a good value to keep it at.
_MASTER_RATIO_ = 0.75
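#Example (illustrative): SequenceMatcher(None, "uptme", "uptime").ratio() is about 0.91,
#so the typo "uptme" would still be suggested at the default ratio of 0.75.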
#These you don't need to change (unless you know what you are doing)
emf = path + os.sep + "mail.txt"
outfile = path + os.sep + "out.txt"
#####################
#### OPTIONS END ####
#####################
def check_address(addy):
for x in authorized_emails:
if x == "ALLOW_ALL":
return True
if x == addy:
return True
return False
def main():
return_message = "UNKNOWN ERROR, RETURN EMAIL NOT SET"
email_file = open(emf, 'r')
msgobj = email.message_from_file(email_file)
email_file.close()
if msgobj.is_multipart() is False:
message = msgobj.get_payload()
return_addy = msgobj["from"]
#We lower the received message to make matching easier
nmsg = message.lower().strip()
#FOR DEBUGGING!
pq = open(path + os.sep + "mailmon_use.log", 'a')
log_message = str(strftime("%A %B %d, %Y - %r %Z")) + " :: " + return_addy + " :: " + nmsg
pq.write(log_message)
pq.write("\n")
pq.close()
if check_address(return_addy) is not True:
return_message = "You are not authorized to use this service. Your previous and future messages will be logged."
#############################
#### COMMANDS START HERE ####
#############################
elif "hi" in nmsg or "hello" in nmsg or "hey" in nmsg:
return_message = "Hello there! The current time here is: " + str(strftime("%A %B %d, %Y - %r %Z"))
elif nmsg == "uptime":
uptime = sp_co('''uptime | egrep "([0-9][0-9]:?){3}.up.[0-9]{1,4}.days?" -o''', shell=True)
return_message = "Current system uptime is: " + uptime.strip()
elif nmsg == "whos-logged-in":
try:
last_raw = sp_co("last -w -F | grep logged", shell=True).strip()
except:
last_raw = ""
last_list = re.split("\n", last_raw)
return_message = "There are currently %s users logged in%s"
num = 0
for entry in last_list:
entry = entry.strip()
line_match = re.match("^(.*?)\s+(.*?)\s+(.*?)\s+.*$", entry)
if line_match is not None:
num += 1
return_message += "\n%s from %s" % (line_match.group(1), line_match.group(3))
if num == 0:
return_message = return_message % (str(num), "")
else:
return_message = return_message % (str(num), ":")
#############################################
#### ADD MORE COMMANDS BELOW USING ELIFS ####
#############################################
elif "help" in nmsg:
cmdstr = "%s" % (cmdlist[3],)
for cmd in cmdlist[4:]:
cmdstr += ", " + cmd
return_message = "List of commands: %s" % cmdstr
else:
return_message = "Unknown command, %s." % str(nmsg)
#Try and match the failed command to a known command
for cmd in cmdlist:
if sMatcher(None, nmsg.lower(), cmd).ratio() > _MASTER_RATIO_:
return_message += " Did you mean %s? " % cmd
break
return_message += " Reply with 'help' for a list of commands."
send_email(return_message, return_addy)
def send_email(message, to_address):
if len(message) < 1:
return
#Remove file first
try:
os.remove(outfile)
except:
pass
ofile = open(outfile, 'w')
ofile.write(message)
ofile.close()
#Check if we have a good return email
if "ERROR" not in to_address:
#Send email out
cmd = "mail -r %s %s < %s" % (FROM_ADDRESS, to_address, outfile)
os.system(cmd)
else:
#Write the error to the log
pq = open(path + os.sep + "mailmon_use.log", 'a')
log_message = str(strftime("%A %B %d, %Y - %r %Z")) + " :: " + to_address
pq.write(log_message)
pq.write("\n")
pq.close()
#Remove both email files
try:
os.remove(outfile)
except:
pass
try:
os.remove(emf)
except:
pass
if __name__ == "__main__":
main()
|
983,420 | 0a14d099402299229757b0ef2109debe206e0760 | # B+ Mode Package
# Released under MIT License
# Copyright (c) 2020 TytusDb Team
from . import AVLTree
from . import BplusTree
import os
import pickle
from . import Serializable as serializable
import re
import shutil
def checkData():
if not os.path.isdir("./Data"):
os.mkdir("./Data")
if not os.path.isdir("./Data/BPlusMode"):
os.mkdir("./Data/BPlusMode")
if not os.path.isfile("./Data/BPlusMode/Databases.bin"):
with open("./Data/BPlusMode/Databases.bin", 'wb') as f:
dataBaseTree = AVLTree.AVLTree()
pickle.dump(dataBaseTree, f)
# Checks if the name is a valid SQL Identifier
def validateIdentifier(identifier):
# Returns true if is valid
return re.search("^[a-zA-Z][a-zA-Z0-9#@$_]*", identifier)
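# Illustrative behavior of the pattern above (note it is unanchored at the end,
# so only the leading characters are validated):
#   validateIdentifier("db1")  -> match (truthy)
#   validateIdentifier("1db")  -> None (falsy)
#   validateIdentifier("db-1") -> match on the "db" prefix (also truthy)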
def createDatabase(database):
if type(database) !=str:
return 1
checkData()
if database and validateIdentifier(database):
dataBaseTree = serializable.Read('./Data/BPlusMode/', 'Databases')
root = dataBaseTree.getRoot()
if dataBaseTree.search(root, database):
return 2
else:
dataBaseTree.add(root, database)
serializable.write('./Data/BPlusMode/', database, AVLTree.AVLTree())
serializable.update('./Data/BPlusMode/', 'Databases', dataBaseTree)
return 0
else:
return 1
def showDatabases():
checkData()
dataBaseTree = serializable.Read('./Data/BPlusMode/', "Databases")
root = dataBaseTree.getRoot()
dbKeys = dataBaseTree.postOrder(root)
return [] if len(dbKeys) == 0 else dbKeys[:-1].split("-")
def alterDatabase(dataBaseOld, dataBaseNew) -> int:
if type(dataBaseOld) !=str or type(dataBaseNew)!=str:
return 1
checkData()
if validateIdentifier(dataBaseOld) and validateIdentifier(dataBaseNew):
dataBaseTree = serializable.Read('./Data/BPlusMode/', "Databases")
root = dataBaseTree.getRoot()
if not dataBaseTree.search(root, dataBaseOld):
return 2
if dataBaseTree.search(root, dataBaseNew):
return 3
dataBaseTree.delete(root, dataBaseOld)
root = dataBaseTree.getRoot()
serializable.Rename('./Data/BPlusMode/', dataBaseOld, dataBaseNew)
dataBaseTree.add(root, dataBaseNew)
serializable.update('./Data/BPlusMode/', 'Databases', dataBaseTree)
return 0
else:
return 1
def dropDatabase(database):
if type(database) !=str:
return 1
checkData()
if validateIdentifier(database):
dataBaseTree = serializable.Read('./Data/BPlusMode/', "Databases")
root = dataBaseTree.getRoot()
if not dataBaseTree.search(root, database):
return 2
dataBaseTree.delete(root, database)
serializable.delete('./Data/BPlusMode/' + database)
serializable.update('./Data/BPlusMode/', 'Databases', dataBaseTree)
return 0
else:
return 1
# ---------------CRUD TABLE----------------#
# ----------------Erick--------------------#
def createTable(database, table, numberColumns):
if type(database) !=str or type(table)!=str or type(numberColumns)!=int:
return 1
# Validates identifier before searching
if validateIdentifier(database) and validateIdentifier(table) and numberColumns > 0:
checkData()
# Get the databases tree
dataBaseTree = serializable.Read('./Data/BPlusMode/', "Databases")
# Get the dbNode
databaseNode = dataBaseTree.search(dataBaseTree.getRoot(), database)
# If DB exist
if databaseNode:
tablesTree = serializable.Read(f"./Data/BPlusMode/{database}/", database)
if tablesTree.search(tablesTree.getRoot(), table):
return 3
else:
# Creates new table node
tablesTree.add(tablesTree.getRoot(), table)
serializable.update(f"./Data/BPlusMode/{database}/", database, tablesTree)
# Creates bin file for the new table
serializable.write(f"./Data/BPlusMode/{database}/", table, BplusTree.BPlusTree(5, numberColumns))
return 0
else:
return 2
else:
return 1
def showTables(database):
if type(database) !=str:
return 1
checkData()
if validateIdentifier(database):
dataBaseTree = serializable.Read('./Data/BPlusMode/', "Databases")
if dataBaseTree.search(dataBaseTree.getRoot(), database):
db = serializable.Read(f"./Data/BPlusMode/{database}/", database)
dbKeys = db.postOrder(db.getRoot())
return [] if len(dbKeys) == 0 else dbKeys[:-1].split("-")
else:
return None
else:
return None
def extractTable(database, table):
if type(database) !=str or type(table)!=str:
return None
checkData()
if validateIdentifier(database) and validateIdentifier(table):
# Get the databases tree
dataBaseTree = serializable.Read('./Data/BPlusMode/', "Databases")
# Get the dbNode
databaseNode = dataBaseTree.search(dataBaseTree.getRoot(), database)
# If DB exist
if databaseNode:
tablesTree = serializable.Read(f"./Data/BPlusMode/{database}/", database)
if tablesTree.search(tablesTree.getRoot(), table):
table = serializable.Read(f'./Data/BPlusMode/{database}/{table}/', table)
return list(table.lista().values())
else:
return None
else:
return None
else:
return None
def extractRangeTable(database, table, columnNumber, lower, upper):
if type(database) !=str or type(table)!=str or type(columnNumber)!=int:
return None
checkData()
if validateIdentifier(database) and validateIdentifier(table):
# Get the databases tree
dataBaseTree = serializable.Read('./Data/BPlusMode/', "Databases")
# Get the dbNode
databaseNode = dataBaseTree.search(dataBaseTree.getRoot(), database)
# If DB exist
if databaseNode:
tablesTree = serializable.Read(f"./Data/BPlusMode/{database}/", database)
if tablesTree.search(tablesTree.getRoot(), table):
table = serializable.Read(f'./Data/BPlusMode/{database}/{table}/', table)
tableList = list(table.lista().values())
validList = []
if columnNumber < 0 or columnNumber >= table.columns:
return None
try:
for i in tableList:
if type(i[columnNumber]) == str:
if str(i[columnNumber]) <= str(upper) and str(i[columnNumber]) >= str(lower):
validList.append(i)
elif type(i[columnNumber]) == float:
if float(i[columnNumber]) <= float(upper) and float(i[columnNumber]) >= float(lower):
validList.append(i)
elif type(i[columnNumber]) == int:
if int(i[columnNumber]) <= int(upper) and int(i[columnNumber]) >= int(lower):
validList.append(i)
elif type(i[columnNumber]) == bool:
if bool(i[columnNumber]) <= bool(upper) and bool(i[columnNumber]) >= bool(lower):
validList.append(i)
except:
return None
return validList
else:
return None
else:
return None
else:
return None
# ---------------Dyllan--------------------#
def alterAddPK(database: str, table: str, columns: list) -> int:
try:
if type(database)!=str or type(table)!=str or type(columns)!=list:
return 1
checkData()
# Get the databases tree
if validateIdentifier(database) and validateIdentifier(table):
dataBaseTree = serializable.Read('./Data/BPlusMode/', "Databases")
# Get the dbNode
databaseNode = dataBaseTree.search(dataBaseTree.getRoot(), database)
# If DB exist
if databaseNode:
tablesTree = serializable.Read(f"./Data/BPlusMode/{database}/", database)
if not tablesTree.search(tablesTree.getRoot(), table):
return 3 # table does not exist
else:
tuplaTree = serializable.Read(f"./Data/BPlusMode/{database}/{table}/", table)
try:
res = tuplaTree.CreatePK(columns)
except:
return 1
if res:
return res
else:
serializable.update(f"./Data/BPlusMode/{database}/{table}/", table, tuplaTree)
return 0
else:
return 2 # database does not exist
else:
return 1
except:
return 1
def alterDropPK(database: str, table: str) -> int:
try:
if type(database)!=str or type(table)!=str:
return 1
checkData()
if validateIdentifier(database) and validateIdentifier(table):
dataBaseTree = serializable.Read('./Data/BPlusMode/', "Databases")
root = dataBaseTree.getRoot()
if not dataBaseTree.search(root, database):
return 2 # database does not exist
else:
tablesTree = serializable.Read(f"./Data/BPlusMode/{database}/", database)
if not tablesTree.search(tablesTree.getRoot(), table):
return 3 # table does not exist
PKsTree = serializable.Read(f'./Data/BPlusMode/{database}/{table}/', table)
res = PKsTree.DeletePk()
if res:
return res
else:
serializable.update(f'./Data/BPlusMode/{database}/{table}/', table, PKsTree)
return 0 # success
else:
return 1
except:
return 1
# def alterAddFK(database: str, table: str, references: dict) -> int:
# def alterAddIndex(database: str, table: str, references: dict) -> int:
def alterTable(database: str, tableOld: str, tableNew: str) -> int:
if type(database) !=str or type(tableOld)!=str or type(tableNew)!=str:
return 1
checkData()
if validateIdentifier(tableOld) and validateIdentifier(tableNew):
dataBaseTree = serializable.Read('./Data/BPlusMode/', "Databases")
databaseNode = dataBaseTree.search(dataBaseTree.getRoot(), database)
if databaseNode:
tablesTree = serializable.Read(f"./Data/BPlusMode/{database}/", database)
rootT = tablesTree.getRoot()
if not tablesTree.search(rootT, tableOld):
return 3 # tableOld does not exist
elif tablesTree.search(rootT, tableNew):
return 4 # tableNew already exists
tablesTree.delete(rootT, tableOld)
serializable.Rename(f'./Data/BPlusMode/{database}/', tableOld, tableNew)
tablesTree.add(tablesTree.getRoot(), tableNew)
serializable.update(f"./Data/BPlusMode/{database}/", database, tablesTree)
return 0
else:
return 2 # db does not exist
else:
return 1
def alterAddColumn(database: str, table: str, default: any) -> int:
try:
if type(database)!=str or type(table)!=str:
return 1
checkData()
if validateIdentifier(database) and validateIdentifier(table):
# Get the databases tree
dataBaseTree = serializable.Read('./Data/BPlusMode/', "Databases")
# Get the dbNode
databaseNode = dataBaseTree.search(dataBaseTree.getRoot(), database)
# If DB exist
if databaseNode:
tablesTree = serializable.Read(f"./Data/BPlusMode/{database}/", database)
if not tablesTree.search(tablesTree.getRoot(), table):
return 3 # table does not exist
else:
tuplaTree = serializable.Read(f"./Data/BPlusMode/{database}/{table}/", table)
res = tuplaTree.addColumn(default)
if res:
return res
else:
serializable.update(f"./Data/BPlusMode/{database}/{table}/", table, tuplaTree)
return 0
else:
return 2 # database does not exist
else:
return 1
except:
return 1
def alterDropColumn(database: str, table: str, columnNumber: int) -> int:
try:
if type(database)!=str or type(table)!=str or type(columnNumber)!=int:
return 1
checkData()
if validateIdentifier(database) and validateIdentifier(table):
# Get the databases tree
dataBaseTree = serializable.Read('./Data/BPlusMode/', "Databases")
# Get the dbNode
databaseNode = dataBaseTree.search(dataBaseTree.getRoot(), database)
# If DB exist
if databaseNode:
tablesTree = serializable.Read(f"./Data/BPlusMode/{database}/", database)
if not tablesTree.search(tablesTree.getRoot(), table):
return 3 # table does not exist
else:
tuplaTree = serializable.Read(f"./Data/BPlusMode/{database}/{table}/", table)
if columnNumber < 0 or columnNumber >= tuplaTree.columns:
return 5 #out of limit
else:
res = tuplaTree.dropColumn(columnNumber)
if res:
return res
else:
serializable.update(f"./Data/BPlusMode/{database}/{table}/", table, tuplaTree)
return 0
else:
return 2 # database does not exist
else:
return 1
except:
return 1
def dropTable(database: str, table: str) -> int:
try:
if type(database)!=str or type(table)!=str:
return 1
checkData()
# Get the databases tree
dataBaseTree = serializable.Read('./Data/BPlusMode/', "Databases")
databaseNode = dataBaseTree.search(dataBaseTree.getRoot(), database)
# If DB exist
if databaseNode:
tablesTree = serializable.Read(f"./Data/BPlusMode/{database}/", database)
root = tablesTree.getRoot()
if not tablesTree.search(root, table):
return 3 # table does not exist
else:
tablesTree.delete(root, table)
serializable.delete(f"./Data/BPlusMode/{database}/{table}")
serializable.update(f"./Data/BPlusMode/{database}/", database, tablesTree)
return 0
else:
return 2
except:
return 1
# ---------------CRUD TUPLA----------------#
# ---------------Rudy----------------------#
def dropAll():
if os.path.isdir('./Data/BPlusMode'):
shutil.rmtree('./Data/BPlusMode')
def insert(database, table, register):
if type(database) !=str or type(table)!=str or type(register)!=list:
return 1
checkData()
if validateIdentifier(database) and validateIdentifier(table):
dataBaseTree = serializable.Read('./Data/BPlusMode/', "Databases")
root = dataBaseTree.getRoot()
if not dataBaseTree.search(root, database):
return 2 # database does not exist
else:
tablesTree = serializable.Read(f"./Data/BPlusMode/{database}/", database)
if not tablesTree.search(tablesTree.getRoot(), table):
return 3 # table does not exist
PKsTree = serializable.Read(f'./Data/BPlusMode/{database}/{table}/', table)
res = PKsTree.register(register)
if res:
return res
serializable.update(f'./Data/BPlusMode/{database}/{table}/', table, PKsTree)
return 0 # success
else:
return 1
def loadCSV(filepath, database, table, tipado):
if type(database) !=str or type(table)!=str or type(filepath)!=str:
return []
checkData()
if validateIdentifier(database) and validateIdentifier(table):
dataBaseTree = serializable.Read('./Data/BPlusMode/', "Databases")
root = dataBaseTree.getRoot()
if not dataBaseTree.search(root, database):
return []
tablesTree = serializable.Read(f"./Data/BPlusMode/{database}/", database)
if not tablesTree.search(tablesTree.getRoot(), table):
return []
try:
res = []
import csv
PKsTree = serializable.Read(f'./Data/BPlusMode/{database}/{table}/', table)
with open(filepath, 'r') as file:
reader = csv.reader(file, delimiter=',')
j = 0
for row in reader:
if tipado:
i=0
for x in row:
if tipado[j][i] == bool:
if x == 'False':
row[i] = bool(0)
else:
row[i] = bool(1)
else:
row[i] = tipado[j][i](x)
i=i+1
j+=1
res.append(PKsTree.register(row))
serializable.update(f'./Data/BPlusMode/{database}/{table}/', table, PKsTree)
return res
except:
return []
else:
return []
def extractRow(database, table, columns):
if type(database) !=str or type(table)!=str or type(columns)!=list:
return []
checkData()
if validateIdentifier(database) and validateIdentifier(table):
dataBaseTree = serializable.Read('./Data/BPlusMode/', "Databases")
root = dataBaseTree.getRoot()
if not dataBaseTree.search(root, database):
return [] # database does not exist
else:
tablesTree = serializable.Read(f"./Data/BPlusMode/{database}/", database)
if not tablesTree.search(tablesTree.getRoot(), table):
return [] # table does not exist
PKsTree = serializable.Read(f'./Data/BPlusMode/{database}/{table}/', table)
return PKsTree.search(columns) # success
else:
return []
def update(database, table, register, columns):
if type(database) !=str or type(table)!=str or type(register)!=dict or type(columns)!=list:
return 1
checkData()
if validateIdentifier(database) and validateIdentifier(table):
dataBaseTree = serializable.Read('./Data/BPlusMode/', "Databases")
root = dataBaseTree.getRoot()
if not dataBaseTree.search(root, database):
return 2 # database does not exist
else:
tablesTree = serializable.Read(f"./Data/BPlusMode/{database}/", database)
if not tablesTree.search(tablesTree.getRoot(), table):
return 3 # table does not exist
PKsTree = serializable.Read(f'./Data/BPlusMode/{database}/{table}/', table)
try:
res = PKsTree.update(register, columns)
serializable.update(f'./Data/BPlusMode/{database}/{table}/', table, PKsTree)
return res
except:
return 1
else:
return 1
def delete(database, table, columns):
if type(database) !=str or type(table)!=str or type(columns)!=list:
return 1
checkData()
if validateIdentifier(database) and validateIdentifier(table):
dataBaseTree = serializable.Read('./Data/BPlusMode/', "Databases")
root = dataBaseTree.getRoot()
if not dataBaseTree.search(root, database):
return 2 # database does not exist
else:
tablesTree = serializable.Read(f"./Data/BPlusMode/{database}/", database)
if not tablesTree.search(tablesTree.getRoot(), table):
return 3 # table does not exist
PKsTree = serializable.Read(f'./Data/BPlusMode/{database}/{table}/', table)
if len(PKsTree.search(columns)):
try:
PKsTree.delete(columns)
serializable.update(f'./Data/BPlusMode/{database}/{table}/', table, PKsTree)
return 0
except:
return 1
else:
return 4
else:
return 1
def truncate(database, table):
if type(database) !=str or type(table)!=str:
return 1
checkData()
if validateIdentifier(database) and validateIdentifier(table):
dataBaseTree = serializable.Read('./Data/BPlusMode/', "Databases")
root = dataBaseTree.getRoot()
if not dataBaseTree.search(root, database):
return 2 # database does not exist
else:
tablesTree = serializable.Read(f"./Data/BPlusMode/{database}/", database)
if not tablesTree.search(tablesTree.getRoot(), table):
return 3 # table does not exist
PKsTree = serializable.Read(f'./Data/BPlusMode/{database}/{table}/', table)
try:
PKsTree.truncate()
serializable.update(f'./Data/BPlusMode/{database}/{table}/', table, PKsTree)
return 0
except:
return 1
else:
return 1
def showCollection():
checkData()
dataB = showDatabases()
print('DataBases: ',dataB)
for x in dataB:
print("")
print("********************* [ DATABASE: "+str(x)+"] *********************")
dataT = showTables(x)
print(x,"Tables:",dataT)
for y in dataT:
print("")
print("---------------------- [ TABLE: "+str(y)+"] ----------------------")
dataTupla = extractTable(x, y)
for z in dataTupla:
print(z)
# ---------------Marcos--------------------#
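# Minimal usage sketch (illustrative; names hypothetical, return codes per the functions above):
#   createDatabase("db1")                 # 0 on success
#   createTable("db1", "t1", 3)           # 0 on success
#   insert("db1", "t1", [1, "a", True])   # 0 on success
#   extractTable("db1", "t1")             # -> list of registered rows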
|
983,421 | 4b9c30075a344559279e255aa609965678a607b6 | #!/usr/bin/env python
"""
This is a way to save the startup time when running img2py on lots of
files...
"""
from wx.tools import img2py
command_lines = [
"-a -F -n setting img/process.png images.py"
]
if __name__ == "__main__":
for line in command_lines:
args = line.split()
img2py.main(args)
|
983,422 | 511ddd54b77b84523eb8922e53f39e2471c76cdc | # This file is intended for Pixeldrohne's sister bot, PixelDev.
import discord
import sys
import asyncio
import keys
import random
import urllib.request
client = discord.Client()
@client.event
async def on_ready():
print('Eingeloggt als')
print(client.user.name)
print(client.user.id)
print('------------------------')
await client.change_presence(game=discord.Game(name='mit dev.help', type=1, url="https://twitch.tv/pilleniusmc"))
@client.event
async def on_message(message):
# Help for the dev branch and cutting edge
if message.content.lower().startswith('dev.help'):
user = message.author
embed = discord.Embed(
title="Kategorie: Test",
description="Alle Befehle, die hier aufgeführt sind noch in der Testphase, heißt sie können komplett "
"verbuggt sein.\n[Beta]: Sollte soweit stabil sein.\n[Alpha]: Könnte zu Abstürzen führen."
)
embed.add_field(name="Error 404", value="There seems to be nothing.")
await client.send_message(user, embed=embed)
if message.content.lower().startswith('dev.lsd'):
# öffnen = open("config/zitate.txt", "r", encoding='utf-8')
öffnen = urllib.request.urlopen("https://sherlock-holm.es/stories/plain-text/cano.txt")
for line in öffnen:
line = line.strip()
line = str(line)
if not line == "b''":
line = line[2:]
line = line.rstrip('\'')
await client.send_message(message.author, line)
await asyncio.sleep(1)
if message.content.lower().startswith('dev.tt'):
ttemb = discord.Embed(
title="PilleniusMC",
description="So könnte ein Twitch Chat Embed aussehen.",
color=0x6441a4
)
await client.send_message(message.channel, embed=ttemb)
if message.author.id == keys.pmcid and message.content.lower().startswith('dev.halt'):
await client.close()
sys.exit(1)
@client.event
async def on_reaction_add(reaction, user):
msg = reaction.message
chat = reaction.message.channel
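# Note: 'messageid' is assumed to be defined elsewhere (e.g. saved when the bot posts the
# tracked message); as written in this file it is undefined and would raise a NameError.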
if reaction.emoji == "👎" and msg.id == messageid and not user.id == client.user.id:
lol_msg = await client.send_message(chat, "Hey {0}! Du sollst dem bitte nicht auch noch den Daumen geben.".format(user.mention))
await client.remove_reaction(msg, "👎", user)
await asyncio.sleep(3)
await client.delete_message(lol_msg)
# client.start(keys.dev)
# client.run(keys.dev)
client.run(keys.eng)
|
983,423 | 15c5856a48e1c6d3aef9f0940c1bdae346dec52c | #!/usr/bin/env python3
# -*- coding: latin-1 -*-
import argparse
import os.path
import encodedcc
import sys
EPILOG = '''
Script to fix the controlled_by backfill problems
This is a dryrun default script, run with '--update' to PATCH data
Usage:
%(prog)s --infile MyFile.txt
%(prog)s --infile ENCSR000AAA
%(prog)s --infile ENCSR000AAA,ENCSR000AAB,ENCSR000AAC
%(prog)s --query "/search/?type=Experiment"
Script will take a file with single column list of accessions
Can also take a single accession or comma separated list of accessions
A query from which to gather accessions
%(prog)s --method single
%(prog)s --method multi
%(prog)s --method biosample
There are three methods to pick from
"single" assumes one replicate in the control
"multi" assumes one control with number of replicates equal to number of replicates in experiment
"biosample" assumes multiple controls that should be matched on biosample
***By NOT selecting the '--method' option the script will try to guess at what the correct method is***
%(prog)s --ignore_runtype
This makes the script ignore the value of the paired ends, default is off
%(prog)s --missing
Script will print out only the names of files missing controlled_by
For more details:
%(prog)s --help
'''
def getArgs():
parser = argparse.ArgumentParser(
description=__doc__, epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('--method',
help="'single' = there is only one replicate in the control, \
'multi' = one control with same number of replicates as experiment has replicates, \
'biosample' = multiple controls should be matched on the biosample",
choices=["single", "multi", "biosample"])
parser.add_argument('--ignore_runtype',
help="Ignores value of paired-end. Default is off",
default=False,
action='store_true')
parser.add_argument('--infile',
help="file containing single column list of object accessions,\
single accession, or comma separated list of accessions")
parser.add_argument('--query',
help="query of objects you want to process")
parser.add_argument('--key',
default='default',
help="The keypair identifier from the keyfile. \
Default is --key=default")
parser.add_argument('--keyfile',
default=os.path.expanduser("~/keypairs.json"),
help="The keypair file. Default is --keyfile=%s" % (os.path.expanduser("~/keypairs.json")))
parser.add_argument('--debug',
default=False,
action='store_true',
help="Print debug messages. Default is off.")
parser.add_argument('--update',
default=False,
action='store_true',
help="Let the script PATCH the data. Default is off")
parser.add_argument('--missing',
default=False,
action='store_true',
help="Only print files that are missing controlled_by.\
Default is off")
args = parser.parse_args()
return args
class BackFill:
def __init__(self, connection, debug=False, missing=False, update=False, ignore_runtype=False):
self.connection = connection
self.DEBUG = debug
self.MISSING = missing
self.update = update
self.ignore_runtype = ignore_runtype
self.dataList = []
def updater(self, exp, con):
''' helper function runs the update step'''
temp = encodedcc.get_ENCODE(
exp + '?datastore=database', self.connection).get("controlled_by", [])
if con not in temp:
control = temp + [con]
patch_dict = {"controlled_by": control}
print("patching experiment file {} with controlled_by {}".format(exp, con))
encodedcc.patch_ENCODE(exp, self.connection, patch_dict)
else:
print("ERROR: controlled_by for experiment file {} already contains {}".format(
exp, con))
def single_rep(self, obj):
'''one control with one replicate in control,
multiple replicates in experiment'''
control_files = encodedcc.get_ENCODE(
obj["possible_controls"][0]["accession"], self.connection, frame="embedded").get("files", [])
if len(control_files) == 0:
if self.DEBUG:
print("Control object {} has no files".format(
obj["possible_controls"][0]["accession"]), file=sys.stderr)
return
for c in control_files:
if c.get("file_type", "") == "fastq":
exp_list = []
for e in obj["files"]:
if e.get("file_type", "") == "fastq":
if not self.MISSING or (self.MISSING and not e.get("controlled_by")):
exp_list.append(e["accession"])
for exp in exp_list:
temp = {"ExpAcc": obj["accession"], "Method": "Single",
"ExpFile": exp, "ConFile": c["accession"]}
self.dataList.append(temp)
if self.update:
self.updater(exp, c["accession"])
if self.DEBUG:
print("ExpFile: {}, ConFile: {}".format(
temp["ExpFile"], temp["ConFile"]))
def pair_dict_maker(self, x_data, x):
''' helper function makes the exp_data
and con_data dictionaries'''
x_file_bio_num = x.get("biological_replicates")
x_file_paired = x.get("paired_end")
x_file_acc = x["accession"]
if self.ignore_runtype:
x_file_paired = None
x_pair = str(x_file_bio_num[0]) + "-" + str(x_file_paired)
x_data[x_file_acc] = x_pair
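# e.g. a fastq file with biological_replicates [2] and paired_end "1" gets the key "2-1"
# ("2-None" when run type is ignored); files are later matched on equal keys.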
def multi_rep(self, obj):
'''one control, with one replicate in
control per replicate in experiment'''
control_files = encodedcc.get_ENCODE(
obj["possible_controls"][0]["accession"], self.connection, frame="embedded").get("files", [])
control_replicates = obj["possible_controls"][0].get("replicates", [])
exp_data = {}
con_data = {}
if len(control_replicates) != len(obj["replicates"]):
if self.DEBUG:
print("Control has {} replicates and experiment has {} replicates".format(
len(control_replicates), len(obj["replicates"])), file=sys.stderr)
return
if len(control_files) == 0:
if self.DEBUG:
print("Control {} has no files".format(
obj["possible_controls"][0]["accession"]), file=sys.stderr)
return
for e in obj["files"]:
if e.get("file_type", "") == "fastq":
if not self.MISSING or (self.MISSING and not e.get("controlled_by")):
self.pair_dict_maker(exp_data, e)
for c in control_files:
if c.get("file_type", "") == "fastq":
self.pair_dict_maker(con_data, c)
if self.ignore_runtype:
self.mini(exp_data, con_data, obj)
else:
self.mini(con_data, exp_data, obj)
def mini(self, x_data, y_data, obj):
''' just a helper function
does all the fancy sorting for multi rep
'''
for x_key in x_data.keys():
temp_list = []
for y_key in y_data.keys():
if x_data[x_key] == y_data[y_key]:
temp_list.append(y_key)
if self.ignore_runtype:
for t in temp_list:
temp = {
"ExpAcc": obj["accession"], "Method": "Multi-runtype ignored", "ExpFile": x_key, "ConFile": t}
self.dataList.append(temp)
if self.update:
self.updater(x_key, t)
if self.DEBUG:
print("ExpFile: {}, ConFile: {}".format(
temp["ExpFile"], temp["ConFile"]))
else:
for t in temp_list:
temp = {
"ExpAcc": obj["accession"], "Method": "Multi", "ExpFile": t, "ConFile": x_key}
self.dataList.append(temp)
if self.update:
self.updater(t, x_key)
if self.DEBUG:
print("ExpFile: {}, ConFile: {}".format(
temp["ExpFile"], temp["ConFile"]))
def multi_control(self, obj):
'''multiple controls, match on biosample'''
con_data = {}
val = True
for con in obj["possible_controls"]:
c = encodedcc.get_ENCODE(
con["accession"], self.connection, frame="embedded")
if c.get("replicates"):
for rep in c["replicates"]:
if c.get("files"):
con_bio_acc = rep["library"]["biosample"]["accession"]
con_bio_num = rep["biological_replicate_number"]
for f in c["files"]:
if f.get("file_type", "") == "fastq":
con_file_bio_num = f["biological_replicates"]
if con_bio_num in con_file_bio_num:
con_file_acc = f["accession"]
con_data[con_bio_acc] = con_file_acc
else:
if self.DEBUG:
print("No files found for control {}".format(
con["accession"]), file=sys.stderr)
val = False
else:
if self.DEBUG:
print("No replicates found in control {}".format(
con["accession"]), file=sys.stderr)
val = False
if val:
exp_data = {}
for e in obj["replicates"]:
exp_bio_acc = e["library"]["biosample"]["accession"]
exp_bio_num = e["biological_replicate_number"]
for f in obj["files"]:
if f.get("file_type", "") == "fastq":
if not self.MISSING or (self.MISSING and not f.get("controlled_by")):
exp_file_bio_num = f["biological_replicates"]
if exp_bio_num in exp_file_bio_num:
exp_file_acc = f["accession"]
exp_data[exp_bio_acc] = exp_file_acc
for key in exp_data.keys():
if con_data.get(key):
temp = {"ExpAcc": obj["accession"], "Method": "Biosample",
"ExpFile": exp_data[key], "ConFile": con_data[key]}
self.dataList.append(temp)
if self.update:
self.updater(exp_data[key], con_data[key])
if self.DEBUG:
print("Biosample: {}, ExpFile: {}, ConFile: {}".format(
key, temp["ExpFile"], temp["ConFile"]))
def main():
args = getArgs()
key = encodedcc.ENC_Key(args.keyfile, args.key)
connection = encodedcc.ENC_Connection(key)
accessions = []
if args.update:
print("This is an UPDATE run data will be PATCHed")
else:
print("This is a dryrun, no data will be changed")
if args.infile:
if os.path.isfile(args.infile):
accessions = [line.rstrip('\n') for line in open(args.infile)]
else:
accessions = args.infile.split(",")
elif args.query:
if "search" in args.query:
temp = encodedcc.get_ENCODE(
args.query, connection).get("@graph", [])
else:
temp = [encodedcc.get_ENCODE(args.query, connection)]
if any(temp):
for obj in temp:
if obj.get("accession"):
accessions.append(obj["accession"])
elif obj.get("uuid"):
accessions.append(obj["uuid"])
elif obj.get("@id"):
accessions.append(obj["@id"])
elif obj.get("aliases"):
accessions.append(obj["aliases"][0])
if len(accessions) == 0:
# if something happens and we end up with no accessions stop
print("ERROR: object has no identifier", file=sys.stderr)
sys.exit(1)
else:
for acc in accessions:
obj = encodedcc.get_ENCODE(acc, connection, frame="embedded")
isValid = True
check = ["replicates", "files"]
for c in check:
if not obj.get(c):
if args.debug:
print("Missing {} for {}".format(
c, acc), file=sys.stderr)
isValid = False
if obj.get("possible_controls"):
for p in obj["possible_controls"]:
for c in check:
if not obj.get(c):
if args.debug:
print("Missing {} for {}".format(
c, p["accession"]), file=sys.stderr)
isValid = False
else:
isValid = False
if args.debug:
print("Missing possible_controls for {}".format(
acc), file=sys.stderr)
if isValid:
backfill = BackFill(connection, debug=args.debug, missing=args.missing,
update=args.update, ignore_runtype=args.ignore_runtype)
if args.method == "single":
if args.debug:
print("SINGLE REP {}".format(acc))
backfill.single_rep(obj)
elif args.method == "multi":
if args.debug:
print("MULTI REP {}".format(acc))
backfill.multi_rep(obj)
elif args.method == "biosample":
if args.debug:
print("BIOSAMPLE {}".format(acc))
backfill.multi_control(obj)
else:
exp_rep = len(obj["replicates"])
exp_con = len(obj["possible_controls"])
if exp_con == 1:
# one possible control
con_rep = len(obj["possible_controls"]
[0]["replicates"])
if con_rep == exp_rep:
# same number experiment replicates as control replicates
# method is multi
if args.debug:
print("MULTI REP {}".format(acc))
backfill.multi_rep(obj)
elif con_rep == 1:
# one control replicate and multiple experiment replicates
# method is single
if args.debug:
print("SINGLE REP {}".format(acc))
backfill.single_rep(obj)
else:
if args.debug:
print("Experiment {} contains {} experiment replicates and {} control replicates and so does not fit the current pattern!".format(
acc, exp_rep, con_rep))
elif exp_con > 1:
# more than one possible control
con_reps = 0
for con in obj["possible_controls"]:
if len(con["replicates"]) == 1:
con_reps += 1
if con_reps == exp_rep:
# same number of controls with one replicate as number of experiment replicates
# method is biosample
if args.debug:
print("BIOSAMPLE {}".format(acc))
backfill.multi_control(obj)
else:
if args.debug:
print("Experiment {} contains {} experiment replicates and {} control replicates between {} total controls and so does not fit the current pattern!".format(
acc, exp_rep, con_reps, exp_con))
else:
if args.debug:
print(
"Experiment {} does not fit any of the current patterns!".format(acc))
if len(backfill.dataList) > 0:
print("Experiment\tMethod\tExperimentFile\tControlFile")
for data in backfill.dataList:
print("{ExpAcc}\t{Method}\t{ExpFile}\t{ConFile}".format(
ExpAcc=data["ExpAcc"], Method=data["Method"], ExpFile=data["ExpFile"], ConFile=data["ConFile"]))
if __name__ == '__main__':
main()
|
983,424 | d61aa68e377b52855f1113dc43f81fda4b333297 | #!/usr/bin/python3
"""This is the place class"""
from models.base_model import BaseModel, Base
from models.review import Review
from sqlalchemy import Table, Column, String, ForeignKey, Float, Integer
from sqlalchemy.orm import relationship
from os import environ
import models
place_amenity = Table('place_amenity', Base.metadata,
Column('place_id', String(60),
ForeignKey('places.id'), primary_key=True),
Column('amenity_id', String(60),
ForeignKey('amenities.id'), primary_key=True))
class Place(BaseModel, Base):
"""This is the class for Place
Attributes:
__tablename__: table name in MySQL
city_id: city id
user_id: user id
name: name input
description: string of description
number_rooms: number of room in int
number_bathrooms: number of bathrooms in int
max_guest: maximum guest in int
price_by_night: price for a stay per night in int
latitude: latitude in float
longitude: longitude in float
amenity_ids: list of Amenity ids
reviews: list of reviews
amenities: relationship many to many with Amenity
"""
__tablename__ = "places"
city_id = Column(String(60), ForeignKey('cities.id'), nullable=False)
user_id = Column(String(60), ForeignKey('users.id'), nullable=False)
name = Column(String(128), nullable=False)
description = Column(String(1024), nullable=True)
number_rooms = Column(Integer, default=0, nullable=False)
number_bathrooms = Column(Integer, default=0, nullable=False)
max_guest = Column(Integer, default=0, nullable=False)
price_by_night = Column(Integer, default=0, nullable=False)
latitude = Column(Float, nullable=True)
longitude = Column(Float, nullable=True)
amenity_ids = []
if environ.get('HBNB_TYPE_STORAGE') == "db":
cities = relationship('City', foreign_keys=[city_id],
back_populates='places')
user = relationship('User', foreign_keys=[user_id],
back_populates='places')
amenities = relationship("Amenity ", secondary=place_amenity,
viewonly=False)
else:
@property
def reviews(self):
"""getter attribute reviews that returns the list of Review
instances with place_id equals to the current Place.id
Return:
list of reviews
"""
list_reviews = []
all_reviews = models.storage.all(Review)
for review_item in all_reviews.values():
if review_item.place_id == self.id:
list_reviews.append(review_item)
return list_reviews
@property
def amenities(self):
"""Getter attribute amenities that returns the list of Amenity
instances based on the attribute amenity_ids that contains all
Amenity.id linked to the Place
Return:
list of amenities
"""
list_amenities = []
for amenity_obj in self.amenity_ids:
list_amenities.append(amenity_obj)
return list_amenities
@amenities.setter
def amenities(self, amenity_obj):
"""Setter attribute amenities that handles append method for adding
an Amenity.id to the attribute amenity_ids
"""
if amenity_obj.__class__.__name__ == "Amenity":
self.amenity_ids.append(amenity_obj)
|
983,425 | 0f68ba5eb8700fe4818e3c7c07a5ff7d988c7c58 | from output.models.nist_data.list_pkg.unsigned_int.schema_instance.nistschema_sv_iv_list_unsigned_int_length_2_xsd.nistschema_sv_iv_list_unsigned_int_length_2 import NistschemaSvIvListUnsignedIntLength2
obj = NistschemaSvIvListUnsignedIntLength2(
value=[
4133421163,
4173232134,
4141224163,
4162631181,
4172244133,
4151834133,
]
)
|
983,426 | b17cc9d1e2ac3fbe4b883cbfd702f0ee4d39d98a | # w3schools is the reference for Python
class Pessoa:
# constructor
def __init__(self, nome, idade):
self.nome = nome
self.idade = idade
def sono(self):
return self.idade / 3
a = Pessoa("Flavio",21)
print(a.nome)
print(a.idade)
print(a.nome," você já dormiu ", a.sono(), " anos")
|
983,427 | 1f8e45f810c14ec5614785062c96a9a2e5a0c3b8 | import logging
import os
import sys
from os import getenv
import base64
# get this file's directory independent of where it's run from
here = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(here, "vendored"))
import requests
from capparselib.parsers import CAPParser
from dogpile.cache import make_region
ALERTED_USERPASS = getenv('ALERTED_USERPASS', 'admin:password')
RACK_ENV = getenv('RACK_ENV', 'development')
ALERTED_API = getenv('ALERTED_API', 'http://localhost:8000/api/v1/alerts/')
HEADERS = {'Content-Type': 'application/xml',
'Authorization': 'Basic %s' % base64.b64encode(ALERTED_USERPASS.encode()).decode()}
REDIS_URL = getenv('REDIS_URL', 'redis://localhost:6379/0')
def get_cache():
"""
A function to return a filesystem based cache object
:return:
"""
if RACK_ENV == "production":
region = make_region().configure(
'dogpile.cache.redis',
arguments={
'url': REDIS_URL,
'redis_expiration_time': 60 * 60 * 2, # 2 hours
'distributed_lock': True
}
)
elif RACK_ENV == "staging":
region = make_region().configure(
'dogpile.cache.null'
)
elif RACK_ENV in ["development", "ci"]:
region = make_region().configure(
'dogpile.cache.dbm',
expiration_time=86400,
arguments={
"filename": "cache"
}
)
return region
def transmit(alerts, trace_entity=None):
"""
A function to transmit XML to Alerted web service
:param alerts: XML CAP1.2 alerts to transmit
:return:
"""
#
# Determine if the alert can be parsed as valid CAP XML
# This will be erased on each deploy to Heroku, but that's OK
cache = get_cache()
result = False
logging.info("Querying cache and attempting to transmit %s alerts" % len(alerts))
for alert in alerts:
# TODO standardise input a bit better
if isinstance(alert, bytes):
alert = alert.decode()
alert = alert.replace("\n", '').replace("\t", '')
identifier = ''
active = False
try:
alert_list = CAPParser(alert).as_dict()
identifier = str(alert_list[0]['cap_id'])
active = cache.get(identifier)
except Exception:
logging.error("Potentially invalid alert")
if not active and identifier:
resp = requests.post(url=ALERTED_API, data=alert, headers=HEADERS)
logging.info(resp)
if resp.status_code == 201:
cache.set(identifier, "submitted")
result = True
elif resp.status_code == 400:
print("Invalid query (duplicate?) %s" % identifier)
cache.set(identifier, "invalid")
else:
print("Unable to submit alert (%s) %s" % (str(resp.status_code), identifier))
return result
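# Illustrative call (a sketch; the alert payload is a hypothetical CAP 1.2 XML string):
#   with open('alert.xml') as f:
#       transmit([f.read()])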
|
983,428 | 39bee0a443c21df3aed5b3e81432f34d74ed447e | from django.http import HttpResponseRedirect, Http404
from django.core.paginator import Paginator, InvalidPage, EmptyPage
def paginate(context, result_key, query, count, page, root):
if page == '1' or page == 1:
return HttpResponseRedirect(root)
elif page is None:
page = 1
else:
page = int(page)
p = Paginator(query, count)
if p.num_pages >= page and page != 0:
context['pagination'] = {}
context['pagination']['current_page'] = page
context['pagination']['root'] = root
context['pagination']['page_range'] = p.page_range
context['pagination']['num_pages'] = p.num_pages
context[result_key] = p.page(page).object_list
if p.page(page).has_previous():
context['pagination']['previous_page'] = p.page(page).previous_page_number()
if p.page(page).has_next():
context['pagination']['next_page'] = p.page(page).next_page_number()
else:
raise Http404
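# Illustrative call from a view (names hypothetical):
#   context = {}
#   response = paginate(context, 'articles', Article.objects.all(), 10,
#                       request.GET.get('page'), '/articles/')
#   # page 1 redirects to the root URL; other pages populate context['pagination']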
|
983,429 | 9b2b0f0f754dfd9eb809821a27b435966d88f174 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Global variables
from django.conf import settings
def lang(request):
return {'lang': settings.LANGUAGE_CODE} |
983,430 | 2719a15cc8ee43afdb10152b09cdf3628c450ccb | from selenium.webdriver.common.by import By
class BasePageLocators():
BASKET_LINK = (By.CSS_SELECTOR, ".btn-group > a.btn")
LOGIN_LINK = (By.CSS_SELECTOR, "#login_link")
LOGIN_LINK_INVALID = (By.CSS_SELECTOR, "#login_link_inc")
USER_ICON = (By.CSS_SELECTOR, ".icon-user")
class BasketPageLocators():
BASKET_EMPTY_TEXT = (By.CSS_SELECTOR, "#content_inner > p")
PRODUCTS_IN_BASKET = (By.CSS_SELECTOR, "#basket_formset")
class LoginPageLocators():
LOGIN_FORM = (By.CSS_SELECTOR, ".login_form")
REGISTER_FORM = (By.CSS_SELECTOR, ".register_form")
REGISTER_EMAIL = (By.CSS_SELECTOR, "#id_registration-email")
REGISTER_PASSWORD = (By.CSS_SELECTOR, "#id_registration-password1")
REGISTER_CONFIRM_PASSWORD = (By.CSS_SELECTOR, "#id_registration-password2")
REGISTER_BUTTON = (By.CSS_SELECTOR, "[name='registration_submit']")
class MainPageLocators():
LOGIN_LINK = (By.CSS_SELECTOR, "#login_link")
class ProductPageLocators():
ADDED_PRODUCT_NAME = (By.CSS_SELECTOR, "#messages > .alert:nth-child(1) strong")
ADDED_PRODUCT_PRICE = (By.CSS_SELECTOR, "#messages > .alert:nth-child(3) strong")
BUTTON_ADD_TO_BASKET = (By.CSS_SELECTOR, "#add_to_basket_form > button")
PRODUCT_NAME = (By.CSS_SELECTOR, ".product_main > h1")
PRODUCT_PRICE = (By.CSS_SELECTOR, ".product_main > .price_color")
SUCCESS_MESSAGE = (By.CSS_SELECTOR, "#messages > .alert:nth-child(1) strong")
|
983,431 | ec738367c406378dbcc8d0811f8b34fe305970d2 | # modify from clovaai
import random
import lmdb
import six
import numpy as np
from PIL import Image
from .base import BaseDataset
from .registry import DATASETS
@DATASETS.register_module
class LmdbDataset(BaseDataset):
def __init__(self, *args, **kwargs):
self.index_list = []
super(LmdbDataset, self).__init__(*args, **kwargs)
def get_name_list(self):
self.env = lmdb.open(self.root, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False)
with self.env.begin(write=False) as txn:
n_samples = int(txn.get('num-samples'.encode()))
for index in range(n_samples):
idx = index + 1 # lmdb starts with 1
label_key = 'label-%09d'.encode() % idx
label = txn.get(label_key).decode('utf-8')
if self.filter(label): # if length of label larger than max_len, drop this sample
continue
else:
self.index_list.append(idx)
self.samples = len(self.index_list)
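# Key layout used above (lmdb convention in this dataset): index 1 maps to
# b'label-000000001' and b'image-000000001' ('%09d' zero-padding).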
def __getitem__(self, index):
assert index <= len(self), 'index range error'
index = self.index_list[index]
with self.env.begin(write=False) as txn:
label_key = 'label-%09d'.encode() % index
label = txn.get(label_key).decode('utf-8')
img_key = 'image-%09d'.encode() % index
imgbuf = txn.get(img_key)
buf = six.BytesIO()
buf.write(imgbuf)
buf.seek(0)
try:
img = Image.open(buf).convert('RGB') # for color image
img = np.array(img)
except IOError:
print(f'Corrupted image for {index}')
img, label = self.__getitem__(random.choice(range(len(self))))
return img, label
if self.transforms:
try:
aug = self.transforms(image=img, label=label)
img, label = aug['image'], aug['label']
except:
return self.__getitem__(random.choice(range(len(self))))
return img, label
|
983,432 | d1531f74a42310b78c0214ed42b4a0adfbcc6963 | # encoding: utf-8
"""
@author: Dechao Meng
@contact: dechao.meng@vipl.ict.ac.cn
"""
import glob
import re
import os.path as osp
from pathlib import Path
import time
import numpy as np
import pickle as pkl
from vehicle_reid_pytorch.data.datasets.bases import ReIDMetaDataset, relabel, get_imagedata_info
class CommonReIDDataset(ReIDMetaDataset):
def __init__(self, pkl_path, verbose=True, test_ext='', **kwargs):
"""
test_ext: For VehicleID and VERIWild, there are multiple test sets. Pass the test ext to select which one to use.
"""
metas = pkl.load(open(pkl_path, 'rb'))
self.train = metas["train"]
self.query = metas["query" + str(test_ext)]
self.gallery = metas["gallery" + str(test_ext)]
self.relabel()
if verbose:
print("=> Dataset loaded")
self.print_dataset_statistics()
self.num_train_ids, self.num_train_imgs, self.num_train_cams = get_imagedata_info(self.train)
self.num_query_ids, self.num_query_imgs, self.num_query_cams = get_imagedata_info(self.query)
self.num_gallery_ids, self.num_gallery_imgs, self.num_gallery_cams = get_imagedata_info(self.gallery)
|
983,433 | d6e52fa877be43b303210c6c415b5fde9f4d1b64 | from __future__ import annotations
from dataclasses import dataclass, field
__NAMESPACE__ = "http://www.travelport.com/schema/air_v52_0"
@dataclass
class DocumentModifiers:
"""
Parameters
----------
generate_itinerary_invoice
Generate itinerary/invoice documents along with ticket
generate_accounting_interface
Generate interface message along with ticket
"""
class Meta:
namespace = "http://www.travelport.com/schema/air_v52_0"
generate_itinerary_invoice: bool = field(
default=False,
metadata={
"name": "GenerateItineraryInvoice",
"type": "Attribute",
}
)
generate_accounting_interface: bool = field(
default=False,
metadata={
"name": "GenerateAccountingInterface",
"type": "Attribute",
}
)
|
983,434 | d5b89046d11fdec64e45af69a182e3852d836712 | #!/usr/bin/python
# Copyright 2018 Blade M. Doyle
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Get the payment estimate records from REDIS and generate a report
import sys
import json
import pickle
import pprint
import traceback
from grinbase.dbaccess import database
from grinbase.model.pool_blocks import Pool_blocks
from grinlib import lib
from grinlib import pool
PROCESS = "paymentReport"
LOGGER = None
CONFIG = None
NUM_BLOCKS = 2500
def main():
CONFIG = lib.get_config()
LOGGER = lib.get_logger(PROCESS)
LOGGER.warn("=== Starting {}".format(PROCESS))
# Connect to DB
database = lib.get_db()
database.db.initializeSession()
pp = pprint.PrettyPrinter(indent=4)
# Fetch and print pool block reward estimates for latest N pool blocks
try:
pool_blocks = Pool_blocks.get_latest(NUM_BLOCKS)
pool_blocks_h = [blk.height for blk in pool_blocks]
LOGGER.warn("Will report estimates for pool blocks: {}".format(pool_blocks_h))
# Print Estimate
for height in pool_blocks_h:
pp.pprint("Eestimate for block: {}".format(height))
payout_map = pool.get_block_payout_map_estimate(height, LOGGER)
pp.pprint(payout_map)
except Exception as e: # AssertionError as e:
LOGGER.error("Something went wrong: {} - {}".format(e, traceback.print_stack()))
LOGGER.warn("=== Completed {}".format(PROCESS))
if __name__ == "__main__":
main()
|
983,435 | a8c608166d546e42bc8dd4e67e22a1d6f308d3cc | import emoji, time
#TODO: Make flexible
def do_replaces(msg, message, match):
for i in range(len(match.groups()) + 1):
group = match.group(i)
if group != None:
message = message.replace('$%s' % i, group)
if 'from' in msg.keys():
message = message.replace('$name', msg['from']['first_name'])
message = message.replace('$time', time.strftime('%H:%M'))
message = emoji.emojize(message, True)
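# Illustrative result (assumption: msg = {'from': {'first_name': 'Ann'}} and a regex
# match whose group 1 is 'hi'): do_replaces(msg, '$name said $1 at $time', match)
# -> 'Ann said hi at 14:30' (actual time varies).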
return message |
983,436 | 568b61f080563c5c73e3caa620e0d5bee86d3b4b | #!/usr/bin/env python
#***************************************************************************************
# Expenses and income
#
#
#
#
# 1
# 2
# 3
water=45
hoa=277
electric=75
internet=40
phone=125
land=1000
xterra=575
psu=1000
insurance=100+25+30
mpg=20
miles=1250
auto_gas=(miles/mpg)*3
home_gas=75
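# Note: under Python 2, miles/mpg above is integer division (1250/20 == 62),
# and home_gas is printed below but never added into subtotal as written.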
condo_repairs=200
food=750
etc=600
subtotal=etc+food+condo_repairs+auto_gas+phone+electric+water+hoa+internet+insurance
expenses=subtotal+land+xterra+psu
income=7500
#***************************************************************************************
print "HOA : ",hoa
print "Water : ",water
print "Internet : ",internet
print "Phone : ",phone
print "Electric : ",electric
print "Repairs/upgrades : ",condo_repairs
print "Insurance : ",insurance
print "Auto Gas : ",auto_gas
print "Home Gas : ",home_gas
print "ETC : ",etc
print "Food : ",food
print "Subtotal : ",subtotal
print "Xterra : ",xterra
print "Land : ",land
print "PSU : ",psu
print "Income : ",income
print "Total : ",expenses
print "Savings : ",income-expenses
|
983,437 | 14a8490c09cb8bc5f4a694f33a9ac9633357d48f | import os
import functools
import zipfile
from .logger import get_logger
from . import storage
logger = get_logger('enigma_common.provider')
class Provider:
def __init__(self, config: dict):
self.config = config
self.CONTRACT_DISCOVERY_ADDRESS = config['CONTRACT_DISCOVERY_ADDRESS']
self.KM_DISCOVERY_ADDRESS = config['KEY_MANAGEMENT_DISCOVERY']
self._enigma_contract_filename = config.get('ENIGMA_CONTRACT_FILENAME', 'enigmacontract.txt')
self._token_contract_filename = config.get('TOKEN_CONTRACT_FILENAME', 'enigmatokencontract.txt')
# The next two lookups originally reused TOKEN_CONTRACT_FILENAME (apparent copy-paste); these keys are the likely intent.
self._voting_contract_filename = config.get('VOTING_CONTRACT_FILENAME', 'votingcontract.txt')
self._sample_contract_filename = config.get('SAMPLE_CONTRACT_FILENAME', 'samplecontract.txt')
self._principal_address_directory = config.get('PRINCIPAL_ADDRESS_DIRECTORY', 'public')
# if os.getenv('SGX_MODE', 'HW') == 'SW':
# self._principal_address_filename = config.get('PRINCIPAL_ADDRESS_FILENAME', 'principal-sign-addr_sw.txt')
# else:
self._principal_address_filename = config.get('PRINCIPAL_ADDRESS_FILENAME', 'principal-sign-addr.txt')
self._enigma_token_abi_directory = config.get('TOKEN_CONTRACT_ABI_DIRECTORY', 'contract')
self._enigma_token_abi_filename = config.get('TOKEN_CONTRACT_ABI_FILENAME', 'EnigmaToken.json')
self._enigma_token_abi_filename_zip = config.get('ENIGMA_CONTRACT_ABI_FILENAME_ZIPPED', 'EnigmaToken.zip')
self._enigma_contract_abi_directory = config.get('ENIGMA_CONTRACT_ABI_DIRECTORY', 'contract')
self._km_abi_directory = config.get('PRINCIPAL_ADDRESS_DIRECTORY', 'contract')
self._km_abi_filename = config.get('PRINCIPAL_ADDRESS_FILENAME', 'IEnigma.json')
if os.getenv('SGX_MODE', 'HW') == 'SW':
self._enigma_contract_abi_filename = config.get('ENIGMA_CONTRACT_ABI_FILENAME_SW',
'EnigmaSimulation.json')
self._enigma_contract_abi_filename_zip = config.get('ENIGMA_CONTRACT_ABI_FILENAME_ZIPPED_SW',
'EnigmaSimulation.zip')
else:
self._enigma_contract_abi_filename = config.get('ENIGMA_CONTRACT_ABI_FILENAME', 'Enigma.json')
self._enigma_contract_abi_filename_zip = config.get('ENIGMA_CONTRACT_ABI_FILENAME_ZIPPED', 'Enigma.zip')
# strategy for information we get from enigma-contract
self.contract_strategy = {"COMPOSE": storage.HttpFileService(self.CONTRACT_DISCOVERY_ADDRESS),
"COMPOSE_DEV": storage.HttpFileService(self.CONTRACT_DISCOVERY_ADDRESS),
"K8S": storage.HttpFileService(self.CONTRACT_DISCOVERY_ADDRESS),
"TESTNET": storage.HttpFileService(self.CONTRACT_DISCOVERY_ADDRESS),
"MAINNET": storage.HttpFileService(self.CONTRACT_DISCOVERY_ADDRESS)}
self.key_management_discovery = {"COMPOSE": storage.HttpFileService(self.KM_DISCOVERY_ADDRESS, namespace='km'),
"COMPOSE_DEV": storage.HttpFileService(self.KM_DISCOVERY_ADDRESS, namespace='km'),
"K8S": storage.HttpFileService(self.KM_DISCOVERY_ADDRESS, namespace='km'),
"TESTNET": storage.HttpFileService(self.KM_DISCOVERY_ADDRESS, namespace='km'),
"MAINNET": storage.HttpFileService(self.KM_DISCOVERY_ADDRESS, namespace='km')}
# information stored in global storage
self.backend_strategy = {"COMPOSE": storage.AzureContainerFileService,
"COMPOSE_DEV": storage.HttpFileService(self.CONTRACT_DISCOVERY_ADDRESS),
"K8S": storage.AzureContainerFileService,
"TESTNET": storage.AzureContainerFileService,
"MAINNET": storage.AzureContainerFileService}
self._enigma_abi = None
self._enigma_token_abi = None
self._enigma_contract_address = None
self._principal_address = None
self._token_contract_address = None
self._key_management_abi = None
@property
@functools.lru_cache()
def key_management_abi(self):
return self.get_file(directory_name=self._km_abi_directory,
file_name=self._km_abi_filename)
@property
@functools.lru_cache()
def enigma_contract_address(self):
return self._deployed_contract_address(contract_name=self._enigma_contract_filename)
@property
@functools.lru_cache()
def token_contract_address(self):
return self._deployed_contract_address(contract_name=self._token_contract_filename)
@property
@functools.lru_cache()
def voting_contract_address(self):
return self._deployed_contract_address(contract_name=self._voting_contract_filename)
@property
@functools.lru_cache()
def sample_contract_address(self):
return self._deployed_contract_address(contract_name=self._sample_contract_filename)
@property
@functools.lru_cache()
def principal_address(self):
fs = self.key_management_discovery[os.getenv('ENIGMA_ENV', 'COMPOSE')]
timeout = self.config.get("KEY_MANAGEMENT_TIMEOUT", 120)
is_contract_ready = self._wait_till_open(timeout=timeout, fs=fs)
if not is_contract_ready:
logger.error(f'Key management address wasn\'t ready before timeout (120s) expired')
raise TimeoutError(f'Timeout for server @ {self.KM_DISCOVERY_ADDRESS}')
return fs[self._principal_address_filename]
@property
@functools.lru_cache()
def enigma_abi(self):
zipped = self.get_file(directory_name=self._enigma_contract_abi_directory,
file_name=self._enigma_contract_abi_filename_zip)
return self._unzip_bytes(zipped, self._enigma_contract_abi_filename)
@property
@functools.lru_cache()
def enigma_token_abi(self):
zipped = self.get_file(directory_name=self._enigma_token_abi_directory,
file_name=self._enigma_token_abi_filename_zip)
return self._unzip_bytes(zipped, self._enigma_token_abi_filename)
@property
@functools.lru_cache()
def voting_abi(self):
zipped = self.get_file(directory_name=self._voting_abi_directory,
file_name=self._voting_abi_filename_zip)
return self._unzip_bytes(zipped, self._voting_abi_filename)
@property
@functools.lru_cache()
def sample_abi(self):
zipped = self.get_file(directory_name=self._sample_abi_directory,
file_name=self._sample_abi_filename_zip)
return self._unzip_bytes(zipped, self._sample_abi_filename)
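# Note: voting_abi and sample_abi above read _voting_abi_*/_sample_abi_* attributes
# that __init__ never sets, so accessing these properties raises AttributeError as-is.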
def get_file(self, directory_name: str, file_name) -> bytes:
fs = self.backend_strategy[os.getenv('ENIGMA_ENV', 'COMPOSE')](directory_name)
try:
return fs[file_name]
except PermissionError as e:
logger.critical(f'Failed to get file, probably missing credentials. {e}')
        except ValueError as e:  # the exact exception types are not pinned down yet
            logger.critical(f'Failed to get file: {e}')
        except Exception as e:  # catch-all until the failure modes are known
            logger.critical(f'Failed to get file: {type(e)} - {e}')
exit(-1)
def _wait_till_open(self, timeout: int = 60, fs: storage.HttpFileService = None) -> bool:
_fs = fs or self.contract_strategy[os.getenv('ENIGMA_ENV', 'COMPOSE')]
import time
for _ in range(timeout):
if _fs.is_ready():
return True
time.sleep(1)
return False
def _get_contract_address(self, contract_name):
fs = self.contract_strategy[os.getenv('ENIGMA_ENV', 'COMPOSE')]
return fs[contract_name]
def _deployed_contract_address(self, contract_name):
logger.debug(f'Waiting for enigma-contract @ http://{self.CONTRACT_DISCOVERY_ADDRESS} for enigma contract')
timeout = self.config.get("CONTRACT_TIMEOUT", 60)
# wait for contract to be ready
is_contract_ready = self._wait_till_open(timeout=timeout)
if not is_contract_ready:
logger.error(f'Contract address wasn\'t ready before timeout ({timeout}s) expired')
raise TimeoutError(f'Timeout for server @ {self.CONTRACT_DISCOVERY_ADDRESS}')
return self._get_contract_address(contract_name)
@staticmethod
def _unzip_bytes(file_bytes: bytes, file_name: str) -> bytes:
""" unzip a file to a path """
import io
with zipfile.ZipFile(io.BytesIO(file_bytes), "r") as zip_ref:
return zip_ref.read(file_name)
|
983,438 | fa6a7ef33d6de2596cfdff098b63a2286b8c878b | import os
from unittest import TestCase
import pytest
import requests_mock
from bs4 import BeautifulSoup
from models import InitiativeImport
from platformen.nlvoorelkaar import NLvoorElkaarSource, NLvoorElkaar
class TestNLvoorElkaarPlatformSource(TestCase):
@requests_mock.Mocker()
def setUp(self, request_mock):
        # for a large part a clone of the Wij Amsterdam test
test_path = os.path.dirname(__file__)
file_path = os.path.join(test_path, "test_responses", "nlvoorelkaar_supply.html")
with open(file_path, 'r') as data_file:
self.response = data_file.read()
scraper = NLvoorElkaar()
self.source = scraper._sources[0]
self.url = "https://www.nlvoorelkaar.nl/hulpaanbod/179582"
request_mock.get(self.url, text=self.response, status_code=200)
self.request_mock = request_mock
self.actual = InitiativeImport(source_id=179582, source_uri=self.url)
scraper._sources[0].complete(self.actual)
def test_table_name(self):
assert self.actual.name == "Aanbod van Joeri"
def test_table_category(self):
assert self.actual.category == "Coronahulp"
def test_table_organisation_kind(self):
assert self.actual.organisation_kind == "een vrijwilliger"
def test_description(self):
assert self.actual.description.startswith("Naast het schrijven van mijn scriptie zou ik graag mensen helpen")
def test_alternative_place_regex(self):
assert self.actual.location == "Amstelveen"
@pytest.mark.skip(reason="Test methods for debugging specific items")
def test_missing_plaats(self):
scraper = NLvoorElkaar()
item = scraper._sources[0].complete(InitiativeImport(
source_id=179582,
source_uri="https://www.nlvoorelkaar.nl/hulpaanbod/179582"
))
|
983,439 | df1f1cbae9dacc73b6f83054fc1f31f83192782d | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-03-29 08:12
from __future__ import unicode_literals
from django.db import migrations, models, connection
import uuid
def create_uuids(apps, schema_editor):
Notification = apps.get_model('notifications', 'Notification')
ids = [ x.get('id') for x in Notification.objects.all().values()]
cursor = connection.cursor()
for id in ids:
cursor.execute("update notifications_notification set id = '{0}' where id = '{1}'".format(uuid.uuid4(), id))
class Migration(migrations.Migration):
dependencies = [
('notifications', '0006_indexes'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='id',
field=models.CharField(max_length=50, primary_key=True, serialize=False),
),
migrations.RunPython(create_uuids, reverse_code=migrations.RunPython.noop),
]
|
983,440 | bd1c3d46977c67c0a4ea5d0c2c83173894e0bece | """
_InsertRecoConfig_
Oracle implementation of InsertRecoConfig
"""
from WMCore.Database.DBFormatter import DBFormatter
class InsertRecoConfig(DBFormatter):
def execute(self, binds, conn = None, transaction = False):
sql = """INSERT INTO reco_config
(RUN_ID, PRIMDS_ID, DO_RECO, RECO_SPLIT, WRITE_RECO, WRITE_DQM,
WRITE_AOD, WRITE_MINIAOD, WRITE_NANOAOD, PROC_VERSION, ALCA_SKIM, PHYSICS_SKIM,
DQM_SEQ, CMSSW_ID, MULTICORE, SCRAM_ARCH, GLOBAL_TAG)
VALUES (:RUN,
(SELECT id FROM primary_dataset WHERE name = :PRIMDS),
:DO_RECO,
:RECO_SPLIT,
:WRITE_RECO,
:WRITE_DQM,
:WRITE_AOD,
:WRITE_MINIAOD,
:WRITE_NANOAOD,
:PROC_VER,
:ALCA_SKIM,
:PHYSICS_SKIM,
:DQM_SEQ,
(SELECT id FROM cmssw_version WHERE name = :CMSSW),
:MULTICORE,
:SCRAM_ARCH,
:GLOBAL_TAG)
"""
self.dbi.processData(sql, binds, conn = conn,
transaction = transaction)
return
|
983,441 | f4f0ef635ae4a9ebac7f30a85d6113ef316158ef | # -*- coding: utf-8 -*-
# @Time : 2018/5/7 14:46
# @Author : 流沙
# @Site :
# @File : urls.py
# @Software: PyCharm
import requests
import json
from Zabbix.dbinfo import findDataSource
class ZabbixApi(object):
"""
    Zabbix API class
"""
    # timeout (5 seconds)
TIMEOUT=5
class FailedError(Exception):
"""
        Raised when a Zabbix API call fails
"""
ERROR_MESSAGE_TEMPLATE = '"{message}({code}): {data}"'
def __init__(self,name,reason = None):
"""
            Constructor
            :param name: name of the failed method
            :param reason: error response
"""
message = "Failed to {0}.".format(name)
if reason is not None:
message = ''.join([message,self.ERROR_MESSAGE_TEMPLATE.format(**reason)])
super(ZabbixApi.FailedError,self).__init__(message)
class AuthenticationFailedError(FailedError):
"""
验证ZabbixToken失败
"""
def __init__(self,reason = None):
"""
            Constructor
            :param reason: error response
"""
super(ZabbixApi.AuthenticationFailedError,self).__init__('authenticate',reason)
def __init__(self,encode = 'utf-8'):
"""
        Constructor
        :param encode: character encoding
"""
self.AUTH = findDataSource()
# def __enter__(self):
# self.authenticate()
# return self
def call(self,method,params):
"""
        Zabbix API request dispatcher
        :param method: Zabbix API method name
        :param params: Zabbix API method parameters
:return:
"""
body = json.dumps({
'jsonrpc': '2.0',
'method': method,
'params': params,
'auth': self.AUTH['token'],
'id': 2
})
headers = {'Content-Type': 'application/json-rpc'}
try:
request = requests.post(self.AUTH['uri'],data=body,headers=headers,timeout=self.TIMEOUT)
response_json = request.json()
if 'result' in response_json:
return response_json
elif 'error' in response_json:
return ZabbixApi.FailedError(name=method,reason=response_json['error'])
else:
return ZabbixApi.AuthenticationFailedError()
except requests.exceptions.ConnectTimeout:
return ZabbixApi.AuthenticationFailedError({'code': -1, 'message': 'Connect Timeout.', 'data': 'URI is incorrect.'})
# def authenticate(self):
# """
    #     Perform authentication
# :return:
# """
# response = self.call('user.login', {'user': self.DataSource['username'], 'password': self.DataSource['password']}, True)
# print(response)
# if 'result' in response:
# self.session_id = response['result']
# return response['result']
# elif 'error' in response:
# raise ZabbixApi.AuthenticationFailedError(response['error'])
# else:
# raise ZabbixApi.AuthenticationFailedError()
#
# method = 'hostgroup.update'
# params = {
# "groupid": 15,
# "name": 'TEST1'
# }
# # params = json.loads('{"search": {"name": "Templates/Modules"}}')
# api = ZabbixApi()
# print(api.call(method,params)) |
983,442 | 05a335a991c98d90868ba94103ba63cffc1b0933 | # Time: O(logn/logm), n is numBottles, m is numExchange
# Space: O(1)
class Solution(object):
def numWaterBottles(self, numBottles, numExchange):
"""
:type numBottles: int
:type numExchange: int
:rtype: int
"""
result = numBottles
while numBottles >= numExchange:
numBottles, remainder = divmod(numBottles, numExchange)
result += numBottles
numBottles += remainder
return result
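# A quick hand-checked example: with 9 bottles and an exchange rate of 3 you
# drink 9, trade for 3 more, drink them, trade for 1, and drink it -- 13 total.
if __name__ == "__main__":
    assert Solution().numWaterBottles(9, 3) == 13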
|
983,443 | 866bf936020af1e95b7b136edf5526450396474e | import os
from unittest import TestCase
from telegrampy import utils
from telegrampy.exceptions import MissingTelegramBotToken, MissingTelegramChatId
class TestUtils(TestCase):
def test_get_telegram_bot_token(self):
with self.assertRaises(MissingTelegramBotToken):
utils.get_telegram_bot_token()
os.environ["TELEGRAM_BOT_TOKEN"] = "foo_token"
token = utils.get_telegram_bot_token()
self.assertEqual(token, "foo_token")
def test_get_telegram_chat_id(self):
with self.assertRaises(MissingTelegramChatId):
utils.get_telegram_chat_id()
os.environ["TELEGRAM_CHAT_ID"] = "foo_chat_id"
chat_id = utils.get_telegram_chat_id()
self.assertEqual(chat_id, "foo_chat_id")
|
983,444 | cca4ab172aa11bf187c8903f14135e64b2f3cae8 | import turtle
t=turtle.Turtle()
t.penup()
t.setpos(-20,40)
t.pendown()
t.pensize(10)
t.pencolor("pink")
t.forward(100)
t.backward(100)
t.right(90)
t.forward(100)
t.left(90)
t.forward(100)
t.backward(100)
t.right(90)
t.forward(100)
t.left(90)
t.forward(100)
turtle.done()
|
983,445 | 5fa538e24c1256db94af648cf4db11342b35ac96 | from random import shuffle
a = [3,6,8,7]
shuffle(a)
print(a) |
983,446 | e281d782efa6a8e33876adb68a4b31f284f53685 | from django.contrib import admin
from employee.models import Employee
class AuthorAdmin(admin.ModelAdmin):
# pass
list_display = ('eid','ename', 'eemail', 'econtact')
admin.site.register(Employee, AuthorAdmin)
|
983,447 | 9cb4bb0ade3d7f2ecfbc734b77e854ba5d1b0210 | from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
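# This is a two-rank point-to-point example; launch it with something like:
#   mpiexec -n 2 python this_script.py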
send_obj = {'a': [1, 2.4, 'abc', -2.3+3.4J],
'b': {2, 3, 4}}
if rank == 0:
send_req = comm.isend(send_obj,dest=1,tag=11)
send_req.wait()
print('process %d sends %s'%(rank,send_obj))
elif rank == 1:
recv_req= comm.irecv(source=0,tag=11)
recv_obj = recv_req.wait()
print('process %d receives %s'%(rank,recv_obj))
|
983,448 | 7e328fcbf5a0424d774ff01eb1daa911559541b7 | #encoding=utf-8
#author: liang xinyan
#email: liangxinyan48@163.com
import tensorflow as tf
import numpy as np
import os
import config
opt = os.path
paras = config.get_configs()
nb_view = paras['nb_view']
classes = paras['classes']
image_size = paras['image_size']
w, h, c = image_size['w'], image_size['h'], image_size['c']
def get_data(data_base_dir='..'):
print('Data loading ......')
train_x = np.load(os.path.join(data_base_dir, 'train_X.npy'))
test_x = np.load(os.path.join(data_base_dir, 'test_X.npy'))
if c == 1:
train_x = np.expand_dims(train_x, axis=-1)
test_x = np.expand_dims(test_x, axis=-1)
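    # map raw pixel values from [0, 255] to [-1, 1]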
train_x = (train_x / 127.5) - 1.
test_x = (test_x / 127.5) - 1.
train_y = np.load(os.path.join(data_base_dir, 'train_Y.npy'))
test_y = np.load(os.path.join(data_base_dir, 'test_Y.npy'))
train_y = tf.keras.utils.to_categorical(train_y)
test_y = tf.keras.utils.to_categorical(test_y)
print('Data loading finished!!!')
return train_x, train_y, test_x, test_y
def get_views(data_name='AWA1', view_data_dir='view', idx_split=0):
from . import AWA1, Reuter, mfeat, nus_wide
if data_name == 'AWA1':
view_train_x, train_y, view_test_x, test_y = AWA1.load_AWA1(
view_data_dir=view_data_dir, idx_split=idx_split)
if data_name == 'Reuters':
view_train_x, train_y, view_test_x, test_y = Reuter.load_Reuter(
view_data_dir=view_data_dir, idx_split=idx_split)
if data_name == 'mfeat':
view_train_x, train_y, view_test_x, test_y = mfeat.load_mfeat(
view_data_dir=view_data_dir, idx_split=idx_split)
if data_name == 'nus_wide':
view_train_x, train_y, view_test_x, test_y = nus_wide.load_nus_wide(
view_data_dir=view_data_dir, idx_split=idx_split)
if data_name in ['ChemBook', 'Chembl', 'PubChem', 'tiny-imagenet200']:
models_ls = ['resnet50', 'desnet121', 'MobileNetV2', 'Xception', 'InceptionV3']
if nb_view == 10:
models_ls = models_ls+['resnet18', 'resnet34', 'desnet169', 'desnet201', 'NASNetMobile']
view_train_x = []
view_test_x = []
for model in models_ls:
view_train_x.append(np.load(os.path.join(view_data_dir, model+'train_X.npy')))
view_test_x.append(np.load(os.path.join(view_data_dir, model+'test_X.npy')))
train_y = np.load(os.path.join(view_data_dir, 'train_Y.npy'))
test_y = np.load(os.path.join(view_data_dir, 'test_Y.npy'))
train_y = tf.keras.utils.to_categorical(train_y, classes)
test_y = tf.keras.utils.to_categorical(test_y, classes)
# import keras
# keras.utils.to_categorical()
return view_train_x, train_y, view_test_x, test_y
def preprocess_input(data_saved_dir='database', save_name='x'):
'''
    Preprocess the data so the model can consume it
    :param data_saved_dir: directory the data was saved to
    :param save_name: file name of the data (without the .npy extension)
:return: preprocessed data
'''
x = np.load(os.path.join(data_saved_dir, save_name+'.npy'))
if len(x.shape) == 3:
x = np.expand_dims(x, axis=-1)
x = (x / 127.5) - 1.
print('Data loading finished!!!')
return x
if __name__ == '__main__':
base_dir = opt.join('fn')
# train_fns, train_y, test_fns, test_y = get_image_paths(base_dir=base_dir)
# train_fns = [opt.join('data', v) for v in train_fns]
# train_fns = [v.split('_')[0] for v in train_fns]
# print(len(set(train_fns)))
# print(train_fns)
|
983,449 | 1a9e6b50997bc6c26fc6cf160c671423bdaea125 | from django.http.response import HttpResponse
from django.shortcuts import get_object_or_404, render
from django.shortcuts import redirect, render
from django.db.models import Q
from .models import Post, Form, Response
from .forms import PostForm, QuestionFormForm
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required, user_passes_test
from django.core.paginator import Paginator
from django.contrib import messages
import csv
from userApp.models import *
# Create your views here.
def can_view_post(user):
"""Determines who can access the search bar
Args:
User
Returns:
Boolean: True if student or admin, false if organization
"""
#only students and admins may use the search, submitForm functions
return (not bool(user.is_staff) or user.is_superuser)
@login_required(login_url='login')
@user_passes_test(can_view_post)
def search(request):
"""
Allow students to search for a scholarship with a search engine
**Context**
result: All of the posts that fall under the restrictions the user has provided
page_obj: All of the posts on that specific page number
query: Used to put inside the searchbar when we change pages
filter: Used to put inside the filter dropdown when we change pages
sortby: Used to put inside the sorts dropdown when we change pages
``query ``
The argument for what we should search for
``Filter``
Filters out what posts are relevant to the person's needs
``sortby``
The sorting rule for the posts
``pagenum``
The page number that we should be on
**Template:**
:template:`search.html`
"""
filterList = ['everyone', 'black', 'hispanic', 'female', 'lgbt', 'immigrants', 'disabled', 'poor']
sorts = {"Title(A-Z)":"title", "Title(Z-A)":"-title", "Value Increasing":"value", "Value Decreasing":"-value", "Due Date":"dueDate"}
#urls agruments
query = request.GET.get('query', '')
Filter = request.GET.get('filter', 'Default')
sortby = request.GET.get('sortby', 'Default')
pageNum = request.GET.get('page', '1')
postResult = Post.objects.all().order_by(sorts.get(sortby,"UUID"))
if(query):
#search for words in the title, description or organizaton name, then filter out the tags
postResult = Post.objects.filter((Q(title__icontains=query)|Q(description__icontains=query))|Q(organization__username__icontains=query))
if(Filter !='Default' and Filter.lower() in filterList):
#check if the filter the user entered is a tag
postResult = postResult.filter(tags__name__in=[Filter.lower()])
#function to help divide our query into groups of 12 for pagination
pageObj = Paginator(postResult, 12)
#select the page that we want
page = pageObj.page(pageNum)
context = {
'result': postResult,
'page_obj': page,
'query': query,
'filter': Filter,
'sortby': sortby,
}
return render(request, 'search.html', context=context)
@login_required(login_url='login')
@user_passes_test(can_view_post)
def submitForm(request,formcode=None):
"""
Allow students to submit answers to a form
Args:
UUID: formcode (UUID to the question form)
**Context**
responseForm: A form that allows students to answer the questions that the organization asks
postForm: The post that is related to the question form, so we can display some information about it for the student
``response ``
A new instance of a response object
**Template:**
:template:`response.html`
:template:`sucesss.html` (If the user can submit the form)
:template:`error.html` (If there is an issue with the user submitting the form, typically it is because they have already submitted for that scholarship)
"""
if formcode:
#save the answer form response
if request.method == 'POST':
answer1 = request.POST.get('answer1', '')
answer2 = request.POST.get('answer2', '')
answer3 = request.POST.get('answer3', '')
user = request.user
#users can only submit one form for each scholarship
if len(Response.objects.filter(user_id=user, form_id=formcode)) == 0:
form = Response(answer1=answer1, answer2=answer2, answer3=answer3, user=user, form_id=formcode)
form.save()
return render(request,'success.html')
return render(request, 'error.html')
#Get both the questionform and the postform to display onto the response page
questionForm = get_object_or_404(Form,UUID=formcode)
postForm = Post.objects.filter(Q(link__icontains=formcode))[0]
context = {'form': questionForm, 'UUID': formcode, 'post': postForm}
return render(request, 'response.html', context=context)
else:
return redirect(search)
@login_required(login_url='login')
@staff_member_required
def createPost(request):
"""
Create a question form for students to fill out
**Context**
    postForm: A post that students can find on the search page
``post ``
A new instance of a post object
**Message**
Message that tells the user that they have successfully created a post
**Template:**
:template:`create_post.html`
"""
#save the organization's post
if request.method == 'POST':
form = PostForm(request.user, request.POST, request.FILES)
if form.is_valid():
filterList = ['everyone', 'black', 'hispanic', 'female', 'lgbt', 'immigrants', 'disabled', 'poor']
newpost = form.save()
#Add tags to the object only if in the filterlist
tags = form.cleaned_data.get('tags')
tags = [tag.lower() for tag in tags if tag.lower() in filterList]
newpost.tags.add(*tags)
            messages.success(request, 'You have successfully created the post')
form = PostForm(request.user)
context = {'form':form}
return render(request, 'create_post.html', context=context)
#form to fill out for the post
form = PostForm(request.user)
context = {'form':form}
return render(request, 'create_post.html', context=context)
@login_required(login_url='login')
@staff_member_required
def createForm(request):
"""
Create a question form for students to fill out
**Context**
QuestionFormForm: A form that allows organizations to ask 3 questions for students
**Message**
Message that contains the link to the question from so the organization can later use
**Template:**
:template:`scholarship.html`
"""
if request.method == 'POST':
form = QuestionFormForm(request.POST)
if form.is_valid():
#return the uuid so the organization can use that link in the post to connect to the questionform
formID = form.save().UUID
#send them the url for the form
messages.success(request, 'You have made your question form accessible at: ' + request.build_absolute_uri('/post/') + f'apply/{formID}')
context = {'form': form}
return render(request, 'scholarship.html', context=context)
form = QuestionFormForm()
context = {'form': form}
return render(request, 'scholarship.html', context=context)
@login_required(login_url='login')
@staff_member_required
def downloadResponse(request, formcode=None):
"""
Downloads the responses to a post
Args:
UUID: formcode (UUID to the question form)
**Template:**
:template:`download.html`
***HttpResponse**
:text/csv:`An CSV file with infomation related to the user, submission date, and all their answers`
"""
    if formcode is not None:
response = HttpResponse(content_type='text/csv')
responses = Response.objects.filter(form_id=formcode)
writer = csv.writer(response)
writer.writerow(['User', 'Submit Date', 'Answer1', 'Answer2', 'Answer3'])
for r in responses:
user = User.objects.get(id=r.user_id)
writer.writerow([user, r.submitDate, r.answer1 ,r.answer2 , r.answer3])
response['Content-Disposition'] = 'attachment; filename="response.csv"'
return response
return render(request, 'download.html') |
983,450 | 7390b2895b61cd72aa5881517a9fbc90c82867e6 | # coding=utf-8
from __future__ import absolute_import, division, print_function
import os
import sys
sys.path.append(os.path.abspath('.'))
os.chdir(sys.path[0])
import argparse
import glob
import json
import logging
import os
import random
from dataclasses import dataclass, field
from typing import Optional
import shutil
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from sklearn.model_selection import train_test_split
import pandas as pd
from src.transformers import (MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
WEIGHTS_NAME,
AdamW,
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
HfArgumentParser,
TrainingArguments,
get_linear_schedule_with_warmup,
)
from src.transformers import glue_compute_metrics as compute_metrics
from src.transformers import glue_convert_examples_to_features as convert_examples_to_features
from src.transformers import glue_output_modes as output_modes
from src.transformers import glue_processors as processors
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in MODEL_CONFIG_CLASSES), (), )
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def load_and_cache_examples(args, task, tokenizer, evaluate=False, predict=False):
if args.local_rank not in [-1, 0] and not evaluate:
# Make sure only the first process in distributed training process the dataset,
# and the others will use the cache
torch.distributed.barrier()
processor = processors[task]()
output_mode = output_modes[task]
key = 'train'
if evaluate:
key = 'dev'
if predict:
key = 'test'
# Load data features from cache or dataset file
cached_features_file = os.path.join(
args.data_dir,
"cached_{}_{}_{}_{}".format(key,
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length),
str(task),
),
)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
label_list = processor.get_labels()
if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]:
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
if evaluate:
examples = processor.get_dev_examples(args.data_dir)
elif predict:
examples = processor.get_test_examples(args.data_dir)
else:
examples = processor.get_train_examples(args.data_dir)
features = convert_examples_to_features(
examples, tokenizer, max_length=args.max_seq_length, label_list=label_list, output_mode=output_mode,
)
# if args.local_rank in [-1, 0]:
# logger.info("Saving features into cached file %s", cached_features_file)
# torch.save(features, cached_features_file)
if args.local_rank == 0 and not evaluate:
# Make sure only the first process in distributed training process the dataset,
# and the others will use the cache
torch.distributed.barrier()
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
if output_mode == "classification":
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif output_mode == "regression":
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
return dataset
def predict(args, model, tokenizer, prefix=""):
# Loop to handle MNLI double prediction (matched, mis-matched)
pred_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
pred_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,)
results = []
for pred_task, pred_output_dir in zip(pred_task_names, pred_outputs_dirs):
pred_dataset = load_and_cache_examples(args, pred_task, tokenizer, evaluate=False, predict=True)
if not os.path.exists(pred_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(pred_output_dir)
args.pred_batch_size = args.per_gpu_pred_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
pred_sampler = SequentialSampler(pred_dataset)
pred_dataloader = DataLoader(pred_dataset, sampler=pred_sampler, batch_size=args.pred_batch_size)
# multi-gpu pred
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
# Predict!
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(pred_dataloader, desc="Predicting"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
# XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
)
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
if args.output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif args.output_mode == "regression":
preds = np.squeeze(preds)
results.extend(preds.tolist())
result = pd.DataFrame(data=results, columns=['label'])
result.to_csv(path_or_buf=os.path.join(args.data_dir, 'keys.csv'), encoding='utf-8', header=None)
return None
|
983,451 | bbd8eb38953f22504cd5114951564cca74673a92 | from datetime import datetime, timedelta
from constants import Axes
from detection_utils import (
active_detections,
align_adjustment,
center_pole_index,
gate_dets,
buoy_dets,
jianshi_dets,
triangle_dets
)
import settings
def control(args):
return args[0]
def data(args):
return args[1]
def conditions(args):
return args[2]
class State:
def __init__(self):
self.set_heading = 0
self.set_depth = 0
self.set_velocity = 0
self.microstate = None
def __call__(self, args):
        raise NotImplementedError
def heading(self):
return self.set_heading
def depth(self):
return self.set_depth
def velocity(self):
return self.set_velocity
def __str__(self):
return " - ".join([self.__class__.__name__,
self.microstate.__name__,
str(self.set_heading),
str(self.set_depth),
str(self.set_velocity)])
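# The states below assume a driver loop of roughly this shape (hypothetical,
# not part of this file): every tick builds args = (control, data, conditions),
# calls the current state, and adopts whatever state it returns.
#
#   state = KilledState()
#   while True:
#       state = state(args)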
class KilledState(State):
def __init__(self):
super(KilledState, self).__init__()
self.microstate = self.check_unkilled
self.unkilled_heading = 0
def __call__(self, args):
return self.microstate(args)
def check_unkilled(self, args):
if not conditions(args)["killed"](args):
self.unkilled_heading = data(args)["angular"].acc[Axes.zaxis]
self.microstate = self.wait_for_depth
conditions(args)["below_starting_depth"].reset(False)
return self
def wait_for_depth(self, args):
if conditions(args)["killed"](args):
return KilledState()
if conditions(args)["below_starting_depth"](args):
return SinkAndFindHeading(Gate(self.unkilled_heading),
data(args)["angular"].acc[Axes.zaxis],
self.unkilled_heading,
settings.GATE_TARGET_DEPTH)
# return SinkAndFindHeading(FindTriangleBuoy(self.unkilled_heading),
# data(args)["angular"].acc[Axes.zaxis],
# self.unkilled_heading,
# settings.BUOY_TARGET_DEPTH)
return self
def heading(self):
return 0
def depth(self):
return 0
def velocity(self):
return 0
class KillableState(State):
def __init__(self):
super(KillableState, self).__init__()
def __call__(self, args):
if conditions(args)["killed"](args):
return KilledState()
return self.microstate(args)
class Surface(KillableState):
def __init__(self):
super(Surface, self).__init__()
self.set_heading = 0
self.set_depth = 0
self.set_velocity = 0
self.microstate = self.dead
def dead(self, args):
return self
class Gate(KillableState):
def __init__(self, gate_heading):
super(Gate, self).__init__()
self.set_heading = gate_heading
self.set_depth = settings.GATE_TARGET_DEPTH
self.set_velocity = settings.GATE_VELOCITY
self.processed_frame = False
self.left_det = None
self.right_det = None
self.dets = []
self.microstate = self.initial_dead_reckon
def initial_dead_reckon(self, args):
if conditions(args)["interesting_frame"](args):
d = data(args)["forwarddetection"].detections
self.dets = gate_dets(d)
num_dets = len(self.dets)
if num_dets == 1:
self.microstate = self.initial_one_det
else:
self.microstate = self.transition_on_new_frame
return self
def initial_one_det(self, args):
if not self.processed_frame:
self.processed_frame = True
if self.dets[0].x > 0.5 + settings.VISION_X_TOLERANCE:
self.set_heading += settings.GATE_HEADING_ADJUST
if self.dets[0].x < 0.5 - settings.VISION_X_TOLERANCE:
self.set_heading -= settings.GATE_HEADING_ADJUST
if conditions(args)["has_new_frame"](args):
self.processed_frame = False
d = data(args)["forwarddetection"].detections
self.dets = gate_dets(d)
num_dets = len(self.dets)
if num_dets == 0:
self.microstate = self.initial_dead_reckon
elif num_dets > 1:
self.microstate = self.transition_on_new_frame
return self
def transition_on_new_frame(self, args):
d = data(args)["forwarddetection"].detections
self.dets = gate_dets(d)
if conditions(args)["has_one_gate"](args):
self.microstate = self.one_det
elif conditions(args)["has_two_gates"](args):
self.microstate = self.two_dets
elif conditions(args)["has_three_gates"](args):
self.microstate = self.three_dets
else:
self.microstate = self.dead_reckon
return self
def dead_reckon(self, args):
self.microstate = self.wait_for_new_frame
return self
def two_dets(self, args):
cpi = center_pole_index(self.dets)
if cpi == -1:
self.left_det = self.dets[0]
self.right_det = self.dets[1]
self.microstate = self.align_between
elif cpi == 0:
if settings.GATE_40_LEFT:
self.microstate = self.turn_left
else:
self.left_det = self.dets[0]
self.right_det = self.dets[1]
self.microstate = self.align_between
elif cpi == 1:
if settings.GATE_40_LEFT:
self.left_det = self.dets[0]
self.right_det = self.dets[1]
self.microstate = self.align_between
else:
self.microstate = self.turn_right
return self
def three_dets(self, args):
if settings.GATE_40_LEFT:
self.left_det = self.dets[0]
self.right_det = self.dets[1]
self.microstate = self.align_between
else:
self.left_det = self.dets[1]
self.right_det = self.dets[2]
self.microstate = self.align_between
return self
def one_det(self, args):
if self.dets[0].x >= 0.5:
self.microstate = self.turn_left
if self.dets[0].x < 0.5:
self.microstate = self.turn_right
return self
def wait_for_new_frame(self, args):
if conditions(args)["is_through_gate"](args):
self.microstate = self.do_720
elif conditions(args)["has_new_frame"](args):
self.microstate = self.transition_on_new_frame
return self
def align_between(self, args):
self.set_heading += settings.GATE_HEADING_ADJUST * \
align_adjustment(self.left_det, self.right_det)
self.microstate = self.wait_for_new_frame
return self
def turn_right(self, args):
self.set_heading += settings.GATE_HEADING_ADJUST
self.microstate = self.wait_for_new_frame
return self
def turn_left(self, args):
self.set_heading -= settings.GATE_HEADING_ADJUST
self.microstate = self.wait_for_new_frame
return self
def do_720(self, args):
self.microstate = self.prepare_for_buoys
heading_0 = self.set_heading + 0
heading_90 = self.set_heading + 90
heading_180 = self.set_heading + 180
heading_270 = self.set_heading + 270
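        # build the eight 90-degree legs back to front, since each
        # SinkAndFindHeading must be handed the state that follows it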
turn7 = SinkAndFindHeading(self, heading_270, heading_0, settings.GATE_TARGET_DEPTH)
turn6 = SinkAndFindHeading(turn7, heading_180, heading_270, settings.GATE_TARGET_DEPTH)
turn5 = SinkAndFindHeading(turn6, heading_90, heading_180, settings.GATE_TARGET_DEPTH)
turn4 = SinkAndFindHeading(turn5, heading_0, heading_90, settings.GATE_TARGET_DEPTH)
turn3 = SinkAndFindHeading(turn4, heading_270, heading_0, settings.GATE_TARGET_DEPTH)
turn2 = SinkAndFindHeading(turn3, heading_180, heading_270, settings.GATE_TARGET_DEPTH)
turn1 = SinkAndFindHeading(turn2, heading_90, heading_180, settings.GATE_TARGET_DEPTH)
turn0 = SinkAndFindHeading(turn1, heading_0, heading_90, settings.GATE_TARGET_DEPTH)
return turn0
# def __init__(self, return_state, start_heading, target_heading, target_depth):
def prepare_for_buoys(self, args):
initial_turn_heading = self.set_heading + settings.G2B_INITIAL_TURN_AMOUNT
final_turn_heading = initial_turn_heading + settings.G2B_FINAL_TURN_AMOUNT
# build the steps in reverse
enter_buoys = FindTriangleBuoy(final_turn_heading)
turn_around = SinkAndFindHeading(enter_buoys,
initial_turn_heading,
final_turn_heading,
settings.BUOY_TARGET_DEPTH)
drive_forward = DriveForward(turn_around,
initial_turn_heading,
settings.G2B_TARGET_DEPTH,
settings.G2B_FORWARD_VELOCITY,
settings.G2B_FORWARD_TIME)
initial_turn = SinkAndFindHeading(drive_forward,
self.set_heading,
initial_turn_heading,
settings.G2B_TARGET_DEPTH)
return initial_turn
class FindTriangleBuoy(KillableState):
def __init__(self, start_heading):
super(FindTriangleBuoy, self).__init__()
self.set_heading = start_heading
self.set_depth = settings.BUOY_TARGET_DEPTH
self.align_det = None
self.microstate = self.drive_forward
def transition_on_new_frame(self, args):
dets = data(args)["forwarddetection"].detections
dets = buoy_dets(dets)
if triangle_dets(dets):
d = triangle_dets(dets)[0]
if d.cxt > settings.BUOY_SIZE_THRESH:
find_jianshi = FindJianshiBuoy(self.set_heading)
return TouchBuoy(find_jianshi, self.set_heading)
self.align_det = d
self.microstate = self.align_to
elif jianshi_dets(dets):
if settings.BUOY_TRIANGLE_LEFT:
self.microstate = self.turn_left
else:
self.microstate = self.turn_right
else:
self.microstate = self.drive_forward
return self
def wait_for_new_frame(self, args):
if conditions(args)["has_new_frame"](args):
self.microstate = self.transition_on_new_frame
return self
def drive_forward(self, args):
self.set_velocity = settings.BUOY_VELOCITY
self.microstate = self.wait_for_new_frame
return self
def align_to(self, args):
self.set_velocity = settings.BUOY_VELOCITY
if self.align_det.x < (0.5 - settings.VISION_X_TOLERANCE):
self.microstate = self.turn_left
elif self.align_det.x > (0.5 + settings.VISION_X_TOLERANCE):
self.microstate = self.turn_right
else:
self.microstate = self.wait_for_new_frame
return self
def turn_right(self, args):
self.set_heading += settings.BUOY_HEADING_ADJUST
self.microstate = self.wait_for_new_frame
return self
def turn_left(self, args):
self.set_heading -= settings.BUOY_HEADING_ADJUST
self.microstate = self.wait_for_new_frame
return self
class FindJianshiBuoy(KillableState):
def __init__(self, start_heading):
super(FindJianshiBuoy, self).__init__()
self.set_heading = start_heading
self.set_depth = settings.BUOY_TARGET_DEPTH
self.align_det = None
self.microstate = self.drive_forward
def transition_on_new_frame(self, args):
dets = data(args)["forwarddetection"].detections
dets = buoy_dets(dets)
        if jianshi_dets(dets):
d = jianshi_dets(dets)[0]
if d.cxt > settings.BUOY_SIZE_THRESH:
# TODO
return TouchBuoy(Surface(), self.set_heading)
self.align_det = d
self.microstate = self.align_to
        elif triangle_dets(dets):
if settings.BUOY_TRIANGLE_LEFT:
self.microstate = self.turn_right
else:
self.microstate = self.turn_left
else:
self.microstate = self.drive_forward
return self
def wait_for_new_frame(self, args):
if conditions(args)["has_new_frame"](args):
self.microstate = self.transition_on_new_frame
return self
def drive_forward(self, args):
self.set_velocity = settings.BUOY_VELOCITY
self.microstate = self.wait_for_new_frame
return self
def align_to(self, args):
self.set_velocity = settings.BUOY_VELOCITY
if self.align_det.x < (0.5 - settings.VISION_X_TOLERANCE):
self.microstate = self.turn_left
elif self.align_det.x > (0.5 + settings.VISION_X_TOLERANCE):
self.microstate = self.turn_right
else:
self.microstate = self.wait_for_new_frame
return self
def turn_right(self, args):
self.set_heading += settings.BUOY_HEADING_ADJUST
self.microstate = self.wait_for_new_frame
return self
def turn_left(self, args):
self.set_heading -= settings.BUOY_HEADING_ADJUST
self.microstate = self.wait_for_new_frame
return self
class TouchBuoy(KillableState):
def __init__(self, return_state, start_heading):
super(TouchBuoy, self).__init__()
self.return_state = return_state
self.set_heading = start_heading
self.set_depth = settings.BUOY_TARGET_DEPTH
self.set_velocity = settings.BUOY_TOUCH_VELOCITY
self.start_time = datetime.now()
self.microstate = self.drive_into_buoy
def drive_into_buoy(self, args):
self.set_velocity = settings.BUOY_TOUCH_VELOCITY
end_time = self.start_time + timedelta(milliseconds=settings.BUOY_TOUCH_TIME)
cur_time = datetime.now()
if cur_time > end_time:
self.start_time = cur_time
self.microstate = self.stop
return self
def stop(self, args):
self.set_velocity = 0
end_time = self.start_time + timedelta(milliseconds=settings.BUOY_STOP_TIME)
cur_time = datetime.now()
if cur_time > end_time:
self.start_time = cur_time
self.microstate = self.back_off
return self
def back_off(self, args):
self.set_velocity = settings.BUOY_BACKOFF_VELOCITY
end_time = self.start_time + timedelta(milliseconds=settings.BUOY_BACKOFF_TIME)
cur_time = datetime.now()
if cur_time > end_time:
return self.return_state
return self
class SinkAndFindHeading(KillableState):
def __init__(self, return_state, start_heading, target_heading, target_depth):
super(SinkAndFindHeading, self).__init__()
self.set_heading = start_heading
self.set_depth = target_depth
self.target_heading = target_heading
self.return_state = return_state
self.microstate = self.sink
def sink(self, args):
if conditions(args)["at_depth"](args):
self.microstate = self.align
conditions(args)["at_heading"].reset(False)
return self
def align(self, args):
self.set_heading = self.target_heading
if conditions(args)["settled"](args):
return self.return_state
return self
class DriveForward(KillableState):
def __init__(self, return_state, heading, depth, velocity, time):
super(DriveForward, self).__init__()
self.set_heading = heading
self.set_depth = depth
self.set_velocity = velocity
self.end_time = datetime.now() + timedelta(milliseconds=time)
self.microstate = self.drive_forward
self.return_state = return_state
def drive_forward(self, args):
if datetime.now() > self.end_time:
return self.return_state
return self
|
983,452 | df9e1701aa0840fa45ddd4bb3797d97b487503f8 | from django.conf import settings
from django.template import RequestContext
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from education.libraries.cse.api import CSEAPI
def view(request, id):
api = CSEAPI()
product = api.get_product(id)
return render_to_response('product/view.html', dict(product=product), context_instance=RequestContext(request)) |
983,453 | f1e9bb25a1ea835b099a887c0d750cd7f99a3bc0 | from django.db import models
# Create your models here.
class ScheduleEvent(models.Model):
title = models.CharField(max_length=200)
start = models.DateTimeField()
end = models.DateTimeField()
def __str__(self):
return f'{self.title}-id is {self.id}' |
983,454 | 0ce7f7ad53ff45eea13551d514c69d5ab14a4dec | #=======================================================================================================================
# Copyright (c) Autodesk, Inc. All rights reserved
# Written by Yusuke Mori, Autodesk Consulting 2018
#
# This software is provided as is, without any warranty that it will work. You choose to use this tool at your own risk.
# Neither Autodesk nor the authors can be taken as responsible for any damage this tool can cause to
# your data. Please always make a back up of your data prior to use this tool.
#
#=======================================================================================================================
"""
WSGI config for forge_django project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
from __future__ import absolute_import, unicode_literals
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "forge_django.settings.dev")
application = get_wsgi_application()
|
983,455 | acb684b405c9647fb7b11ca794f542cb708b4b45 | """A class that can be used to represent a car."""
class Car():
"""A simple attempt to represent a car."""
def __init__(self,make,model,year):
self.make = make
self.model = model
self.year = year
self.odometer_reading = 0
def get_descriptive_name(self):
long_name = str(self.year) + ' ' + self.make + ' ' + self.model
return long_name.title()
def read_odometer(self):
print("This car has " + str(self.odometer_reading) + " miles on it.")
def update_odometer(self, mileage):
if mileage >= self.odometer_reading:
self.odometer_reading =mileage
else:
print("You can't roll back an odometer!")
def increment_odometer(self,miles):
self.odometer_reading += miles
class ElectricCar(Car):
"""Represent aspects of a car, specific to electric vehicles."""
def __init__(self,make,model,year):
""" Initialize attributes of the parent class."""
super().__init__(make,model,year)
self.battery = Battery()
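# The calls below assume a Battery helper that the original file never defined;
# this minimal sketch (size, range lookup, upgrade) is filled in so the script
# runs, with the 70/85 kWh figures chosen purely for illustration.
class Battery():
    """A simple attempt to model a battery for an electric car."""
    def __init__(self, battery_size=70):
        self.battery_size = battery_size
    def describe_battery(self):
        print("This car has a " + str(self.battery_size) + "-kWh battery.")
    def get_range(self):
        # illustrative range figures for the two battery sizes
        if self.battery_size == 70:
            range_miles = 240
        else:
            range_miles = 270
        print("This car can go approximately " + str(range_miles) +
              " miles on a full charge.")
    def upgrade_battery(self):
        if self.battery_size != 85:
            self.battery_size = 85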
my_car = ElectricCar('tesla', 'model s', 2016)
print(my_car.get_descriptive_name())
my_car.battery.describe_battery()
my_car.battery.get_range()
my_car.battery.upgrade_battery()
my_car.battery.get_range()
my_car.battery.describe_battery()
|
983,456 | 2c4eca867119774b2a0d3af7fcc2cfe2c05ada3d | import PHPTraceTokenizer
import PHPProfileParser
import PHPTraceParser
import os
traceDir = "test-data"
def trace_and_profile_from_timestamp(traceDir, timestamp):
return (
os.path.join(traceDir, "{}.xt".format(timestamp)),
os.path.join(traceDir, "{}.xp".format(timestamp))
)
def create_trace(traceFile, profileFile):
function_mappings = PHPProfileParser.get_function_file_mapping(profileFile)
return PHPTraceTokenizer.Trace(traceFile, function_mappings)
def traceNoExceptionsTest(timestamp):
traceFile, profileFile = trace_and_profile_from_timestamp(traceDir, timestamp)
trace = create_trace(traceFile, profileFile)
traceNoExceptionsTest('1541770537') |
983,457 | 87b85731a6090e2e683a365672a525b344b0e13e |
import os
from tkinter import filedialog,Listbox
import tkinter as tk
from tkinter import ttk
"""
presentationHelper.py
holds functions and classes used in a number of frames
"""
def list_files(filepath, filetype):
paths = []
for root, dirs, files in os.walk(filepath):
for file in files:
if file.lower().endswith(filetype.lower()):
paths.append(os.path.join(root, file))
    return paths
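# e.g. list_files("images", ".png") might return
# ["images/a.png", "images/shots/b.png"]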
#TODO: switch listbox to something more abstract
def UploadImage(imagePaths,listbox,AllowedFiletypes):
"""[gets the paths from user added images]
Args:
imagePaths ([List]): [The list of image paths the form holds]
listbox ([Listbox]): [The list box to witch the image be added to ]
AllowedFiletypes ([tupels of tupels]): [
the suffix of the files we want
example:
((' Portable Network Graphics files', '*.png'),('All files', '*.*'))
]
"""
filenames = filedialog.askopenfilenames(filetypes=AllowedFiletypes)
for afile in filenames:
if(afile != ""):
imagePaths.append(afile)
listbox.insert(tk.END, os.path.basename(afile))
#TODO: switch listbox to something more abstract- or by type
def UploadFolder(imagePaths,listbox,ImageType='.png'):
"""[summary]
Args:
imagePaths ([List]): [The list of image paths the form holds]
listbox ([Listbox]): [The list box to witch the image be added to ]
ImageType (str, optional): [the suffix of the files we want
example '.txt']. Defaults to '.png'.
"""
foldername = filedialog.askdirectory()
if foldername == ():
return
filenames=list_files(foldername,ImageType)
for afile in filenames:
if(afile != ""):
imagePaths.append(afile)
listbox.insert(tk.END, os.path.basename(afile))
return foldername
class LoadingScreen(tk.Frame):
def __init__(self, master,connection,labelName):
import tkinter.ttk as ttk
import threading
self.connection=connection
self.master = master
self.master.geometry('350x120')
self.frame = tk.Frame(self.master)
self.frame.pack(anchor=tk.CENTER,expand=True,fill=tk.BOTH)
self.label=ttk.Label(self.frame, text=labelName)
self.label.pack(side="top",anchor=tk.CENTER,pady=5,padx=5)
self.progress = ttk.Progressbar(self.frame, orient = tk.HORIZONTAL,
length = 100, mode = 'indeterminate')
self.progress.pack( fill=tk.BOTH ,pady=5,padx=5)
ttk.Button(self.frame, text="Cancel",command= lambda : self.cancelProcess()
).pack(pady=5,padx=5 )
self.progressbarThread = threading.Thread(target=self.barProgress, daemon=True)
self.processEnded=False
self.progressbarThread.start()
def barProgress(self):
import time
import tkinter.ttk as ttk
def isItDone():
if(self.connection.poll()):
try:
answer=self.connection.recv()
if(answer=="DONE" or answer=="FAIL"):
self.processEnded=True
if(answer=="FAIL"):
import tkinter.messagebox
tkinter.messagebox.showerror(title="training message", message="Model problems \n\nContact support for help")
else:
self.label['text']=answer
except Exception:
pass
try:
while(self.processEnded == False ):
for prog in range(0,100,1):
self.progress['value'] = prog
self.frame.update_idletasks()
time.sleep(0.1)
isItDone()
if(self.processEnded==True):
break
for prog in range(100,0,1):
self.progress['value'] = prog
self.frame.update_idletasks()
time.sleep(0.1)
isItDone()
if(self.processEnded==True):
break
        # user cancelled manually
except Exception:
self.processEnded=True
self.cancelProcess()
def cancelProcess(self):
#if process ended
if(self.processEnded != True ):
self.connection.send("canceled")
self.connection.close()
self.master.destroy()
class VerticalScrolledFrame(tk.Frame):
# http://tkinter.unpythonic.net/wiki/VerticalScrolledFrame
"""A pure Tkinter scrollable frame that actually works!
* Use the 'interior' attribute to place widgets inside the scrollable frame
* Construct and pack/place/grid normally
* This frame only allows vertical scrolling
"""
#found on: https://stackoverflow.com/a/16198198/15638952
def __init__(self, parent, *args, **kw):
tk.Frame.__init__(self, parent, *args, **kw)
# create a canvas object and a vertical scrollbar for scrolling it
vscrollbar = ttk.Scrollbar(self, orient=tk.VERTICAL)
vscrollbar.pack(fill=tk.Y, side=tk.RIGHT, expand=tk.FALSE)
canvas = tk.Canvas(self, bd=0, highlightthickness=0,
yscrollcommand=vscrollbar.set)
canvas.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.TRUE)
vscrollbar.config(command=canvas.yview)
# reset the view
canvas.xview_moveto(0)
canvas.yview_moveto(0)
# create a frame inside the canvas which will be scrolled with it
self.interior = interior = tk.Frame(canvas)
interior_id = canvas.create_window(0, 0, window=interior,
anchor=tk.NW)
# track changes to the canvas and frame width and sync them,
# also updating the scrollbar
def _configure_interior(event):
# update the scrollbars to match the size of the inner frame
size = (interior.winfo_reqwidth(), interior.winfo_reqheight())
canvas.config(scrollregion="0 0 %s %s" % size)
if interior.winfo_reqwidth() != canvas.winfo_width():
# update the canvas's width to fit the inner frame
canvas.config(width=interior.winfo_reqwidth())
interior.bind('<Configure>', _configure_interior)
def _configure_canvas(event):
if interior.winfo_reqwidth() != canvas.winfo_width():
# update the inner frame's width to fill the canvas
canvas.itemconfigure(interior_id, width=canvas.winfo_width())
canvas.bind('<Configure>', _configure_canvas) |
983,458 | a5deba001c40099fd22c2d416c45eaf0571d2647 | """
Common helper methods for the script layer
"""
#######################################
"""
How Python searches for packages:
1. Look in the installation directories.
2. When run from an IDE, search downward from the project root.
3. When run from the command line, search downward from the directory of the executing .py file.
Command-line runs therefore fail to find the package; the fix is to add the project root to sys.path.
"""
import os
import sys
cp = os.path.realpath(__file__)
cd = os.path.dirname(cp)
cd = os.path.dirname(cd)
cd = os.path.dirname(cd)
sys.path.append(cd)
#######################################
import pytest
from zonghe.caw import DataRead
from zonghe.caw.BaseRequests import BaseRequests
env_path = r"data_env\env.ini"
# Read the url from env.ini; session-scoped, so it is read once for the whole run
@pytest.fixture(scope="session")
def url():
return DataRead.read_ini(env_path, "url")
@pytest.fixture(scope="session")
def db():
return eval(DataRead.read_ini(env_path, "db"))
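# note: eval expects the ini value to be a dict literal such as
# {"host": "...", "port": 3306}; ast.literal_eval would be the safer parser here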
# Create a single session-scoped BaseRequests instance so cookies are managed automatically across the whole run
@pytest.fixture(scope="session")
def baserequests():
return BaseRequests()
|
983,459 | 23957570feda175020d7f7b774016580f26736ba | # Generated by Django 4.1.3 on 2023-07-03 18:03
from django.contrib.postgres.operations import BtreeGinExtension, TrigramExtension
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0028_alter_systemversion_countries'),
]
operations = [
BtreeGinExtension(),
TrigramExtension()
]
|
983,460 | e72026aace328acff05e99d8935669183ab41d27 | import graphene
from graphene_django.types import DjangoObjectType
from graphql_relay.node.node import from_global_id
import json
from . import models
'''
Model Returns
'''
class ClientType(DjangoObjectType):
class Meta:
model = models.Client
interfaces = (graphene.Node, )
class ProductType(DjangoObjectType):
class Meta:
model = models.Product
interfaces = (graphene.Node, )
class EdgeType(DjangoObjectType):
class Meta:
model = models.Edge
interfaces = (graphene.Node, )
class NodeType(DjangoObjectType):
class Meta:
model = models.Node
interfaces = (graphene.Node, )
class PalletType(DjangoObjectType):
class Meta:
model = models.Pallet
interfaces = (graphene.Node, )
class ProductBundleType(DjangoObjectType):
class Meta:
model = models.ProductBundle
interfaces = (graphene.Node, )
class MovementType(DjangoObjectType):
class Meta:
model = models.Movement
interfaces = (graphene.Node, )
'''
Query Returns
'''
class Query(object):
'''
Clients Queries
'''
all_clients = graphene.List(ClientType)
client = graphene.Field(ClientType, clientID=graphene.ID())
def resolve_all_clients(self, args):
return models.Client.objects.all()
def resolve_client(self, info, clientID):
return models.Client.objects.get(pk=clientID)
'''
Products Queries
'''
all_products = graphene.List(ProductType)
product = graphene.Field(ProductType, productID=graphene.ID())
def resolve_all_products(self, args):
return models.Product.objects.all()
def resolve_product(self, info, productID):
return models.Product.objects.get(pk=productID)
'''
Product Bundle Queries
'''
all_product_bundles = graphene.List(ProductBundleType)
product_bundle = graphene.Field(ProductBundleType, bundleID=graphene.ID())
def resolve_all_product_bundles(self, args):
return models.ProductBundle.objects.all()
def resolve_product_bundle(self, info, bundleID):
return models.ProductBundle.objects.get(pk=bundleID)
'''
Pallet Queries
'''
all_pallets = graphene.List(PalletType)
pallet = graphene.Field(PalletType, palletID=graphene.ID())
def resolve_all_pallets(self, args):
return models.Pallet.objects.all()
def resolve_pallet(self, info, palletID):
return models.Pallet.objects.get(pk=palletID)
'''
Movement Queries
'''
all_movements = graphene.List(MovementType)
movement = graphene.Field(MovementType, movementID=graphene.ID())
def resolve_all_movements(self, args):
return models.Movement.objects.all()
def resolve_movement(self, info, movementID):
return models.Movement.objects.get(pk=movementID)
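# For reference, a query served by the resolvers above might look like this
# (field names assume graphene's default snake_case -> camelCase conversion):
#   query { allClients { clientName clientPhone } }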
'''
Mutation
'''
class CreateMessageMutation(graphene.Mutation):
class Input:
clientName = graphene.String()
clientAddress = graphene.String()
clientPhone = graphene.String()
status = graphene.Int()
formErrors = graphene.String()
client = graphene.Field(ClientType)
@staticmethod
def mutate(root, info, context, clientName, clientPhone, clientAddress):
if not context.user.is_authenticated():
return CreateMessageMutation(status=403)
# Here we would usually use Django forms to validate the input
if not clientName:
return CreateMessageMutation(
status=400,
formErrors=json.dumps(
{
'clientName': ['Please enter Client name'],
'clientAddress': ['Please enter Client address'],
'clientPhone': ['Please enter Client phone']}))
obj = models.Client.objects.create(
clientName=clientName, clientAddress=clientAddress, clientPhone=clientPhone
)
        return CreateMessageMutation(status=200, client=obj)
class Mutation(graphene.AbstractType):
create_client = CreateMessageMutation.Field() |
983,461 | f1b90d9a2e48ca3e6aa741c0ba58d36b803eed39 | #AES in ECB mode decryption only
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives import padding
from cryptography.hazmat.backends import default_backend
import base64
aesKey = b'NO PAIN NO GAIN!'
cipher = Cipher(algorithms.AES(aesKey), modes.ECB(), backend=default_backend())
#message = b'Try this for a change'
'''
This part encrypts my own text for students. The pt is in 7alt.txt
'''
f1 = open('7alt.txt', 'r')
pt = f1.read()
f1.close()
encryptor = cipher.encryptor()
padder = padding.PKCS7(128).padder()
padded_data = padder.update(pt.encode()) + padder.finalize()
textEnc = encryptor.update(padded_data) + encryptor.finalize()
textEnc = base64.b64encode(textEnc)
#write the encrypted text as base64 in the file
f2 = open('7altEnc.txt', 'w')
f2.write(str(textEnc, 'utf-8'))
f2.close()
#read the encrypted text, base64-decode it and then decrypt it
f3 = open('w3p1.txt', 'r')
ct = f3.read()
f3.close()
ct = base64.b64decode(ct)
decryptor = cipher.decryptor()
textOut = decryptor.update(ct) + decryptor.finalize() #output is in bytes
print(str(textOut, 'utf-8')) #convert bytes to string
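# Caveat: if the sender PKCS7-padded the plaintext (as the encryption half above
# does), the decrypted bytes still carry that padding; stripping it would look
# roughly like:
#   unpadder = padding.PKCS7(128).unpadder()
#   textOut = unpadder.update(textOut) + unpadder.finalize()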
|
983,462 | ecdffe30d833a1c3237033716f4eba4dbd16b047 | import pandas as pd
uselog=pd.read_csv("use_log.csv")
customer=pd.read_csv("customer_master.csv")
class_master=pd.read_csv("class_master.csv")
campaign_master=pd.read_csv("campaign_master.csv")
customer_join=pd.merge(customer,class_master,on="class",how="left")
customer_join=pd.merge(customer_join,campaign_master,on="campaign_id",how="left")
print(customer_join.head(),len(customer),len(customer_join))
print(customer_join.isnull().any(axis=0))  # check for missing values |
983,463 | 16aa50d630d8f44baac1fdd117e43c957c0156b0 | from sklearn import metrics
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC, SVC
from tqdm import tqdm
import os
import numpy as np
import pandas as pd
csv_path = 'processed.csv'
if __name__ == '__main__':
# Read file
df = pd.read_csv(os.path.join(csv_path), header=0, index_col=0)
# Split data
targets = np.int64([0, 1])
target_names = ['ham', 'spam']
X_train, X_test, y_train, y_test = train_test_split(
df['text'], df['is_spam'], test_size=0.2, random_state=191)
print('Data set:')
print('{} total'.format(df.shape[0]))
for t, t_name in zip(targets, target_names):
print('{} {}'.format(len(df[df['is_spam'] == t]), t_name))
print('\nTraining set:')
print('{} total'.format(len(X_train)))
for t, t_name in zip(targets, target_names):
print('{} {}'.format(sum([y == t for y in y_train]), t_name))
print('\nTest set:')
print('{} total'.format(len(X_test)))
for t, t_name in zip(targets, target_names):
print('{} {}'.format(sum([y == t for y in y_test]), t_name))
print('')
# Pipeline
pipe = Pipeline([
('tfidf', TfidfVectorizer()),
('clf', SVC(kernel='sigmoid', C=10, gamma=1))
# ('clf', LinearSVC(C=100))
# ('clf', LinearSVC(C=10))
# ('clf', SVC(kernel='rbf', C=100, gamma='auto'))
# ('clf', SVC(kernel='linear', C=10, gamma=10))
])
pipe.fit(X_train, y_train)
predicted = pipe.predict(X_test)
print('Accuracy: {:.05%}'.format(np.mean(predicted == y_test)))
print(metrics.classification_report(y_test,
predicted,
target_names=target_names))
|
983,464 | 1c5262fc4042df3fdc2f8523864f0bb371b32d61 | #-*-coding:utf-8-*-
import sys
sys.path.append('.')
import numpy as np
from train_config import config as cfg
from anchor.base_anchor import generate_anchors
from anchor.common import np_iou,filter_boxes_inside_shape
from anchor.box_utils import encode
def produce_target(image,boxes,labels):
boxes = boxes.copy()
all_anchors_flatten = cfg.ANCHOR.achors
#inside_ind, inside_anchors = filter_boxes_inside_shape(all_anchors_flatten, image.shape[:2])
inside_anchors=all_anchors_flatten
    num_all_anchors = all_anchors_flatten.shape[0]
    if boxes.shape[0] == 0:
        # no ground truth in this image: get_anchor_labels asserts NB > 0,
        # so guard here and mark every anchor as background
        print('warning: image has no gt boxes, all anchors set to background')
        all_labels = np.zeros((num_all_anchors,), dtype='int32')
        all_boxes = np.zeros((num_all_anchors, 4), dtype='float32')
        return all_boxes, all_labels
    # obtain anchor labels and their corresponding gt boxes
    anchor_labels, anchor_gt_boxes = get_anchor_labels(inside_anchors, boxes, labels)
    # map back to all_anchors
    #all_labels[inside_ind] = anchor_labels
    all_labels = anchor_labels
    #all_boxes[inside_ind] = anchor_gt_boxes
    all_boxes = anchor_gt_boxes
# start = 0
# multilevel_inputs = []
# for level_anchor in anchors_per_level:
# assert level_anchor.shape[2] == len(cfg.ANCHOR.ANCHOR_RATIOS)
# anchor_shape = level_anchor.shape[:3] # fHxfWxNUM_ANCHOR_RATIOS
# num_anchor_this_level = np.prod(anchor_shape)
# end = start + num_anchor_this_level
# multilevel_inputs.append(
# (all_labels[start: end].reshape(anchor_shape),
# all_boxes[start: end, :].reshape(anchor_shape + (4,))
# ))
# start = end
# assert end == num_all_anchors, "{} != {}".format(end, num_all_anchors)
# return multilevel_inputs
return all_boxes,all_labels
def get_anchor_labels(anchors, gt_boxes,labels):
# This function will modify labels and return the filtered inds
def filter_box_label(labels, value, max_num):
curr_inds = np.where(labels == value)[0]
if len(curr_inds) > max_num:
disable_inds = np.random.choice(
curr_inds, size=(len(curr_inds) - max_num),
replace=False)
labels[disable_inds] = -1 # ignore them
curr_inds = np.where(labels == value)[0]
return curr_inds
NA, NB = len(anchors), len(gt_boxes)
assert NB > 0 # empty images should have been filtered already
# ##########
anchor_matched_already = np.zeros((NA,), dtype='int32')
gt_boxes_mathed_already = np.zeros((NB,), dtype='int32')
anchor_labels=np.zeros((NA,), dtype='int32')
anchor_boxes = np.zeros((NA, 4), dtype='float32')
box_ious = np_iou(anchors, gt_boxes) # NA x NB
# for each anchor box choose the groundtruth box with largest iou
max_iou = box_ious.max(axis=1) # NA
positive_anchor_indices = np.where(max_iou > cfg.ANCHOR.POSITIVE_ANCHOR_THRESH)[0]
negative_anchor_indices = np.where(max_iou < cfg.ANCHOR.NEGATIVE_ANCHOR_THRESH)[0]
positive_iou = box_ious[positive_anchor_indices]
matched_gt_box_indices = positive_iou.argmax(axis=1)
anchor_labels[positive_anchor_indices]=labels[matched_gt_box_indices]
anchor_boxes[positive_anchor_indices]=gt_boxes[matched_gt_box_indices]
anchor_matched_already[positive_anchor_indices]=1#### marked as matched
gt_boxes_mathed_already[matched_gt_box_indices]=1#### marked as matched
if np.sum(anchor_matched_already)>0:
n=np.sum(anchor_matched_already)/np.sum(gt_boxes_mathed_already)
else:
n=cfg.ANCHOR.AVG_MATCHES
n= n if n >cfg.ANCHOR.AVG_MATCHES else cfg.ANCHOR.AVG_MATCHES
if not cfg.ANCHOR.super_match:
n=cfg.ANCHOR.AVG_MATCHES
# some gt_boxes may not be matched; find them and match each such gt box with n anchors
box_ious[box_ious<cfg.ANCHOR.NEGATIVE_ANCHOR_THRESH]=0
sorted_ious=np.argsort(-box_ious,axis=0)
sorted_ious=sorted_ious[np.logical_not(anchor_matched_already)]
for i in range(0,len(gt_boxes_mathed_already)):
matched_count=np.sum(matched_gt_box_indices==gt_boxes_mathed_already[i])
if matched_count>=n:
continue
else:
for j in range(0,int(n-matched_count)):
if box_ious[sorted_ious[j][i]][i]>cfg.ANCHOR.NEGATIVE_ANCHOR_THRESH:
anchor_labels[sorted_ious[j][i]]= labels[i]
anchor_boxes[sorted_ious[j][i]] = gt_boxes[i]
anchor_matched_already[sorted_ious[j][i]]=1
gt_boxes_mathed_already[i]=1
fg_boxes=anchor_boxes[anchor_matched_already.astype(np.bool)]
matched_anchors=anchors[anchor_matched_already.astype(np.bool)]
##select and normalise
fg_boxes=fg_boxes/cfg.DATA.MAX_SIZE
matched_anchors=matched_anchors/cfg.DATA.MAX_SIZE
encode_fg_boxes=encode(fg_boxes,matched_anchors)
anchor_boxes[anchor_matched_already.astype(np.bool)] = encode_fg_boxes
# assert len(fg_inds) + np.sum(anchor_labels == 0) == cfg.ANCHOR.BATCH_PER_IM
return anchor_labels, anchor_boxes
def get_all_anchors(stride=None, sizes=None):
"""
Get all anchors in the largest possible image, shifted, floatbox
Args:
stride (int): the stride of anchors.
sizes (tuple[int]): the sizes (sqrt area) of anchors
Returns:
anchors: SxSxNUM_ANCHORx4, where S == ceil(MAX_SIZE/STRIDE), floatbox
The layout in the NUM_ANCHOR dim is NUM_RATIO x NUM_SIZE.
"""
if stride is None:
stride = cfg.ANCHOR.ANCHOR_STRIDE
if sizes is None:
sizes = cfg.ANCHOR.ANCHOR_SIZES
# Generates a NAx4 matrix of anchor boxes in (x1, y1, x2, y2) format. Anchors
# are centered on stride / 2, have (approximate) sqrt areas of the specified
# sizes, and aspect ratios as given.
cell_anchors = generate_anchors(
stride,
scales=np.array(sizes, dtype=np.float) / stride,
ratios=np.array(cfg.ANCHOR.ANCHOR_RATIOS, dtype=np.float))
# anchors are intbox here.
# anchors at featuremap [0,0] are centered at fpcoor (8,8) (half of stride)
max_size = cfg.DATA.MAX_SIZE
field_size = int(np.ceil(max_size / stride))
shifts = np.arange(0, field_size) * stride
shift_x, shift_y = np.meshgrid(shifts, shifts)
shift_x = shift_x.flatten()
shift_y = shift_y.flatten()
shifts = np.vstack((shift_x, shift_y, shift_x, shift_y)).transpose()
# Kx4, K = field_size * field_size
K = shifts.shape[0]
A = cell_anchors.shape[0]
field_of_anchors = (
cell_anchors.reshape((1, A, 4)) +
shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
field_of_anchors = field_of_anchors.reshape((field_size, field_size, A, 4))
# FSxFSxAx4
# Many rounding happens inside the anchor code anyway
# assert np.all(field_of_anchors == field_of_anchors.astype('int32'))
field_of_anchors = field_of_anchors.astype('float32')
field_of_anchors[:, :, :, [2, 3]] += 1
return field_of_anchors
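# Worked example (hypothetical numbers; the real cfg values are not shown here):
# with stride=16 and cfg.DATA.MAX_SIZE=640, field_size = ceil(640/16) = 40, so the
# returned array has shape 40x40xAx4: one set of A cell anchors per 16-px step.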
def get_all_anchors_fpn(strides=None, sizes=None):
"""
Returns:
[anchors]: each anchors is a SxSx NUM_ANCHOR_RATIOS x4 array.
"""
if strides is None:
strides = cfg.ANCHOR.ANCHOR_STRIDES
if sizes is None:
sizes = cfg.ANCHOR.ANCHOR_SIZES
assert len(strides) == len(sizes)
foas = []
for stride, size in zip(strides, sizes):
foa = get_all_anchors(stride=stride, sizes=(size,))
foas.append(foa)
return foas
if __name__ == '__main__':
import cv2
anchors=cfg.ANCHOR.achors
image=np.ones(shape=[cfg.DATA.MAX_SIZE,cfg.DATA.MAX_SIZE,3])*255
# for x in anchors:
# print(x.shape)
anchors=np.array(anchors)
for i in range(0,anchors.shape[0]):
box=anchors[i]
print(box[2]-box[0])
cv2.rectangle(image, (int(box[0]), int(box[1])),
(int(box[2]), int(box[3])), (255, 0, 0), 1)
cv2.namedWindow('anchors',0)
cv2.imshow('anchors',image)
cv2.waitKey(0)
a,b=produce_target(image,np.array([[34., 396., 58., 508.],[20,140,50,160]]),np.array([1,1]))
# print(a.shape)
# print(b.shape)
#
#
#
# for i in range(len(a)):
# label_target=b[i]
# boxes_target=a[i]
#
#
#
# if label_target>0:
# box=boxes_target
# print(box)
|
983,465 | 905470e21bf88b6576aab0b3a135c8043dfe1665 | import pymysql
#1. establish the connection
conn = pymysql.connect(
host="192.168.100.78",
port=3306,
user="root",
passwd="root",
db="51fanli_zymall",
charset='utf8'
)
#2. create a cursor
cur = conn.cursor()
#3. query the database
cur.execute("SELECT * FROM product WHERE SELLER_ID=41")
#4. fetch the query results
result = cur.fetchall()
print(result)
#5. update data
cur.execute("UPDATE product SET brand_id=2 WHERE product_code=88880000000173")
#6. commit the changes
conn.commit() # note: commit via conn, not cur
#7. close the cursor and connection
cur.close()
conn.close()
#roll back on failure
try:
cur.execute("DELETE FROM product WHERE product_code=88880000000173")
conn.commit()
except Exception as e:
conn.rollback()
print(str(e)) |
983,466 | ed8cc0a41bdd9125efdf520c27af98438c3a3829 | pet = ['kitten', 'mouse', 'hamster', 'parrot', 'shrimp']
from random import choice
from random import shuffle
quiz = choice(pet)
list_quiz = list(quiz)
shuffle(list_quiz)
for i in range(len(list_quiz)):
print(list_quiz[i], end=' ')
print('\n')
answer = input('your answer:')
if answer == quiz:
print("Hura")
else:
print(":(") |
983,467 | ed64e54923a6924024a3a06830f8b9e49aea542e | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : mprt.py
# @Date : 2019-02-16
# @Author : luyang(luyang@novogene.com)
import re
from Bio import ExPASy
from Bio import SeqIO
def main():
file = 'input/rosalind_mprt.txt'
with open(file) as f:
for id in f.readlines():
handle = ExPASy.get_sprot_raw(id.strip())
seq_record = SeqIO.read(handle, "swiss")
# ?= zero-width lookahead assertion
pattern = re.compile('(?=(N[^P][ST][^P]))')
positions = pattern.finditer(str(seq_record.seq))
position = []
for i in positions:
position.append(i.start() + 1)
if position:
print(id.strip())
print(' '.join(map(str, position)))
if __name__ == "__main__":
main()
|
983,468 | 6ca942a2297b87c8fca4245b37cad7d641232395 | import os
def create_dirs():
for i in range(1, 10):
dir_name = 'dir_' + str(i)
os.mkdir(dir_name)
def remove_dirs():
for i in range(1, 10):
dir_name = 'dir_' + str(i)
os.rmdir(dir_name) |
983,469 | 4b06a8792ca0208b8cfad9d429525017f7081167 | # -*- coding: utf-8 -*-
import random
with open('file.txt') as f:
data = f.read()
l = random.sample(data, 100)
with open("hop.txt", "w") as f:
f.writelines(l) |
983,470 | cdbcc699da674e188b4fbe36af91c38912aae206 | from flask import jsonify
import service.helpers.loadConfig as config
import time
import requests
class MQTTService():
@staticmethod # defined without self, so expose it as a static method so instance calls also work
def publish_with_response(topic,response_topic,message, timeout, mac):
url = "http://" + config.restful.host + ":" + config.restful.port + "/api" + config.restful.publishResp
data = {
"topic":topic,
"response_topic":response_topic,
"message":message,
"timeout":timeout,
"mac":mac
}
with requests.Session() as session:
response = session.put(url=url, json=data)
return response.text |
983,471 | 14d07fe5c6d75a08d59fb91b1f5af392019f0b4e | from multiprocessing import Pool
from multiprocessing import Manager
import time
import re,requests
def timecount(f):
def fun():
start = time.time()
result = f()
end = time.time()
print('elapsed time: ' + str(end - start) + 's') # str() needed: a float cannot be concatenated to a str
return result
return fun
def read():
pass
def main():
with open('1.txt','r') as f:
s = f.read()
result = re.findall(r'(.*?)\.jpg',s) # escape the dot so only literal ".jpg" names match
print(result)
for i in result:
response = requests.get(i+'.jpg')
content = response.content
print(content)
if __name__ == '__main__':
main() |
983,472 | 1ea90ed379c4909a7f36bf5673d5856cc5edff4b | /home/sheetal/sid/projects/ananconda3/lib/python3.6/imp.py |
983,473 | 20d355a80408e6bf296f44bdcb1eb96c2243defc | from PIL import Image,ImageFilter,ImageOps
img=Image.open('d:\\picture\\1.png')
def dodge(a,b,alpha): #fixed: "b.alpha" was a syntax error
#classic color-dodge blend (255, not 225, in the standard formula)
return min(int(a*255/(256-b*alpha)),255)
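# e.g. dodge(100, 128, 1.0) = min(int(100*255/(256-128)), 255) = 199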
def draw(img,blur=25,alpha=1.0):
img1=img.convert('L')#convert the image to grayscale
img2=img1.copy()
img2=ImageOps.invert(img2)
for i in range(blur):
img2=img2.filter(ImageFilter.BLUR)
width,height=img1.size
for x in range(width):
for y in range(height):
a=img1.getpixel((x,y))
b=img2.getpixel((x,y))
img1.putpixel((x,y),dodge(a,b,alpha))
img1.show()
img1.save('d:\\picture\\2.png')
draw(img)
|
983,474 | 50eaa663879fe041e75dfbbad8a29030182e77a7 | # --------------------------------------------------------
#
# PYTHON PROGRAM DEFINITION
#
# The knowledge a computer has of Python can be specified in 3 levels:
# (1) Prelude knowledge --> The computer has it by default.
# (2) Borrowed knowledge --> The computer gets this knowledge from 3rd party libraries defined by others
# (but imported by us in this program).
# (3) Generated knowledge --> The computer gets this knowledge from the new functions defined by us in this program.
#
# When launching in a terminal the command:
# user:~$ python3 this_file.py
# our computer first processes this PYTHON PROGRAM DEFINITION section of the file.
# On it, our computer enhances its Python knowledge from levels (2) and (3) with the imports and new functions
# defined in the program. However, it still does not execute anything.
#
# --------------------------------------------------------
# ------------------------------------------
# IMPORTS
# ------------------------------------------
import pyspark
# ------------------------------------------
# FUNCTION my_keys
# ------------------------------------------
def my_keys(sc):
# 1. Operation C1: Creation 'parallelize', so as to store the content of the collection [(1, "A"), (1, "B"), (2, "B")] into an RDD.
# C1: parallelize
# dataset -----------------> inputRDD
inputRDD = sc.parallelize([(1, "A"), (1, "B"), (2, "B")])
# 2. Operation T1: Transformation 'keys', so as to get a new RDD with the keys in inputRDD.
# C1: parallelize T1: keys
# dataset -----------------> inputRDD ----------> keysRDD
keysRDD = inputRDD.keys()
# 3. Operation A1: 'collect'.
# C1: parallelize T1: keys A1: collect
# dataset -----------------> inputRDD ---------> keysRDD ------------> resVAL
resVAL = keysRDD.collect()
# 4. We print by the screen the collection computed in resVAL
for item in resVAL:
print(item)
# ------------------------------------------
# FUNCTION my_values
# ------------------------------------------
def my_values(sc):
# 1. Operation C1: Creation 'parallelize', so as to store the content of the collection [(1, "A"), (1, "B"), (2, "B")] into an RDD.
# C1: parallelize
# dataset -----------------> inputRDD
inputRDD = sc.parallelize([(1, "A"), (1, "B"), (2, "B")])
# 2. Operation T1: Transformation 'values', so as to get a new RDD with the values in inputRDD.
# C1: parallelize T1: values
# dataset -----------------> inputRDD -----------> valuesRDD
valuesRDD = inputRDD.values()
# 3. Operation A1: 'collect'.
# C1: parallelize T1: values A1: collect
# dataset -----------------> inputRDD -----------> valuesRDD ------------> resVAL
resVAL = valuesRDD.collect()
# 4. We print by the screen the collection computed in resVAL
for item in resVAL:
print(item)
# ------------------------------------------
# FUNCTION my_main
# ------------------------------------------
def my_main(sc):
print("\n\n--- [BLOCK 1] keys ---")
my_keys(sc)
print("\n\n--- [BLOCK 2] values ---")
my_values(sc)
# --------------------------------------------------------
#
# PYTHON PROGRAM EXECUTION
#
# Once our computer has finished processing the PYTHON PROGRAM DEFINITION section, its knowledge is set.
# Now it's time to apply this knowledge.
#
# When launching in a terminal the command:
# user:~$ python3 this_file.py
# our computer finally processes this PYTHON PROGRAM EXECUTION section, which:
# (i) Specifies the function F to be executed.
# (ii) Defines any input parameters this function F has to be called with.
#
# --------------------------------------------------------
if __name__ == '__main__':
# 1. We use as many input arguments as needed
pass
# 2. Local or Databricks
pass
# 3. We configure the Spark Context
sc = pyspark.SparkContext.getOrCreate()
sc.setLogLevel('WARN')
print("\n\n\n")
# 4. We call to my_main
my_main(sc)
|
983,475 | 36b70458e5c25edc2ab087d7d3a7a342df1dd49a | """
Rewrite the skill system class using lambda functions
"""
class SkillSysterm:
def __init__(self, skill_name, attack, CD_time, atk_speed):
self.skill_name = skill_name
self.attack = attack
self.CD_time = CD_time
self.atk_speed = atk_speed
def get_skill(list, func):
for item in list:
if func(item):
yield item
list = [
SkillSysterm("一阳指", 15, 0, 3),
SkillSysterm("降龙十八掌", 14, 9, 5),
SkillSysterm("九阴真金", 20, 8, 9),
SkillSysterm("旋风腿", 5, 10, 6)
]
for item in get_skill(list, lambda item: item.CD_time == 0):
print(item.skill_name)
for item in get_skill(list, lambda item: item.attack > 10):
print(item.skill_name)
for item in get_skill(list, lambda item: item.atk_speed < 5):
print(item.skill_name)
|
983,476 | b91c8e6790d82cd82e435759f02ee916af75999a | #s3.delete_bucket(Bucket=str(bucket_name))
import boto3
import firebase_auth as fa
import json
def lambda_handler(event, context):
profile_name=event['headers']['profile']
bucket_name=str(event['pathParameters']['bucket_name'])
#profile_name = str(event["profile"])
#profile_name = str(event['params']['querystring']['profile'])
ref = fa.getReference(profile_name)
s3=boto3.client('s3', region_name=str(ref.get()['region']), aws_access_key_id=str(ref.get()['access_key']), aws_secret_access_key=str(ref.get()['secret_access_key']))
s3.delete_bucket(Bucket=str(bucket_name))
return {
'statusCode': 200,
'headers': {
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*'
},
'body': json.dumps({
'Successful': "Bucket deleted "
}),
"isBase64Encoded": False
}
#s3.create_bucket(Bucket=str(bucket_name), CreateBucketConfiguration={'LocationConstraint': 'ap-south-1'})
|
983,477 | f996f3c8dbf0d2cb8837f925d89def2bf44ed241 | import itertools as it
import abc
import dataclasses as d
from pprint import pprint
import functools as fp
class MExpander(metaclass=abc.ABCMeta):
@classmethod
def __subclasshook__(cls, subclass):
return (hasattr(subclass, '__call__') and
callable(subclass.__call__))
def iterable(obj):
try:
iter(obj)
except Exception:
return False
else:
return True
def m_expandable(obj):
return issubclass(type(obj),MExpander)
def expandable(obj):
return iterable(obj) or m_expandable(obj)
class _Expandable(object):
def __init__(self,o):
self.o=o
def __call__(self,env={}):
return self.o if iterable(self.o) else self.o()
def __enter__(self):
if not (m_expandable(self.o) or iterable(self.o)):
raise Exception(f'Parse error {self.o} is not expandable')
return self
def __exit__(self, exc_type, exc_val, exc_tb):
del self.o
class _Iterable(object):
def __init__(self,o):
self.o=o
self.i=None
def __enter__(self):
try:
self.i = iter(self.o)
except Exception:
raise Exception(f'Parse error {self.o} is not iterable')
return self.i
def __exit__(self, exc_type, exc_val, exc_tb):
del self.o
del self.i
def do_expand(x):
with _Expandable(x) as f:
with _Iterable(f()) as i:
return i
@d.dataclass
class Li():
""" List constructor that will expand its elements. """
vs : list
def __call__(self,env={}):
return self._expand(env)
def _expand(self,env={}):
ys = map(do_expand, self.vs)
return list(fp.reduce(lambda x,y:[*x,*y], ys))
def __add__(self,expander):
return Li([self,expander])
# * The P constructor
# list(it.product([1,0],[1,0]))
# ** helper function
def dict_product(dic):
ks = list(dic.keys())
vals = list(it.product(*[dic[k] for k in ks]))
out = []
for tup in vals:
out.append(dict(zip(ks,tup)))
return out
# a = {'a':[1,2,3],
# 'b':[-1,-2,-3]}
# dict_product(a)
@d.dataclass
class P():
""" Defines key wise cartesian expansion.
{'b':Vs}
"""
dic : dict
def __call__(self,env={}):
return self._expand(env)
def _expand(self,env={}):
new = dict(self.dic)
for k in self.dic:
new[k] = do_expand(self.dic[k])
return dict_product(new)
def __add__(self,expander):
return Li([self,expander])
# We can test that a class satisfies the formal interface
# issubclass(P, MExpander)
# issubclass(Li, MExpander)
# Deprecated:
@d.dataclass
class Vs():
"""Vs will not expand, its children Pretty useless now that we support
iterables as expandable(s). Remains an example of how to
implement otherwise.
DEPRECATED: use a simple list or tuple instead
"""
vs : list
def __call__(self,env={}):
return self._expand(env)
def _expand(self,env={}):
return self.vs
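# Usage sketch (assumed semantics, consistent with the commented examples above):
# P({'a': [1, 2], 'b': [-1]})() -> [{'a': 1, 'b': -1}, {'a': 2, 'b': -1}]
# (P({'a': [1]}) + P({'a': [2]}))() -> [{'a': 1}, {'a': 2}] (Li concatenates expansions)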
|
983,478 | 96b4515180edca0ff6f6688e8e8c629c6d52a7ec | import logging
from functools import wraps
import redis
from flask import current_app as app, make_response, request
from flask import jsonify
from flask.blueprints import Blueprint
from werkzeug.exceptions import BadRequest, NotFound, Unauthorized
from app.algorithm import Matcher
from app.api.access_control import AccessControl
from app.api.schema import COMPANY_MATCH_BODY, COMPANY_UPDATE_BODY
from app.api.utils import get_verified_data
from app.db.models import HawkUsers
api = Blueprint(name="api", import_name=__name__)
ac = AccessControl()
@ac.client_key_loader
def get_client_key(client_id):
client_key = HawkUsers.get_client_key(client_id)
if client_key:
return client_key
else:
raise LookupError()
@ac.client_scope_loader
def get_client_scope(client_id):
client_scope = HawkUsers.get_client_scope(client_id)
if client_scope:
return client_scope
else:
raise LookupError()
@ac.nonce_checker
def seen_nonce(sender_id, nonce, timestamp):
key = f'{sender_id}:{nonce}:{timestamp}'
try:
if app.cache.get(key):
# We have already processed this nonce + timestamp.
return True
else:
# Save this nonce + timestamp for later.
app.cache.set(key, 'True', ex=300)
return False
except redis.exceptions.ConnectionError as e:
logging.error(f'failed to connect to caching server: {str(e)}')
return True
def json_error(f):
@wraps(f)
def error_handler(*args, **kwargs):
try:
response = f(*args, **kwargs)
except NotFound:
response = jsonify({})
response.status_code = 404
except BadRequest as e:
response = jsonify({'error': e.description})
response.status_code = 400
except Unauthorized:
response = make_response('')
response.status_code = 401
except Exception as e:
logging.error(f'unexpected exception for API request: {str(e)}')
response = make_response('')
response.status_code = 500
raise e
return response
return error_handler
@api.route('/healthcheck/', methods=["GET"])
def healthcheck():
return jsonify({"status": "OK"})
@api.route('/api/v1/company/update/', methods=['POST'])
@json_error
@ac.authentication_required
@ac.authorization_required
def update():
query = get_verified_data(request, COMPANY_UPDATE_BODY)
dnb_match = request.args.get('dnb_match', 'false')
if dnb_match not in ['true', 'false']:
raise BadRequest('invalid dnb_match parameter. needs to be true or false')
dnb_match = dnb_match == 'true'
match = request.args.get('match', 'true' if not dnb_match else 'false')
if match not in ['true', 'false']:
raise BadRequest('invalid match parameter. needs to be true or false')
match = match == 'true'
if match and dnb_match:
raise BadRequest('only one of match and dnb_match parameter can be true')
matcher = Matcher()
matches = matcher.match(query['descriptions'], update=True, match=match, dnb_match=dnb_match)
if match or dnb_match:
result = {'matches': []}
for row in matches:
result['matches'].append({'id': row[0], 'match_id': row[1], 'similarity': row[2]})
return jsonify(result)
else:
return '', 204
@api.route('/api/v1/company/match/', methods=['POST'])
@json_error
@ac.authentication_required
@ac.authorization_required
def match():
query = get_verified_data(request, COMPANY_MATCH_BODY)
dnb_match = request.args.get('dnb_match', 'false')
if dnb_match not in ['true', 'false']:
raise BadRequest('invalid dnb_match parameter. needs to be true or false')
dnb_match = dnb_match == 'true'
matcher = Matcher()
matches = matcher.match(query['descriptions'], update=False, dnb_match=dnb_match)
result = {'matches': []}
for row in matches:
result['matches'].append({'id': row[0], 'match_id': row[1], 'similarity': row[2]})
return jsonify(result)
|
983,479 | f09edf3257dd6a115517d52e1dd70a267c96bf8a |
_g = dict()
def _init():
from ..material import Material as Material
from .elements import carbon, oxygen, hydrogen, nitrogen, lead, tin, nickel, manganese, chromium, iron, silicon, zinc, copper, phosphorus
from .compounds import cellulose, water, fructose, glucose, sucrose, maltose, gluconic_acid, water, acetic_acid
global _g
g = _g
# Density of textile fibers can be done with a gradient column:
# http://fashion2apparel.blogspot.com/2016/12/important-textile-fibers-densities.html
# These values range from 1.55 g/cc for cotton, down to 0.9 g/cc for polypropylene (Meraklon).
# (Meraklon is for diapers and other hygiene products. tmyk)
# These densities are unhelpful for clothing in suitcases, sadly.
# A bale of cotton is around 450 kg/m^3 or 0.45 g/cc.
# https://cerasis.com/calculate-freight-class/ referring to the NMFC book about freight classes
# 4-6 lbs/cubic foot
clothing_density_NMFC_g_cc = 0.096
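# (6 lb/ft^3 * 453.6 g/lb / 28316.8 cc/ft^3 ~ 0.096 g/cc)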
clothing_density_Bill_g_cc = 0.25 # I think Bill said 0.25 g/cc was about right for clothes
# Detection of explosive materials using nuclear radiation, a critical review, Hussein 1992 cites
# W.J. Roff, J.R. Scott, J. Pacitti, Handbook of Common Polymers: Fibres, Films, Plastics and Rubbers, CRC Press, Cleveland, 1971.
# Mass density of silk/wool cloths: 200 kg/m3 = 0.2 g/cc
#
# From Bill:
# I think .25 is pretty good.
# If you think about the weight of a fully packed suitcase and divide by the volume, I think you will find it's close.
# I have no source for this, but I have picked up a hell of a lot of suitcases. (literally thousands)
g["cotton_clothes_packed"] = cellulose.as_density(0.25)
# ==== Woods
live_oak_g_cc = 0.977
white_oak_g_cc = 0.710
sugar_maple_g_cc = 0.676
jack_pine_g_cc = 0.461
wood = Material.sum_by_mass([carbon, oxygen, hydrogen, nitrogen], [50, 42, 6, 1])
g["live_oak"] = wood.as_density(live_oak_g_cc)
g["white_oak"] = wood.as_density(white_oak_g_cc)
g["sugar_maple"] = wood.as_density(sugar_maple_g_cc)
g["jack_pine"] = wood.as_density(jack_pine_g_cc)
# ==== Metal alloys
# https://www.makeitfrom.com/material-properties/As-Forged-and-Air-Cooled-M10-C46400-Brass
g["naval_brass"] = Material.sum_by_mass([lead, tin, zinc, copper], [0.5, 0.5, 38, 61], final_density_g_cc=8.0)
# https://www.makeitfrom.com/material-properties/EN-CC481K-CuSn11P-C-Phosphor-Bronze
g["phosphor_bronze"] = Material.sum_by_mass([tin, phosphorus, copper], [11.0, 1.0, 88.0], final_density_g_cc=8.7)
# https://www.makeitfrom.com/material-properties/Half-Hard-201-Stainless-Steel
g["stainless_steel"] = Material.sum_by_mass([silicon, nickel, manganese, chromium, iron], [0.5, 4.5, 6.5, 17, 71.5], final_density_g_cc=7.7)
# ==== Foodstuffs
# From Ball 2007, I will take the water and monosaccharide components and then
# just replace the other disaccharides with glucose. Maltose for instance can
# be a big component of honey and has effects on its crystallization.
honey_g_cc = 1.415 # 0.5*(1.38 + 1.45) per wikipedia
g["honey"] = Material.sum_by_mass([water, fructose, glucose, sucrose, maltose, gluconic_acid],
[17.2, 38.4, 30.3, 1.3, 8.7, 0.57],
honey_g_cc)
# Generic vinegar: FDA regulations state minimum of 5% acidity (http://www.chem.latech.edu/~deddy/chem122m/L04U00Vinegar122.htm)
g["vinegar"] = Material.sum_by_mass([water, acetic_acid], [0.95, 0.05], 1.05) # density: google
# https://www.simplyrecipes.com/a_guide_to_balsamic_vinegar/
# To qualify for official recognition, Balsamic Vinegar of Modena can only be made with the following ingredients:
# - Boiled or concentrated grape must (at least 20% by volume)
# - Wine vinegar (at least 10%)
# - Natural caramel (made by cooking sugar) for color (up to 2%)
# - Aged balsamic vinegar (aged at least 10 years), an unspecified amount, usually negligible
# ==== Explosives
# C4 with RDX:
# 91% RDX
# 5.3% DOS
# 2.1% PIB
# 1.6% motor oil
# Nominal density 1.72658 g/cc
# (Wikipedia)
from .compounds import rdx, dioctyl_sebacate, polyisobutylene, cyclohexene
g["c4_rdx"] = Material.sum_by_mass([rdx, dioctyl_sebacate, polyisobutylene, cyclohexene],
[91.0, 5.3, 2.1, 1.6], 1.72658)
globals().update(g)
def list():
return [k for k in _g.keys()]
_init()
|
983,480 | 6d603359bc08212b0123e4a7d824c4d01a5b3c9c | import os
import PyPDF2
os.chdir('/home/roger/Documents/automate_boring_stuff')
pdfFile = open('meetingminutes1.pdf', 'rb')
reader = PyPDF2.PdfFileReader(pdfFile)
print(reader.numPages)
page = reader.getPage(0)
print(page.extractText())
#Extracting all pages
#for page in range(reader.numPages):
# print(reader.getPage(page).extractText())
pdf1File = open('meetingminutes1.pdf', 'rb')
pdf2File = open('meetingminutes2.pdf', 'rb')
reader1 = PyPDF2.PdfFileReader(pdf1File)
reader2 = PyPDF2.PdfFileReader(pdf2File)
writer = PyPDF2.PdfFileWriter()
for pageNum in range(reader1.numPages):
page = reader1.getPage(pageNum)
writer.addPage(page)
for pageNum in range(reader2.numPages):
page = reader2.getPage(pageNum)
writer.addPage(page)
outputFile = open('combinedminutes.pdf', 'wb')
writer.write(outputFile)
outputFile.close()
pdfFile.close()
pdf1File.close()
|
983,481 | 57b1b37c069d163df1488c381a56a10bdb31ce1b | # -*- coding: utf-8 -*-
# @Time : 2018/11/21 12:40 AM
# @Author : lenzi
# @Site : https://github.com/lengzi/
# @File : chinahadoop_mall.py
# @Python : python3.6 chinahadoop_mall.py
# @Software: PyCharm
goods_dict = {
"001": {"name": "MacBookPro", "price": 14999},
"002": {"name": "欧米茄手表 ", "price": 11111},
"003": {"name": "小米笔记本 ", "price": 4999},
"004": {"name": "Ipadmini2 ", "price": 1998},
"005": {"name": "小米8手机 ", "price": 2998},
"006": {"name": "爱马仕腰带 ", "price": 1999},
"007": {"name": "劳力士男表 ", "price": 19999},
"008": {"name": "巴宝莉眼镜 ", "price": 4999},
"009": {"name": "路虎发现四 ", "price": 99999},
}
def print_goods_list(goods_list_dict):
print("-----------欢迎进入小象购物商城-------------")
print("商品编号","\t\t\t","商品名称","\t\t\t","商品价格")
for num in sorted(goods_list_dict):
print(num,"\t\t\t",goods_list_dict[num]["name"],"\t\t\t",goods_list_dict[num]["price"])
def buy_guide():
print("请输入商品编号,购物完毕清输入q:")
def in_list(n,list):
for num in list:
if n == num:
return True
return False
def bye_message():
print("谢谢使用小象购物商城,欢迎再次光临!")
def print_cart(shop_cart):
print("您好!你购置得物品如下:")
print("商品编号", "\t\t\t", "商品名称", "\t\t\t", "商品价格", "\t\t\t", "商品数量", "\t\t\t", "商品总价")
for num in shop_cart:
print(num,"\t\t\t",shop_cart[num]["商品名称"],"\t\t\t",shop_cart[num]["商品单价"],"\t\t\t",shop_cart[num]["商品数量"],"\t\t\t",shop_cart[num]["商品总价"])
def compute_price(shop_cart):
money = 0
for num in shop_cart:
money += (int)(shop_cart[num]["商品总价"])
return money
def main():
print_goods_list(goods_dict)
#buy_guide()
cart = {}
flag1 = "请输入商品编号,结束购物请输入q:"
flag2 = "请输入购买商品的数量:"
flag = flag1
key = input(flag)
cart_num = 0
while key:
if key == "q":
if not cart:
bye_message()
else:
print_cart(cart)
cash = compute_price(cart)
print("购物愉快!请支付金额",cash,"元:")
my_cash = input()
while my_cash:
if not my_cash.isnumeric():
print("请输入正确的数字:")
my_cash = input()
elif int(my_cash) < cash:
print("您支付的金额不足,清重新输入:")
my_cash = input()
else:
break
if int(my_cash) == cash:
print("支付成功!欢迎下次光临")
else:
print("支付成功!找给你",int(my_cash)-cash,"元,欢迎下次惠顾!")
break
else:
print_goods_list(goods_dict)
if not in_list(key,goods_dict) and flag == flag1:
print("商品编号不存在,请重新输入:")
print_goods_list(goods_dict)
key = input(flag)
elif in_list(key,goods_dict):
cart_num = key
flag = flag2
key = input(flag2)
elif not key.isnumeric():
print("请输入数字:")
key = input()
else:
if int(key)>0:
a = int(key)
if cart_num in cart.keys():
a += cart[cart_num]["商品数量"]
cart[cart_num] = {"商品名称": goods_dict[cart_num]["name"], "商品单价": goods_dict[cart_num]["price"], "商品数量":int(a), "商品总价": goods_dict[cart_num]["price"]*int(a)}
print("你的购物车清单如下:")
print_cart(cart)
print_goods_list(goods_dict)
flag = flag1
print("继续购物,请输入商品编号,结束购物请输入q:")
key = input()
else:
print("商品数量不能为0,请重新输入:")
key = input()
if __name__ == '__main__':
main()
|
983,482 | 679326f55beeb6c8cbe8477aa0fca13b6376a436 | '''
PROGRAM DESCRIPTION:
Using the Python NumPy module, create a game of SUDOKU.
Please consider the following points while creating your project :-
1) Use only the Numpy module not anything else of Python Data Science.
2) Write your program using the OOPS concept of Python.
'''
# PROGRAMMED BY : Badam Jwala Sri Hari
# MAIL ID : jwalasrihari1330@gmail.com
# DATE : 03-09-2021
# PYTHON VERSION: 3.9.7
# NUMPY VERSION : 1.21.2
# CAVEATS : None
# LICENSE : None
import numpy as np
class sudoku:
# Sudoku to be solved
grid = [[5,3,0,0,7,0,0,0,0],
[6,0,0,1,9,5,0,0,0],
[0,9,8,0,0,0,0,6,0],
[8,0,0,0,6,0,0,0,3],
[4,0,0,8,0,3,0,0,1],
[7,0,0,0,2,0,0,0,6],
[0,6,0,0,0,0,2,8,0],
[0,0,0,0,1,9,0,0,5],
[0,0,0,0,0,0,0,0,0]]
# Checks whether a number can be placed at a position or not
def possible(self,row, column, number):
# Is the number appearing in the given row?
for i in range(0,9):
if self.grid[row][i] == number:
return False
# Is the number appearing in the given column?
for i in range(0,9):
if self.grid[i][column] == number:
return False
# Is the number appearing in the given square?
x = (column // 3) * 3
y = (row // 3) * 3
for i in range(0,3):
for j in range(0,3):
if self.grid[y+i][x+j] == number:
return False
return True
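# e.g. possible(0, 2, 4) asks whether a 4 may go in row 0, column 2:
# it scans row 0, column 2, and the top-left 3x3 box for an existing 4.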
def solve(self):
# Iterates over a grid(sudoku)
for row in range(0,9):
for column in range(0,9):
if self.grid[row][column] == 0:
# Check every possible number at this position
for number in range(1,10):
if self.possible(row, column, number):
self.grid[row][column] = number
self.solve()
# If we get stuck at some point, backtrack and reset the cell to zero
self.grid[row][column] = 0
return
print(np.matrix(self.grid))
obj=sudoku()
obj.solve()
'''
output:
[[5 3 4 6 7 8 1 9 2]
[6 7 2 1 9 5 3 4 8]
[1 9 8 3 4 2 5 6 7]
[8 5 9 7 6 1 4 2 3]
[4 2 6 8 5 3 9 7 1]
[7 1 3 9 2 4 8 5 6]
[9 6 1 5 3 7 2 8 4]
[2 8 7 4 1 9 6 3 5]
[3 4 5 2 8 6 7 1 9]]
'''
|
983,483 | 139d2a91e9647ce652cf6225aec4071258b5194c | from django.urls import path
from . import views
app_name = 'accounts'
urlpatterns = [
path('' , views.index , name="index"),
path('login/' , views.signin , name="login" ),
path('register/' , views.signup , name="register" ),
path('my_profile' , views.profile_page , name="profile"),
path('logout/' , views.logout_page , name="logout"),
path('update/' , views.update_profile_page , name="update"),
path('doctor_details/<str:slug>/' , views.doctor_details , name="doctor_details"),
] |
983,484 | 440b3855eb1e4457e560aca79b7cb1c4ec9e9f26 | import os,sys,time
from basebot import BaseBot
from marketanalyzer import Analyzer
from exchange import Exchange
class SimpleSurfer(BaseBot):
#def __init__(self,market,budget,tradelimit,candlestick="5m",timeframe="24h"):
def __init__(self,config ):
BaseBot.__init__(self,"SimpleSurfer",config)#market,budget,tradelimit,candlestick,timeframe,"SimpleSurfer")
self.refreshData()
self.append_debug_message("Hello, from SimpleSurfer bot")
def refreshData(self):
self.refreshCandlesticks()
self.analyzer.addIndicator("macd",{})
self.analyzer.addIndicator("bbands",{"timeperiod":20,"nbdevup":2,"nbdevdn":2})
self.analyzer.addIndicator("atr",{"period":14})
self.analyzer.addIndicator("rsi",{"overbought":63,"oversold":40,"period":14})
self.analyzer.process()
def process(self):
self.refreshData()
macd = self.analyzer.getIndicator("macd")
bbands = self.analyzer.getIndicator("bbands")
rsi = self.analyzer.getIndicator("rsi")
atr = self.analyzer.getIndicator("atr")
self.pushSignal("rsi","oversold",rsi.isOversold(),minor=True)
self.pushSignal("rsi","overbought",rsi.isOverbought(),minor=True)
"""
#check if price is rising and closes above the middle band
if ( self.analyzer.last("closed",3) < bbands.middle() and
self.analyzer.last("closed",2) > bbands.middle() and
self.analyzer.last("closed") > bbands.middle() and
self.checkSignal("rsi","oversold",60) is not None ):
self.pushSignal("bband1","buy",10)
#check if price is dropping and closes below the middle band
if ( self.analyzer.last("closed",3) > bbands.middle() and
self.analyzer.last("closed",2) < bbands.middle() and
self.analyzer.last("closed") < bbands.middle() and
self.checkSignal("rsi","overbought",60) is not None ):
self.pushSignal("bband2","sell",10)
"""
messages = []
if self.analyzer.last("closed",2) < bbands.middle(2) and self.analyzer.last("closed") > bbands.middle():
messages.append("price is moving above middle band")
if self.analyzer.last("closed",2) > bbands.middle(2) and self.analyzer.last("closed") < bbands.middle():
messages.append("price is moving below middle band")
if self.analyzer.last("closed") > bbands.top():
messages.append("price is above top band")
if self.analyzer.last("closed") < bbands.low():
messages.append("price is below the lower band")
if (self.analyzer.last("closed",2) > bbands.top(2) and
self.analyzer.last("closed") > bbands.top()):
messages.append("price is moving strong above top band")
if (self.analyzer.last("closed",2) < bbands.low(2) and
self.analyzer.last("closed") < bbands.low()):
messages.append("price is moving strong below lower band")
#check if price goes below the lower band
if ( self.analyzer.last("closed") < bbands.low() and
self.checkSignal("rsi","oversold",60) is not None ):
messages.append("buy signal triggered")
self.pushSignal("bbandLow","buy",50)
#check if price high goes above the top bband
if ( self.analyzer.last("high") > bbands.top() and
self.checkSignal("rsi","overbought",60) is not None ):
messages.append("sell signal triggered")
self.pushSignal("bbandHigh","sell",50)
if messages:
self.debug = []
for msg in messages:
self.debug.append(msg)
else:
self.debug = ["Nothing interesting to report"]
return BaseBot.process(self)
|
983,485 | d76bce25c68512115ef0374df02ed50a4bd3bab2 | """app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from django.views.generic import TemplateView
from django.contrib.sitemaps.views import sitemap
from blog.sitemaps import PostSitemap
sitemaps = {
'post': PostSitemap,
}
urlpatterns = [
path('admin/', admin.site.urls),
path('', TemplateView.as_view(template_name="base.html"), name="base"),
# my pages
path('blog/', include('blog.urls', namespace='blog')),
# path('sitemap.xml', sitemap, {'sitemaps': sitemaps}, name='django.contrib.sitemaps.views.sitemap'),
# more pages
path('404/', TemplateView.as_view(template_name="404.html"), name="404"),
path('dashboard/', TemplateView.as_view(template_name="dashboard.html"), name="dashboard"),
# to users
path('account/', include('account.urls')),
# to social
path('social-auth/', include('social_django.urls', namespace='social')),
# images
path('images/', include('images.urls', namespace='images')),
# to remove
path('buttons/', TemplateView.as_view(template_name="to_remove/buttons.html"), name="buttons"),
path('cards/', TemplateView.as_view(template_name="to_remove/cards.html"), name="cards"),
path('tables/', TemplateView.as_view(template_name="to_remove/tables.html"), name="tables"),
path('charts/', TemplateView.as_view(template_name="to_remove/charts.html"), name="charts"),
path('animation/', TemplateView.as_view(template_name="to_remove/utilities-animation.html"), name="animation"),
path('border/', TemplateView.as_view(template_name="to_remove/utilities-border.html"), name="border"),
path('color/', TemplateView.as_view(template_name="to_remove/utilities-color.html"), name="color"),
path('other/', TemplateView.as_view(template_name="to_remove/utilities-other.html"), name="other"),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
983,486 | 3e764a8f2f8e274a9a44019f38fcf9aa8b92cc08 | import numpy as np
def save_bprob(output_file, fwd_bwd_result): # hypothetical wrapper name: the fragment had a bare module-level return
np.savetxt(output_file, fwd_bwd_result["Bprob"], fmt='%.8f')
return
|
983,487 | 674ea192d2de5f04aeb08b09c5ca193c9d6c256b | name ='Emmanuel Ani'
email = 'emmanuelani234@gmail.com'
slack_username = '@emmanuelani234'
biostack = 'Data Science'
twitter_handle ='@emmanuelani234'
def HammingDist(string1, string2):
i = 0
count = 0
while (i<len(string1)):
if (string1[i] != string2[i]):
count +=1
i +=1
return count
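# e.g. HammingDist("karolin", "kathrin") == 3 (the strings differ at three positions)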
print(f'Name:{name}')
print(f'Email:{email}')
print(f'<Slack_username>:{slack_username}')
print(f'<Twitter_handle>:{twitter_handle}')
print(f'Biostack:{biostack}')
a = HammingDist(slack_username, twitter_handle)
print(f'<Hamming_Distance>:{a}')
|
983,488 | b5dcc30557e949d4157cb94b420b048fa93fb020 | from dataclasses import dataclass
from .validity_parameter_assignment_ref_structure import ValidityParameterAssignmentRefStructure
__NAMESPACE__ = "http://www.netex.org.uk/netex"
@dataclass
class CustomerPurchaseParameterAssignmentRefStructure(ValidityParameterAssignmentRefStructure):
pass
|
983,489 | 2c108ebdffcd9eac44a2fcd77dbbbdab0cdef910 | # -*- coding: utf-8 -*-
# @Time : 2020/7/16 15:28
# @Author : piguanghua
# @FileName: Longest Increasing Subsequence.py
# @Software: PyCharm
import numpy as np
class Solution(object):
def lengthOfLIS(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
n = len(nums)
if n == 0:
return 0
f = [1 for i in range(n)]
#f[0] = 1
for i in range(1, n):
for j in range(i):
if nums[i] > nums[j]:
f[i] = max([ f[i], f[j] + 1 ])
return max(f)
if __name__ == '__main__':
nums = [10,9,2,5,3,7,101,18]
print( Solution().lengthOfLIS(nums) ) |
983,490 | a0d046c8100adfb9d02853b75ce1598290b343bb | import kivy
from kivy.app import App
import csv
import os
from os.path import join, isfile
from datetime import date, time, datetime
import math
class Tides:
def __init__(self):
self.tidefile=''
self.readok = True
self.rawtides = []
self.tideplot=[] # contains points suitable for graph
self.nowplot=[] # contains points suitable for graph
self.tides=[]
self.nextlow=[0,0]
self.nexthigh=[0,0]
self.epoch = datetime(1970,1,1)
self.epochhours = 0
self.epochtimenow = None
self.tideheightnow = 0
self.maxheight = 0
self.tidestations = []
self.tidestation = ''
def datehours(self, dt):
delta_time = (dt - self.epoch).total_seconds()/3600
return delta_time
def dechours(self, yr, mn, d, t):
"""
convert time to decimal hours
"""
dt = datetime(int(yr), int(mn), int(d))
if self.epochhours == 0:
self.epochhours = self.datehours(dt)
e = self.datehours(dt)
result=-1.0
if len(t) > 0:
(h, m) = t.split(':')
fh = float(h)
fm = float(m)
r = fh + fm/60.0
result = e + round(r, 2)
# print "dechours: " + str(result) + " yr=" + str(yr) + " mn=" + str(mn) + " d=" + str(d) + " " + t
return result
def dedechours(self, tm):
"""
convert decimal hours back to time
"""
pass
def convert_decimal_to_time(self, decimal):
"""
Converts decimal time to displayable time: e.g. 03:14
(this program is not actually interested in seconds, so just round)
"""
values_in_seconds = [('days', 60*60*24), ('hours', 60*60), ('minutes', 60), ('seconds', 1)]
output_dict = {}
num_seconds = int(decimal * 3600)
for unit, worth in values_in_seconds:
output_dict[unit] = num_seconds // worth
num_seconds = num_seconds % worth
if output_dict['seconds'] > 30:
output_dict['seconds'] = 0
output_dict['minutes'] += 1
if output_dict['minutes'] == 60: # roll over only on a full hour (59 was an off-by-one)
output_dict['minutes'] = 0
output_dict['hours'] += 1
if output_dict['hours'] == 24: # likewise roll over only on a full day
output_dict['hours'] = 0
output_dict['days'] += 1
fmt = str(output_dict['hours']).rjust(2, '0') + ':' + str(output_dict['minutes']).rjust(2, '0')
return output_dict, fmt
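# e.g. convert_decimal_to_time(3.25) -> ({'days': 0, 'hours': 3, 'minutes': 15, 'seconds': 0}, '03:15')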
def setport(self, port):
self.tidefile = port
ex = os.path.isfile(self.tidefile)
return ex
def readport(self):
if self.tidefile != '':
try:
with open(self.tidefile, 'rb') as csvfile:
intides = csv.reader(csvfile)
self.rawtides = list(intides)
csvfile.close()
self.readok = True
except IOError as e:
print "Failed to read csv({0}): {1}".format(e.errno, e.strerror)
print "Failed to read csv " + self.tidefile
return self.readok
def tideheight(self, timetofindheightat, timeofprevioustide, timeofnexttide, heightofprevioustide, heightofnexttide):
A = math.pi * ( ( timetofindheightat - timeofprevioustide ) / ( timeofnexttide - timeofprevioustide ) + 1 )
height = heightofprevioustide + (heightofnexttide - heightofprevioustide) * ((math.cos(A) +1 ) / 2)
return height
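# Worked example (hypothetical tides, not from the CSV): a 1.0 m low at t=0 h
# rising to a 5.0 m high at t=6 h gives, at t=3 h,
# 1.0 + (5.0-1.0) * (cos(pi*(3/6 + 1)) + 1)/2 = 3.0 m; the cosine rule puts
# half the range at the midpoint between tides.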
def gettides(self, ndays=3):
if self.readok:
count = -1
#
d = int(datetime.now().strftime("%d"))
m = int(datetime.now().strftime("%m"))
y = int(datetime.now().strftime("%Y"))
ddate = date(y, m, d)
timenow = datetime.now().strftime("%H:%M")
self.epochtimenow = self.dechours(y, m, d, timenow)
doneit = False
for n in self.rawtides:
# print n
count += 1
if count > 2:
dm = int (n[0])
mn = int (n[2])
yr = int (n[3])
dmdate = date(yr, mn, dm)
delta = dmdate - ddate
deltadays = delta.days
doit = False
if deltadays >= 0:
doit = True
if ndays > 0:
if deltadays >= ndays:
doit = False
if doneit:
break # already done what we want
if doit:
decimalhours = self.dechours(yr, mn, dm, n[4])
if decimalhours > 0:
self.tides.append([ decimalhours, float(n[5])])
decimalhours = self.dechours(yr, mn, dm, n[6])
if decimalhours > 0:
self.tides.append([ decimalhours, float(n[7])])
decimalhours = self.dechours(yr, mn, dm, n[8])
if decimalhours > 0:
self.tides.append([ decimalhours, float(n[9])])
decimalhours = self.dechours(yr, mn, dm, n[10])
if decimalhours > 0:
self.tides.append([ decimalhours, float(n[11])])
# create tide plot points - iteration one every 15 minutes
#granularity = .25 # 15 minutes is .25 of an hour
granularity = 1.0 # every hour
"""
data.tides.tides is a list of tides and heights
we plot each tide as is and then calculate iterations between those known tides.
The number of iterations is t(n+1)-t(n) / granularity
We dont want to deal with the first and last tide - as they are fixed
"""
# e = self.data.tides.epochhours # hours to subtract from epoch decimal hours
e = 0
self.maxheight = 0
maxcount = len(self.tides)
count = 0
ok = True
while ok:
if count >= maxcount:
break
if count == 0 or count >= maxcount - 1: # at first or last element
self.tideplot.append((self.tides[count][0]-e, self.tides[count][1])) # the actual tides
else:
niter = int((self.tides[count+1][0] - self.tides[count][0]) / granularity)
t = self.tides[count-1][0]
t1 = self.tides[count-1][0]
h1 = self.tides[count-1][1]
t2 = self.tides[count][0]
h2 = self.tides[count][1]
if t1 < self.epochtimenow < t2: # if now is between the tides
# nexthigh and nextlow will be converted below and added to the self.nexthigh[0] and
# self.nextlow[0] (as displayable strings)
nexthigh = t2
self.nexthigh[1] = h2
nextlow = self.tides[count+1][0]
self.nextlow[1] = self.tides[count+1][1]
if self.nexthigh[1] < self.nextlow[1]:
nextlow = t2
self.nextlow[1] = self.tides[count][1]
nexthigh = self.tides[count+1][0]
self.nexthigh[1] = self.tides[count+1][1]
nexthigh -= self.epochhours
nextlow -= self.epochhours
junk, fmt = self.convert_decimal_to_time(nexthigh)
self.nexthigh[0] = fmt
junk, fmt = self.convert_decimal_to_time(nextlow)
self.nextlow[0] = fmt
if len(self.nowplot) == 0:
self.nowplot.append((self.epochtimenow-e, 0))
self.tideheightnow = self.tideheight(self.epochtimenow, t1, t2, h1, h2)
for p in range(niter):
t += granularity
h = self.tideheight(t, t1, t2, h1, h2)
self.tideplot.append((t-e, h))
self.maxheight = max(self.maxheight, h)
self.tideplot.append((self.tides[count][0]-e, self.tides[count][1])) # the actual tides
count += 1
return count
|
983,491 | 46052ff055940f0dd62ceeb9d4fba129cd7e81e8 | # !/usr/bin/python3.8
# -*- coding: utf-8 -*-
# @Time : 2020/7/23 2:21 PM
# @Author: 张伟
# @EMAIL: Jtyoui@qq.com
# @Notes : training script
import tensorflow as tf
from config import *
from entity_extraction import DealText, NERModel
dt = DealText(TRAIN_DATA)
words, labels = dt.reader_text()
word_sequence = dt.get_sequence(words, name='words')
label_sequence = dt.get_sequence(labels, name='labels')
word_sequence = tf.keras.preprocessing.sequence.pad_sequences(sequences=word_sequence,
maxlen=int(MAX_WORD_LENGTH),
padding='post',
dtype='int32',
truncating='post')
label_sequence = tf.keras.preprocessing.sequence.pad_sequences(sequences=label_sequence,
maxlen=int(MAX_WORD_LENGTH),
padding='post',
dtype='int32',
truncating='post')
tensor_slices = tf.data.Dataset.from_tensor_slices((word_sequence, label_sequence))
model = NERModel()
optimizers = tf.keras.optimizers.Adam(learning_rate=float(LEARNING_RATE))
for epoch in range(int(EPOCHS)):
dataset = tensor_slices.shuffle(buffer_size=int(BUFFER_SIZE)).batch(batch_size=int(BATCH_SIZE))
for index, (word, label) in enumerate(dataset):
with tf.GradientTape() as tape:
y_pred, text_lens, log_likelihood = model.call(word=word, label=label)
loss = - tf.reduce_mean(log_likelihood)
grads = tape.gradient(target=loss, sources=model.trainable_variables)
optimizers.apply_gradients(grads_and_vars=zip(grads, model.trainable_variables))
if index % 20 == 0:
print(f'training... epoch {epoch}, step {index}, current loss: {loss}')
print('training finished! saving the model')
tf.saved_model.save(model, SAVE_MODEL_DIR, signatures={'call': model.call})
|
983,492 | af869696b69d9fc430c5461dd2a50cdcfb85c877 | n,m,x = map(int,input().split())
books = [list(map(int,input().split())) for _ in range(n)]
ans = 12 * (10**5) + 1
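# Enumerate all 2**n subsets of books: bit j of i set means book j is bought;
# kingaku accumulates the total price, rikaido the per-skill understanding levels.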
for i in range(2**n):
kingaku = 0
rikaido = [0] * m
for j in range(n):
if (i >> j) & 1:
kingaku += books[j][0]
for k in range(m):
rikaido[k] += books[j][k+1]
if all((a >= x for a in rikaido)):
ans = min(ans,kingaku)
if ans != 12 * (10**5) + 1:
print(ans)
else:
print("-1") |
983,493 | bec8aef4c4945034f7b567f3a659afc4172936fc | # coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreatePersistentStorageResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'id': 'str',
'name': 'str',
'storage_metadata': 'StorageMetadata',
'create_time': 'datetime',
'user_claim_count': 'int',
'share_claim_count': 'int'
}
attribute_map = {
'id': 'id',
'name': 'name',
'storage_metadata': 'storage_metadata',
'create_time': 'create_time',
'user_claim_count': 'user_claim_count',
'share_claim_count': 'share_claim_count'
}
def __init__(self, id=None, name=None, storage_metadata=None, create_time=None, user_claim_count=None, share_claim_count=None):
"""CreatePersistentStorageResponse
The model defined in huaweicloud sdk
:param id: WKS storage ID
:type id: str
:param name: name
:type name: str
:param storage_metadata:
:type storage_metadata: :class:`huaweicloudsdkworkspaceapp.v1.StorageMetadata`
:param create_time: creation time
:type create_time: datetime
:param user_claim_count: number of personal directory claims
:type user_claim_count: int
:param share_claim_count: number of shared directory claims
:type share_claim_count: int
"""
super(CreatePersistentStorageResponse, self).__init__()
self._id = None
self._name = None
self._storage_metadata = None
self._create_time = None
self._user_claim_count = None
self._share_claim_count = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if storage_metadata is not None:
self.storage_metadata = storage_metadata
if create_time is not None:
self.create_time = create_time
if user_claim_count is not None:
self.user_claim_count = user_claim_count
if share_claim_count is not None:
self.share_claim_count = share_claim_count
@property
def id(self):
"""Gets the id of this CreatePersistentStorageResponse.
WKS storage ID
:return: The id of this CreatePersistentStorageResponse.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this CreatePersistentStorageResponse.
WKS storage ID
:param id: The id of this CreatePersistentStorageResponse.
:type id: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this CreatePersistentStorageResponse.
Name
:return: The name of this CreatePersistentStorageResponse.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this CreatePersistentStorageResponse.
Name
:param name: The name of this CreatePersistentStorageResponse.
:type name: str
"""
self._name = name
@property
def storage_metadata(self):
"""Gets the storage_metadata of this CreatePersistentStorageResponse.
:return: The storage_metadata of this CreatePersistentStorageResponse.
:rtype: :class:`huaweicloudsdkworkspaceapp.v1.StorageMetadata`
"""
return self._storage_metadata
@storage_metadata.setter
def storage_metadata(self, storage_metadata):
"""Sets the storage_metadata of this CreatePersistentStorageResponse.
:param storage_metadata: The storage_metadata of this CreatePersistentStorageResponse.
:type storage_metadata: :class:`huaweicloudsdkworkspaceapp.v1.StorageMetadata`
"""
self._storage_metadata = storage_metadata
@property
def create_time(self):
"""Gets the create_time of this CreatePersistentStorageResponse.
Creation time
:return: The create_time of this CreatePersistentStorageResponse.
:rtype: datetime
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""Sets the create_time of this CreatePersistentStorageResponse.
Creation time
:param create_time: The create_time of this CreatePersistentStorageResponse.
:type create_time: datetime
"""
self._create_time = create_time
@property
def user_claim_count(self):
"""Gets the user_claim_count of this CreatePersistentStorageResponse.
Number of personal directory claims
:return: The user_claim_count of this CreatePersistentStorageResponse.
:rtype: int
"""
return self._user_claim_count
@user_claim_count.setter
def user_claim_count(self, user_claim_count):
"""Sets the user_claim_count of this CreatePersistentStorageResponse.
Number of personal directory claims
:param user_claim_count: The user_claim_count of this CreatePersistentStorageResponse.
:type user_claim_count: int
"""
self._user_claim_count = user_claim_count
@property
def share_claim_count(self):
"""Gets the share_claim_count of this CreatePersistentStorageResponse.
Number of shared directory claims
:return: The share_claim_count of this CreatePersistentStorageResponse.
:rtype: int
"""
return self._share_claim_count
@share_claim_count.setter
def share_claim_count(self, share_claim_count):
"""Sets the share_claim_count of this CreatePersistentStorageResponse.
Number of shared directory claims
:param share_claim_count: The share_claim_count of this CreatePersistentStorageResponse.
:type share_claim_count: int
"""
self._share_claim_count = share_claim_count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreatePersistentStorageResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
983,494 | 729219b167fe67cf49759fcdc82621245b65d081 | x = y = z = "Apple"
file = open("mysfirstfile.txt", "w")
file.write("Hello! This is Timila writing to the file")
file.close()
# create your own file, write something in it.
# open up this file, read the contents and print them out.
# read: r; write: w; append: a; create: x |
983,495 | a05f72ce336cb86135addb99b6bbd5fdbf826164 | from django.shortcuts import render
from django.views.generic.edit import FormView
from .forms import RegisterForm, LoginForm
from django.shortcuts import redirect
from django.contrib.auth.hashers import make_password
from .models import Fcuser
# Create your views here.
def index(request):
return render(request, 'index.html', {"email": request.session.get('user')})
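# Note: the two view classes below shadow the imported RegisterForm/LoginForm
# form classes; their form_class attributes still bind to the imported forms
# because a class name is not assigned until its body finishes executing.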
class RegisterForm(FormView):
template_name = 'register.html'
form_class = RegisterForm
success_url = '/login'
def form_valid(self, form):
fcuser = Fcuser(email=form.data.get('email'),
password=make_password(form.data.get("password")),
level='user'
)
fcuser.save()
return super().form_valid(form)
class LoginForm(FormView):
template_name = 'login.html'
form_class = LoginForm
success_url = '/'
def form_valid(self, form):
self.request.session['user'] = form.data.get("email")
return super().form_valid(form)
def logout(request):
if 'user' in request.session:
del(request.session['user'])
return redirect('/login')
|
983,496 | 4216017947e2026f3cd11703c6b055ee0f892cc8 | from os import environ
class Config:
ENV=environ.get("SKOB_AUTHZ_ENV", "production" )
DEBUG=int(environ.get("SKOB_AUTHZ_DEBUG", "0" )) # typo "DBGUG" fixed; update any deployment using the old env name
TESTING=int(environ.get("SKOB_AUTHZ_TESTING", "0" ))
SECRET=environ.get("SKOB_AUTHZ_SECRET", "VERY-HARD-SECURE-SEKRET-CODE" ) # env var typo "SEKCET" fixed
JWT_ALGO=environ.get("SKOB_AUTHZ_ALGO", "HS512" )
JWT_TOKEN_LIFETIME=int(environ.get("SKOB_AUTHZ_JWT_TOKEN_LIFETIME", "86400" ))
#### User Configuration ####
USER_DEFAULT_ROLE=environ.get("SKOB_AUTHZ_USER_DEFAULT_ROLE", "member")
USER_DEFAULT_STATUS=environ.get("SKOB_AUTHZ_USER_DEFAULT_STATUS", "inactive")
#### Database Configuration ####
SQLALCHEMY_DATABASE_URI = environ.get("SKOB_AUTHZ_SQLALCHEMY_DATABASE_URI", None)
SQLALCHEMY_TRACK_MODIFICATIONS = TESTING
|
983,497 | dd8e701050dc8cc23f0784e97b5c6c51aa006a99 | # QSBK.py
"""
a simple crawler of qsbk
"""
import urllib2
import re
url = 'http://www.qiushibaike.com/hot/page/'
headers = {
"Host": 'www.qiushibaike.com',
"User-Agent": 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36'
#"Referer": ''
}
class qsbk:
"""
a crawler of qsbk
"""
def __init__(self):
# page index
self.page = 1
# default url
self.url = url
# header
self.header = headers
self.enable = False
# store story
self.stories = []
def get_page(self, page_index):
"""
        Fetch the HTML of the page at page_index.
        :param page_index: int
        :return: str
"""
try:
global url
current_url = url + str(page_index)
# create a request
request = urllib2.Request(current_url, headers=self.header)
# get page of page_index
response = urllib2.urlopen(request)
            # return the raw page content
            return response.read()
except urllib2.URLError, e:
if hasattr(e, 'reason'):
print 'connect to qsbk error', e.reason
return None
def get_page_item(self, page_index):
"""
        Use get_page to extract this page's items into a list.
:param page_index: int
:return: list
"""
# get page from self.get_page(page_index)
page = self.get_page(page_index)
# check return value
if not page:
print 'page is None'
return None
# regular expression matching
pattern = r'<div class="author clearfix">.*?<h2>(.*?)</h2>.*?' \
r'<div class="content">(.*?)</div>.*?<span.*?class="number">(.*?)</i>'
pattern = re.compile(pattern, re.S)
items = re.findall(pattern, page)
page_stories = []
for item in items:
replace_br = re.compile('<br/>')
text = re.sub(replace_br, "\n", item[1])
# item[0] is author item[1] is content item[2] is vote
page_stories.append([item[0].strip(), text.strip(), item[2].strip()])
return page_stories
def load_page(self):
"""
        Load one more page of stories into the internal buffer.
:return:
"""
        if self.enable:
if len(self.stories) < 2:
# get a page stories
page_stories = self.get_page_item(self.page)
if page_stories:
self.stories.append(page_stories)
self.page += 1
def print_one_story(self, page_story, now_page):
"""
        Print the stories of one page, one at a time, in a fixed format.
:param page_story: list
:param now_page: int
:return:
"""
for story in page_story:
            # wait for keyboard input before showing the next story
            user_input = raw_input()
# load a page stories
self.load_page()
            # quit on Q/q, otherwise keep going
            if user_input == 'Q' or user_input == 'q':
self.enable = False
return
print 'page:%d\tauthor:%s\tvote:%s\n\t%s' %(now_page, story[0], story[2], story[1])
def start(self):
"""
        Start crawling pages.
:return:
"""
print "starting to crawler qsbk's page(Enter Q or q to quit)"
print
self.enable = True
self.load_page()
        # a counter for the page currently being printed
nowpage = 0
while self.enable:
if len(self.stories) > 0:
# get a page stories
page_stories = self.stories[0]
nowpage += 1
del self.stories[0]
# print stories
self.print_one_story(page_stories, nowpage)
if __name__ == '__main__':
spider = qsbk()
spider.start()
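# Note: this script targets Python 2 (urllib2, print statements). A rough
# Python 3 port of the fetch step would use urllib.request instead — a sketch,
# untested against the live site:
# from urllib.request import Request, urlopen
# page = urlopen(Request(current_url, headers=headers)).read().decode('utf-8')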
|
983,498 | 0b88b2503357a74f684f658dc987f4e1937649a3 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Sample code to generate caption
'''
import numpy as np
from image_reader import Image_reader
from caption_generator import Caption_generator
#Instantiate image_reader with GoogleNet mean image
mean_image = np.array([104, 117, 123]).reshape((3,1,1))
image_reader=Image_reader(mean=mean_image)
#Instantiate caption generator
caption_model_place='../models/caption_model.chainer'
cnn_model_place='../data/bvlc_googlenet_caffe_chainer.pkl'
index2word_place='../work/index2token.pkl'
caption_generator=Caption_generator(caption_model_place=caption_model_place,cnn_model_place=cnn_model_place,index2word_place=index2word_place)
#The preparation is done
#Let's generate a caption for an image
#First, read an image as numpy array
image_file_path='../images/test_image.jpg'
image=image_reader.read(image_file_path)
#Next, put the image into the caption generator
#The output structure is
# [caption,caption,caption,...]
# caption = {"sentence":This is a generated sentence, "probability": The probability of the generated sentence}
captions=caption_generator.generate(image)
#For example, if you want to print all captions
for caption in captions:
sentence=caption['sentence']
probability=caption['probability']
print " ".join(sentence),probability
#Let's do the same for another image
image_file_path='../images/COCO_val2014_000000241747.jpg'
image=image_reader.read(image_file_path)
captions=caption_generator.generate(image)
for caption in captions:
sentence=caption['sentence']
probability=caption['probability']
print " ".join(sentence),probability
|
983,499 | 131b960bfe8000b77d080db2221ab0d37adcfe03 | """
2. Write a program that adds and multiplies two hexadecimal numbers.
Each number is represented as an array whose elements are the digits of the number.
For example, the user enters A2 and C4F.
These should be stored as ['A', '2'] and ['C', '4', 'F'] respectively.
The sum of the example numbers is ['C', 'F', '1'], and the product is ['7', 'C', '9', 'F', 'E'].
"""
from collections import deque
from itertools import dropwhile
from functools import reduce, partial
import random
numbase16 = ('0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', 'A', 'B', 'C', 'D', 'E', 'F')
def add_digits(dx, dy, numbase):
    # single-digit addition via deque rotation: returns (carry digit, sum digit)
result_right = deque(numbase, maxlen=len(numbase))
result_left = deque(numbase[0] * len(numbase), maxlen=len(numbase))
if dx != numbase[0]:
for digit in numbase[1:]:
result_right.rotate(-1)
result_left.append(numbase[1])
if dx == digit:
break
for digit, left, right in zip(numbase, result_left, result_right):
if dy == digit:
return left, right
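# Worked example: add_digits('9', '8', numbase16) == ('1', '1'),
# i.e. 0x9 + 0x8 = 0x11 -> carry digit '1', sum digit '1'.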
def remove_leading_zeros(digits, numbase):
return list(dropwhile(lambda x: x == numbase[0], digits))
def add(x, y, numbase):
    # schoolbook column addition over digit lists, right to left with carry
xdeque = deque(x)
ydeque = deque(y)
result = deque()
from_right = numbase[0]
if len(ydeque) > len(xdeque):
xdeque, ydeque = ydeque, xdeque
xdeque.appendleft(numbase[0])
while ydeque:
xdigit = xdeque.pop()
ydigit = ydeque.pop()
left1, right = add_digits(xdigit, ydigit, numbase=numbase)
left2, right = add_digits(right, from_right, numbase=numbase)
from_right = left1 if left2 == numbase[0] else left2
result.appendleft(right)
while xdeque:
xdigit = xdeque.pop()
from_right, right = add_digits(xdigit, from_right, numbase=numbase)
result.appendleft(right)
return remove_leading_zeros(result, numbase)
def mul_digits(dx, dy, numbase):
    # single-digit multiplication by repeated addition: returns (high digit, low digit)
if dx == numbase[0] or dy == numbase[0]:
return numbase[0], numbase[0]
left = numbase[0]
right = numbase[0]
for digit in numbase[1:]:
_sum = add([left, right], [numbase[0], dy], numbase=numbase)
left, right = [numbase[0], *_sum][-2:]
if dx == digit:
break
return left, right
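# Worked example: mul_digits('3', '4', numbase16) == ('0', 'C'),
# i.e. 0x3 * 0x4 = 0x0C -> high digit '0', low digit 'C'.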
def mul(x, y, numbase):
    # long multiplication: build shifted single-digit partial products, then sum them
if x == [numbase[0]] or y == [numbase[0]]:
return [numbase[0]]
if x == [numbase[1]]:
return y
if y == [numbase[1]]:
return x
xdeque = deque(x)
ydeque = deque(y)
if len(ydeque) > len(xdeque):
xdeque, ydeque = ydeque, xdeque
terms = deque()
ypadding = deque()
while ydeque:
xpadding = deque()
ydigit = ydeque.pop()
xcopy = xdeque.copy()
while xcopy:
xdigit = xcopy.pop()
term = deque(mul_digits(xdigit, ydigit, numbase=numbase))
term.extend(xpadding)
term.extend(ypadding)
terms.append(term)
xpadding.append(numbase[0])
ypadding.append(numbase[0])
add_base = partial(add, numbase=numbase)
return reduce(add_base, terms)
# Testing
num_1 = ['0', '0', 'A', '2']
num_2 = ['0', 'C', '4', 'F']
print(add(num_1, num_2, numbase=numbase16))
print(mul(num_1, num_2, numbase=numbase16))
assert add(num_1, num_2, numbase=numbase16) == ['C', 'F', '1']
assert mul(num_1, num_2, numbase=numbase16) == ['7', 'C', '9', 'F', 'E']
for _ in range(100):
a = random.randint(0, 16**4)
b = random.randint(0, 16**10)
a_b = a + b
hex_a, hex_b, hex_ab = map(lambda x: hex(x).upper()[2:], (a, b, a_b))
operation_ab = ''.join(add(hex_a, hex_b, numbase=numbase16))
assert hex_ab == operation_ab
a_b = a * b
hex_a, hex_b, hex_ab = map(lambda x: hex(x).upper()[2:], (a, b, a_b))
operation_ab = ''.join(mul(hex_a, hex_b, numbase=numbase16))
assert hex_ab == operation_ab
|