index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
987,000 | ba2b2fd8d77cf59e57d3b70b6017f76c63f872d6 | import math
def nCr(n,r):
    """Return the binomial coefficient C(n, r) as an exact integer.

    The original used float division (f(n) / f(r) / f(n-r)), which loses
    precision once the factorials exceed 2**53; integer floor division is
    exact because r! * (n-r)! always divides n!.
    """
    f = math.factorial
    return f(n) // (f(r) * f(n - r))
print(nCr(40, 20)) |
987,001 | 717557bc4ee46e4ce4ea427b4c931e35598c1b26 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ==============================================================================
import sys
import os
import lxml.objectify
import lxml.etree
# ==============================================================================
# Read the whole eduroam CAT profile XML from stdin.
f = sys.stdin.read()
# NOTE(review): lxml.objectify.parse() expects a filename or file object,
# but `f` holds the file *content* — confirm this is the intended usage.
data = lxml.objectify.parse(f).getroot()
# NOTE(review): basename() is applied to the same content string; this
# presumably was meant to operate on a file path — verify against the caller.
realm = os.path.basename(f).split("_")[0]
# check all auth methods present
for i in data.EAPIdentityProvider.AuthenticationMethods:
    s = i.find("AuthenticationMethod") # search AuthenticationMethod in i
    if s != None: # found AuthenticationMethod
        s = s.find("ServerSideCredential") # search ServerSideCredential in subtree
        if s != None: # found ServerSideCredential
            s = s.find("CA") # search CA in subtree
            if s != None: # found CA
                # Emit the CA element as UTF-8 XML.
                print(lxml.etree.tostring(i.AuthenticationMethod.ServerSideCredential.CA).decode('utf-8'))
            else:
                # No CA certificate: report and abort with a failure status.
                print("No certificate present in profile " + os.path.basename(f) + " for AuthenticationMethod " + i.AuthenticationMethod.EAPMethod.Type.text + " for realm " + realm)
                sys.exit(1)
|
987,002 | f7a600fa4ec9bb74e2fee7eaf718f9dcf9da8c9d | from pathlib import Path
def validate_dir(path: Path):
    """Return True when *path* refers to an existing directory."""
    if not path.exists():
        return False
    return path.is_dir()
def clear_dir(path: Path):
    """Recursively delete every file under *path*, keeping the directory tree.

    Fixes two defects of the original `is_file()`-based branch:
    * a symlink pointing at a directory was recursed into, deleting files
      *outside* the tree being cleared — symlinks are now unlinked instead;
    * a broken symlink (is_file() False) was recursed into, raising from
      `iterdir()` — it is now unlinked as well.
    """
    for child in path.iterdir():
        if child.is_dir() and not child.is_symlink():
            # Real subdirectory: clear its contents but keep the directory.
            clear_dir(child)
        else:
            # Regular file or any kind of symlink.
            child.unlink(missing_ok=True)
|
987,003 | 2a303dd1067499b95259ed83e188eb8f393bc758 | #!/usr/bin/env python
#######
####### permute_wiki.py
#######
####### Copyright (c) 2010 Ben Wing.
#######
from __future__ import with_statement
import random
import re
import sys
from nlputil import *
from process_article_data import *
"""
We want to randomly permute the articles and then reorder the articles
in the dump file accordingly. A simple algorithm would be to load the
entire dump file into memory as huge list, permute the list randomly,
then write the results. However, this might easily exceed the memory
of the computer. So instead, we split the task into chunks, proceeding
as follows, keeping in mind that we have the list of article names
available in a separate file:
1. The dump consists of a prolog, a bunch of articles, and an epilog.
2. (The 'permute' step:) Take the full list of articles, permute randomly
and output again.
3. (The 'split' step:) Split the dump into pieces, of perhaps a few GB each,
the idea being that we can sort each piece separately and concatenate the
results. (This is the 'split' step.) The idea is that we first split the
permuted list of articles into some number of pieces (default 8), and
create a mapping listing which split each article goes in; then we create
a file for each split; then we read through the dump file, and each time we
find an article, we look up its split and write it to the corresponding
split file. We also write the prolog and epilog into separate files.
Note that in this step we have effectively done a rough sort of the
articles by split, preserving the original order within each split.
4. (The 'sort' step:) Sort each split. For each split, we read the permuted
article list into memory to get the proper order, then we read the entire
split into memory and output the articles in the order indicated in the
article list.
5. Concatenate the results.
Note that this might be a perfect task for Hadoop; it automatically does
the splitting, sorting and merging.
"""
all_articles = []
def read_article_data(filename):
    """Load every article record from `filename` into the global
    `all_articles` list; return the field list of the data file."""
    def process(art):
        # Callback invoked once per parsed article row.
        all_articles.append(art)
    infields = read_article_data_file(filename, process,
                                      maxtime=Opts.max_time_per_stage)
    return infields
def write_permutation(infields):
    """Shuffle the in-memory article list in place and write the permuted
    article-data file (same fields as the input) to stdout."""
    random.shuffle(all_articles)
    errprint("Writing combined data to stdout ...")
    write_article_data_file(sys.stdout, outfields=infields,
                            articles=all_articles)
    errprint("Done.")
def break_up_xml_dump(infile):
    """Generator: split a MediaWiki XML dump into labeled chunks.

    Yields ('prolog', text) once for everything before the first <page>,
    ('page', text) for each <page>...</page> block, ('nonpage', text) for
    stray text between pages, and ('epilog', text) for trailing text.
    """
    prolog = ''
    inpage = False
    # Accumulate the prolog until the first <page> tag is seen.
    for x in infile:
        if re.match('.*<page>', x):
            thispage = [x]
            inpage = True
            break
        else:
            prolog += x
    if prolog:
        yield ('prolog', prolog)
    thisnonpage = ''
    # Main loop: alternately collect page text and between-page text.
    for x in infile:
        if inpage:
            if re.match('.*</page>', x):
                inpage = False
                thispage.append(x)
                thisnonpage = ''
                yield ('page', ''.join(thispage))
            else:
                thispage.append(x)
        else:
            if re.match('.*<page>', x):
                # Flush any text that appeared between two pages.
                if thisnonpage:
                    yield ('nonpage', thisnonpage)
                thispage = [x]
                inpage = True
            else:
                thisnonpage += x
    if inpage:
        warning("Saw <page> but no </page>")
    if thisnonpage:
        # Whatever trails the last closed page is the epilog.
        yield ('epilog', thisnonpage)
def get_id_from_page(text):
    """Extract the numeric article ID from a <page> chunk.

    Returns the ID as an int, or -1 (after logging) when no <id> tag is
    found or its contents are not an integer.
    """
    m = re.match('(?s).*?<id>(.*?)</id>', text)
    if not m:
        warning("Can't find ID in article; beginning of article text follows:")
        maxlen = min(100, len(text))
        errprint(text[0:maxlen])
        return -1
    rawid = m.group(1)  # renamed from `id` to avoid shadowing the builtin
    try:
        return int(rawid)
    except ValueError:
        # Parenthesized print works on both Python 2 and 3; the original
        # bare `print "..."` statement is a syntax error under Python 3.
        print("Exception when parsing %s, assumed non-int" % rawid)
        return -1
def split_files(infields, split_prefix, num_splits):
    """Split step (Python 2 code: note `xrange`).

    Writes one `<prefix>.<i>.articles` table per split from the permuted
    article list, then streams the dump from stdin, routing each <page>
    to its split file by article ID (a rough sort by split, preserving
    original order within each split).
    """
    errprint("Generating this split article-table files...")
    splits = {}
    num_arts = len(all_articles)
    # Ceiling division: every split holds at most `splitsize` articles.
    splitsize = (num_arts + num_splits - 1) // num_splits
    for i in xrange(num_splits):
        minval = i * splitsize
        maxval = min(num_arts, (i + 1) * splitsize)
        outarts = []
        for j in xrange(minval, maxval):
            art = all_articles[j]
            splits[art.id] = i  # remember which split this article goes to
            outarts.append(art)
        with open("%s.%s.articles" % (split_prefix, i), 'w') as outfile:
            write_article_data_file(outfile, outfields=infields, articles=outarts)
    # Clear the big array when we're done with it
    del all_articles[:]
    splitfiles = [None]*num_splits
    for i in xrange(num_splits):
        splitfiles[i] = open("%s.%s" % (split_prefix, i), 'w')
    errprint("Splitting the dump....")
    status = StatusMessage("article")
    for (type, text) in break_up_xml_dump(sys.stdin):
        if type == 'prolog':
            with open("%s.prolog" % split_prefix, 'w') as prolog:
                prolog.write(text)
        elif type == 'epilog':
            with open("%s.epilog" % split_prefix, 'w') as epilog:
                epilog.write(text)
        elif type == 'nonpage':
            warning("Saw non-page text %s" % text)
        else:
            id = get_id_from_page(text)
            if id not in splits:
                warning("Can't find article %s in article data file" % id)
            else:
                # Route the page to the split it was assigned above.
                splitfiles[splits[id]].write(text)
        # Allow the whole stage to be time-limited via --max-time-per-stage.
        if status.item_processed(maxtime=Opts.max_time_per_stage):
            errprint("Interrupting processing")
            break
def sort_file():
    """Emit the pages of one split (read from stdin) in the order given by
    the in-memory `all_articles` list, writing them to stdout."""
    pages_by_id = {}
    for kind, text in break_up_xml_dump(sys.stdin):
        if kind == 'page':
            pages_by_id[get_id_from_page(text)] = text
        else:
            warning("Shouldn't see type '%s' in split file: %s" % (kind, text))
    for art in all_articles:
        page = pages_by_id.get(art.id)
        if page is None:
            warning("Didn't see article ID %s in XML file" % art.id)
        else:
            sys.stdout.write(page)
############################################################################
# Main code #
############################################################################
class PermuteWikipediaDumpProgram(NLPProgram):
    """Command-line driver tying the three stages together.

    Modes: 'permute' (shuffle the article table), 'split' (partition the
    dump by the permuted table) and 'sort' (order one split's pages).
    """
    def populate_options(self, op):
        # Register this program's command-line options on the parser `op`.
        op.add_option("-a", "--article-data-file",
                      help="""File containing all the articles.""")
        op.add_option("-s", "--number-of-splits", type='int', default=8,
                      help="""Number of splits.""")
        op.add_option("--split-prefix", help="""Prefix for split files.""")
        op.add_option("-m", "--mode", type='choice',
                      default=None, choices=['permute', 'split', 'sort'],
                      help="""Format of evaluation file(s). Default '%default'.""")
    def handle_arguments(self, opts, op, args):
        # Validate required options and publish them through the global `Opts`.
        global Opts
        Opts = opts
        self.need('mode')
        self.need('article_data_file')
        if opts.mode == 'split':
            self.need('split_prefix')
    def implement_main(self, opts, params, args):
        # Dispatch to the stage selected with --mode (article table is
        # always loaded first; 'sort' relies on it being in memory).
        infields = read_article_data(opts.article_data_file)
        if opts.mode == 'permute':
            write_permutation(infields)
        elif opts.mode == 'split':
            split_files(infields, opts.split_prefix, opts.number_of_splits)
        elif opts.mode == 'sort':
            sort_file()
# Instantiating the program runs it (NLPProgram convention — presumably
# its __init__ parses argv and invokes implement_main; confirm).
PermuteWikipediaDumpProgram()
|
987,004 | 9abd17e032220c643768b81df660c8c8268f63a3 | # Brian Blaylock
# March 1, 2017 Toothaches are not fun
"""
Use python to execute an rclone command that copies HRRR files from the
horel-group/archive/models/ to the horelS3:HRRR archive buckets.
This script should be run by the mesohorse user on meso1.
Requirements:
rclone <- module load rclone
rclone-beta <- should be installed in this horel-group/archive_s3 directory
What this script does:
1) For each model type [hrrr, hrrrX, hrrrAK], copy the file to the horelS3
archive. Special cases for each model.
2) Changes permissions to public, so it can be downloaded via http
3) Create .idx file and save in hore-group/archive/HRRR
"""
from datetime import date, datetime, timedelta
import os
import multiprocessing #:)
import numpy as np
# =============================================================================
# Introductory Stuff
# =============================================================================
# Dates, start and end
# Pick the archive date to copy. NOTE(review): the comments below say
# "noon" but the cutoff in code is 15:00 local — confirm which is intended.
if datetime.now().hour < 15:
    # if it is before the cutoff (local) then get yesterday's date
    # 1) maybe the download script ran long and it's just after midnight
    # 2) maybe you need to rerun this script in the morning
    DATE = date.today() -timedelta(days=1)
else:
    # it's probably after 6 local
    DATE = date.today()
# rclone config file
config_file = '/scratch/local/mesohorse/.rclone.conf' # meso1 mesohorse user
model_HG_names = {1:'hrrr', 2:'hrrrX', 3:'hrrrAK'} # name in horel-group/archive
model_S3_names = {1:'oper', 2:'exp', 3:'alaska'} # name in horelS3:
file_types = ['sfc', 'prs', 'subh'] # model file file_types
# =============================================================================
def create_idx(for_this_file, put_here):
    """
    Create a .idx inventory for a GRIB2 file with wgrib2 and store it in
    horel-group/archive/<put_here>, creating the directory if needed.

    Input:
        for_this_file - full path of the GRIB2 file to inventory
        put_here      - directory (relative to the archive root) for the .idx
    """
    file_name = for_this_file.split('/')[-1]
    idx_dir = '/uufs/chpc.utah.edu/common/home/horel-group/archive/' + put_here
    if not os.path.exists(idx_dir):
        os.makedirs(idx_dir)
    idx_name = idx_dir + file_name + '.idx'
    # NOTE(review): the command embeds unquoted paths; safe only while the
    # fixed archive paths contain no shell metacharacters.
    os.system('wgrib2 ' + for_this_file + ' -t -var -lev -ftime > ' + idx_name)
    # Parenthesized single-argument print runs on Python 2 and 3 alike
    # (the original two-argument `print` statement was Python-2-only).
    print("created idx file: " + idx_name)
def copy_to_horelS3(from_here, to_there):
    """
    Copy the file to the horelS3: archive using rclone
    Input:
        from_here - string of full path and file name you want to copy
        to_there - string of path on the horelS3 archive
    """
    # Copy the file from_here to_there (the path will be created if it doesn't exist)
    # NOTE(review): arguments are interpolated into a shell command unquoted;
    # fine for the fixed archive paths used here, unsafe for arbitrary input.
    os.system('rclone --config %s copy %s horelS3:%s' \
        % (config_file, from_here, to_there))
# Main driver (Python 2 print statements throughout this script).
print "\n================================================"
print "Moving HRRR to S3"
for model_type in [1, 2, 3]:
    """
    Attempt to copy all possible hours, forecast hours, and variable types etc.
    for HRRR from thehorel-group/archive to the horelS3:HRRR archive.
    """
    timer1 = datetime.now()
    model = model_HG_names[model_type]
    # Build the current day directory and file to move
    DIR = '/uufs/chpc.utah.edu/common/home/horel-group/archive/%04d%02d%02d/BB_test/models/%s/' \
        % (DATE.year, DATE.month, DATE.day, model)
    # HRRR and HRRRx have 18 hour forcasts, Alaska has 36 hour forecasts.
    # HRRR and HRRRx run every hour, Alaska runs every three hours.
    if model == 'hrrrAK':
        forecasts = np.arange(0, 37)
        hours = np.arange(0, 24, 3)
    else:
        forecasts = np.arange(0, 19)
        hours = np.arange(0, 24)
    # Open file for printing output log. Organize into directories by year and month.
    # log_path = '/uufs/chpc.utah.edu/common/home/horel-group/archive_s3/logs/%s_%04d-%02d' \
    #     % (model, DATE.year, DATE.month)
    # if not os.path.exists(log_path):
    #     os.makedirs(log_path)
    # log = open('%s/%s_%04d-%02d-%02d.txt' % (log_path, model, DATE.year, DATE.month, DATE.day), 'w')
    # log.write('Moving %s files\nDate: %s\n' % (model, DATE))
    # log.write('Origin: ' + DIR)
    # Do lots of loops...file types (t), hour of day (h), forecast hour (f).
    # loop for each type: sfc, prs, buf
    for t in file_types:
        # Build the new S3 directory path name (e.g. HRRR/oper/sfc/20171201)
        DIR_S3 = 'HRRR/%s/%s/%04d%02d%02d/' \
            % (model_S3_names[model_type], t, DATE.year, DATE.month, DATE.day)
        # log.write(' \n\nCopy to: horelS3:'+DIR_S3+'\n')
        # log.write("========== Checking for "+model + ' ' + t +" files ====================\n")
        # loop for each hour (0,24)
        for h in hours:
            # log.write('Hour %02d:' % (h))
            # loop for each forecast hour, depenent on model type.
            for f in forecasts:
                # Skip known condition that doesn't exist
                if t == 'buf' and f > 0:
                    # bufr files not dependent on the forecast hour becuase
                    # analysis and forecast are in the same file.
                    continue
                print ""
                print "===================================="
                print " Where am I?"
                print " Date =", DATE
                print " model =", model
                print " type =", t
                print " hour =", h
                print " forec =", f
                # File format example: hrrr.t00z.wrfsfcf00.grib2
                FILE = DIR + '%s.t%02dz.wrf%sf%02d.grib2' % (model, h, t, f)
                # Check if the grib2 file exists.
                # If it does, then copy the file to S3 and create a .idx file.
                if os.path.isfile(FILE):
                    copy_to_horelS3(FILE, DIR_S3)
                    create_idx(FILE, DIR_S3)
                    # log.write('[f%02d]' % (f))
                else:
                    print ""
                    # log.write('[ ]')
            # log.write('\n')
        # Change permissions of S3 directory to public
        s3cmd = '/uufs/chpc.utah.edu/common/home/horel-group/archive_s3/s3cmd-1.6.1/s3cmd'
        os.system(s3cmd + ' setacl s3://%s --acl-public --recursive' % DIR_S3)
    # log.close()
    print "Timer, copy from horel-gropu/archvie to S3:", datetime.now() - timer1
|
987,005 | d72039d9c6cc7a141798c2d733b9e3e800550f78 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import socket
from gevent import monkey
monkey.patch_all()
import gevent
import gevent.pool
import sys
import getopt
# Map of well-known ports (as strings) to a short service/vulnerability hint
# shown when the port is found open. Values are user-facing runtime strings
# and are intentionally left in the original Chinese.
port_service = {'21': 'ftp', '22': 'SSH', '23': 'Telnet', '80': 'web----Http.sys远程代码执行漏洞', '161': 'SNMP', '389': 'LDAP',
                '443': 'SSL心脏滴血以及一些web漏洞测试', '445': 'SMB', '512': 'Rexec', '513': 'Rexec', '514': 'Rexec',
                '873': 'Rsync未授权', '1025': 'NFS', '111': 'NFS', '1433': 'MSSQL', '1521': 'Oracle(iSqlPlus)',
                '2082': 'cpanel主机管理系统登陆', '2083': 'cpanel主机管理系统登陆', '2222': 'DA虚拟主机管理系统登陆(国外用较多)',
                '2601': 'zebra路由,默认密码zebra', '2604': 'zebra 路由,默认密码zebra', '3128': 'squid代理默认端口,如果没设置口令很可能就直接漫游内网了',
                '3306': 'MySQL', '3312': 'kangle主机管理系统登陆', '3311': 'kangle主机管理系统登陆', '3389': '远程桌面----RDP漏洞',
                '4440': 'rundeck', '5432': 'PostgreSQL', '5672': 'rabbitMQ(guest/guest)',
                '15672': 'rabbitMQ(guest/guest)', '5900': 'vnc--使用realVNCviewer连接被测ip', '5984': 'CouchDB',
                '6082': 'varnish,Varnish,HTTP,accelerator,CLI,未授权访问易导致网站被直接篡改或者作为代理进入内网', '6379': 'redis未授权',
                '7001': 'WebLogic默认弱口令,反序列', '7002': 'WebLogic默认弱口令,反序列', '7008': 'SSRF漏洞', '7778': 'Kloxo主机控制面板登录',
                '8080': 'JBOSS', '8089': 'JBOSS', '9090': 'JBOSS', '8083': 'Vestacp主机管理系统(国外用较多)', '8649': 'ganglia',
                '8808': 'web应用', '8888': 'amh/LuManager主机管理系统默认端口', '9200': 'elasticsearch', '9300': 'elasticsearch',
                '10000': 'Virtualmin/Webmin服务器虚拟主机管理系统', '11211': 'memcache未授权访问', '27017': 'Mongodb未授权访问',
                '27018': 'Mongodb未授权访问', '28017': 'mongodb统计页面', '50000': 'SAP命令执行', '50070': 'hadoop默认端口 未授权访问',
                '50030': 'hadoop默认端口未授权访问'}
def TCP_connect(ip, port):
    """Probe a single TCP port on `ip` and print a report when it is open.

    Fix: the original only closed the socket on the normal path, leaking
    the descriptor when connect_ex() raised; close now happens in finally.
    """
    TCP_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    TCP_sock.settimeout(0.5)  # connection timeout in seconds
    try:
        result = TCP_sock.connect_ex((ip, int(port)))
        if result == 0:
            # Port is open; look up a service hint if we know the port.
            if port in port_service:
                print("[*]%s 端口 开启" % port, "服务是", port_service[port], "\t")
            else:
                print("[*]%s 端口 开启" % port, "服务需手动确认", "\t")
        else:
            # print("[!]%s端口 关闭"%port)
            pass
    except socket.error as e:
        print("[!]错误:", e)
    finally:
        TCP_sock.close()
def scan_web(argv):
    """Parse -u/--url and -t/--types from `argv` and run the matching scan.

    Fix: the original dispatched *inside* the getopt loop, so passing -t
    before -u (or omitting an option) raised UnboundLocalError. Options
    are now fully parsed before dispatch, and missing/invalid options
    print the usage line instead of crashing.
    """
    try:
        opts, args = getopt.getopt(argv, "hu:t:", ["url=", "types="])
    except getopt.GetoptError:
        print('端口扫描.py -u <url> -t <types>')
        sys.exit(2)
    web = None
    types = None
    for opt, arg in opts:
        if opt in ('-u', '--url'):
            web = arg
        elif opt in ('-t', '--types'):
            types = arg
    if web is None or types not in ('f', 'p'):
        print('端口扫描.py -u <url> -t <types>')
        sys.exit(2)
    global web_addr
    web_addr = web
    # Strip the URL scheme so gethostbyname() sees a bare host name.
    if "http://" in web or "https://" in web:
        web = web[web.find('://') + 3:]
    if types == 'p':
        print(web)  # the partial-scan path also echoed the bare host
    print("[*]正在分析网站服务器IP")
    try:
        server_ip = socket.gethostbyname(str(web))
        print("[*]服务器IP为%s" % server_ip)
        if types == 'f':
            scan_port_full(server_ip)
        else:
            scan_port_part(server_ip)
    except Exception as e:
        # Broad catch kept from the original: DNS failure (or a scan error)
        # is reported as a lookup failure.
        print("[!]服务器IP获取失败")
        pass
def scan_port_part(ip):
    """Scan a curated list of commonly interesting ports on `ip`.

    Fix: the original list contained '8080' twice, probing it twice.
    """
    print("[*]开始扫描目标端口")
    start = time.time()
    g = gevent.pool.Pool(20)  # concurrency limit
    run_list = []
    port_list = ['21', '22', '23', '80', '161', '389', '443', '445', '512', '513', '514', '873', '1025', '111', '1433',
                 '1521', '2082', '2083', '2222', '2601', '2604', '3128', '3306', '3312', '3311', '3389', '4440', '5432',
                 '5672', '15672', '5900', '5984', '6082', '6379', '7001', '7002', '7008', '7778', '8080',
                 '8089', '9090', '8083', '8649', '8808', '8888', '9200', '9300', '10000', '11211', '27017', '27018',
                 '28017', '50000', '50070', '50030']
    for port in port_list:
        run_list.append(g.spawn(TCP_connect, ip, port))
    gevent.joinall(run_list)
    end = time.time()
    print("[*]总耗时%s" % time.strftime("%H:%M:%S", time.gmtime(end - start)))
def scan_port_full(ip):
    """Scan every TCP port (1-65535) on `ip`.

    Fix: range(1, 65535) stopped at 65534, silently skipping port 65535.
    """
    print("[*]开始扫描目标端口")
    start = time.time()
    g = gevent.pool.Pool(20)  # concurrency limit
    run_list = []
    for port in range(1, 65536):
        run_list.append(g.spawn(TCP_connect, ip, port))
    gevent.joinall(run_list)
    end = time.time()
    print("[*]总耗时%s" % time.strftime("%H:%M:%S", time.gmtime(end - start)))
if __name__ == "__main__":
    # CLI entry point: forward everything after the program name.
    scan_web(sys.argv[1:])
|
987,006 | b90a6e41bcf54b541f82f4b73b26cec61a44eebe | from SessionTimedOutException import SessionTimedOutException
from datetime import datetime
import time
import httplib
class Shell:
    """Wrapper around an interactive prompt of a remote session.

    NOTE(review): `toolbox` and `prompt` are project objects; behavior
    described below is inferred from the calls made here — confirm against
    their implementations.
    """
    def __init__(self, toolbox, index, prompt):
        self._toolbox = toolbox  # owner providing flags and session control
        self._index = index      # session index used for kill/disable calls
        self._prompt = prompt # old shell
    def write(self, command):
        # Stop any live streaming before sending a new command.
        if self._toolbox.get_streaming_flag():
            self.stop_live()
        self._prompt.write(command)
    def read(self):
        """Poll the prompt for output until something arrives or ~30s pass.

        Raises SessionTimedOutException (after killing the session) on
        timeout or a Rex::TimeoutError marker, and httplib.CannotSendRequest
        when a broken pipe is reported.
        """
        ret = self._prompt.read()
        begin = self._oldTime = datetime.now()
        while (len(ret) == 0) and not (self.is_timer_expired(begin)):
            time.sleep(0.2)  # poll every 200 ms
            ret = self._prompt.read()
        if ("Rex::TimeoutError Operation timed out." in ret) or self.is_timer_expired(begin):
            self._toolbox.session_kill(self._index)
            raise SessionTimedOutException("SessionTimedOutException: session {} is dead.".format(self._index))
        if ("Broken pipe from" in ret):
            raise httplib.CannotSendRequest()
        return ret
    def is_timer_expired(self, begin):
        # True once more than 30 seconds have elapsed since `begin`.
        currentTime = datetime.now()
        delta = (currentTime - begin).total_seconds()
        return delta > 30
    def clean(self):
        """Drain any pending output (does not block if nothing was written)."""
        self._prompt.read()
    def update_streaming_flag(self):
        # A "[*] Stopped" message from the prompt means streaming has ended.
        ret = self._prompt.read()
        if "[*] Stopped" in ret:
            self._toolbox.disable_streaming_flag()
    def stop_live(self):
        """ Do a CTRL+C on the meterpreter session """
        self._toolbox.disable_streaming_flag()
        self._toolbox.disable_webcam(self._index)
        self._prompt.kill()
|
def is_unique(strr):
    """Without using any library functions"""
    def _seen_before(prefix, ch):
        # Linear scan: does `ch` already occur in `prefix`?
        for existing in prefix:
            if existing == ch:
                return True
        return False
    for pos in range(len(strr)):
        if _seen_before(strr[:pos], strr[pos]):
            return False
    return True
# For each of `num` input strings, print "Yes" when the string's characters
# are all distinct AND their code points form one consecutive run
# (min + len - 1 == max); otherwise "No". Single characters are always "Yes".
num = int(input())
for i in range(num):
    string = input()
    validationList = []
    for j in range(len(string)):
        validationList.append(ord(string[j]))
    validationList.sort()
    if len(string) == 1:
        print("Yes")
        continue
    # Consecutive-run check: with sorted code points, the span equals the
    # length only when there are no gaps (duplicates handled by is_unique).
    if validationList[0] + len(string) - 1 != validationList[-1]:
        validation = False
    else:
        validation = True
    if ((is_unique(string) and validation)):
        print("Yes")
    else:
        print("No")
|
987,008 | 54fcbb83d3a525212ef79aad5804b5bdfb657d66 | import math
import os
import random
import re
import sys
# local
import trie
def longest_common_substring(s1, s2):
    """Return (substring, length) of the longest common substring of s1 and s2.

    Ties keep the first strictly-longer candidate found while scanning s2
    left to right — the same selection order as the original.

    Fix: the original inserted every one of the O(n^2) substrings of s1
    into a local `trie` module (O(n^3) work and memory) just to answer
    "is k a substring of s1?", which Python's `in` operator does directly.
    """
    lcstr = ''
    lcstrlen = 0
    for i in range(len(s2)):
        for j in range(i, len(s2)):
            k = s2[i:j + 1]
            # Only consider candidates strictly longer than the best so far.
            if len(k) > lcstrlen and k in s1:
                lcstr = k
                lcstrlen = len(k)
    return (lcstr, lcstrlen)
# Demo: expected results are ('bc', 2), ('xxx', 3) and ('', 0) respectively.
print(longest_common_substring('abc', 'foobc123'))
print(longest_common_substring('xxxabc', 'fxxxxxxoobc123'))
print(longest_common_substring('zzYabS', 'fxxxxxxooXc123'))
|
987,009 | a6324518728642463ff22cef658806bbe116a5dc | """
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
Copyright 2015 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import builtins
import collections.abc
import google.protobuf.descriptor
import google.protobuf.internal.containers
import google.protobuf.message
import sys
if sys.version_info >= (3, 8):
import typing as typing_extensions
else:
import typing_extensions
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor
@typing_extensions.final
class Logging(google.protobuf.message.Message):
"""Logging configuration of the service.
The following example shows how to configure logs to be sent to the
producer and consumer projects. In the example, the `activity_history`
log is sent to both the producer and consumer projects, whereas the
`purchase_history` log is only sent to the producer project.
monitored_resources:
- type: library.googleapis.com/branch
labels:
- key: /city
description: The city where the library branch is located in.
- key: /name
description: The name of the branch.
logs:
- name: activity_history
labels:
- key: /customer_id
- name: purchase_history
logging:
producer_destinations:
- monitored_resource: library.googleapis.com/branch
logs:
- activity_history
- purchase_history
consumer_destinations:
- monitored_resource: library.googleapis.com/branch
logs:
- activity_history
"""
DESCRIPTOR: google.protobuf.descriptor.Descriptor
@typing_extensions.final
class LoggingDestination(google.protobuf.message.Message):
"""Configuration of a specific logging destination (the producer project
or the consumer project).
"""
DESCRIPTOR: google.protobuf.descriptor.Descriptor
MONITORED_RESOURCE_FIELD_NUMBER: builtins.int
LOGS_FIELD_NUMBER: builtins.int
monitored_resource: builtins.str
"""The monitored resource type. The type must be defined in the
[Service.monitored_resources][google.api.Service.monitored_resources] section.
"""
@property
def logs(
self,
) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[
builtins.str
]:
"""Names of the logs to be sent to this destination. Each name must
be defined in the [Service.logs][google.api.Service.logs] section. If the log name is
not a domain scoped name, it will be automatically prefixed with
the service name followed by "/".
"""
def __init__(
self,
*,
monitored_resource: builtins.str = ...,
logs: collections.abc.Iterable[builtins.str] | None = ...,
) -> None: ...
def ClearField(
self,
field_name: typing_extensions.Literal[
"logs", b"logs", "monitored_resource", b"monitored_resource"
],
) -> None: ...
PRODUCER_DESTINATIONS_FIELD_NUMBER: builtins.int
CONSUMER_DESTINATIONS_FIELD_NUMBER: builtins.int
@property
def producer_destinations(
self,
) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[
global___Logging.LoggingDestination
]:
"""Logging configurations for sending logs to the producer project.
There can be multiple producer destinations, each one must have a
different monitored resource type. A log can be used in at most
one producer destination.
"""
@property
def consumer_destinations(
self,
) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[
global___Logging.LoggingDestination
]:
"""Logging configurations for sending logs to the consumer project.
There can be multiple consumer destinations, each one must have a
different monitored resource type. A log can be used in at most
one consumer destination.
"""
def __init__(
self,
*,
producer_destinations: collections.abc.Iterable[
global___Logging.LoggingDestination
]
| None = ...,
consumer_destinations: collections.abc.Iterable[
global___Logging.LoggingDestination
]
| None = ...,
) -> None: ...
def ClearField(
self,
field_name: typing_extensions.Literal[
"consumer_destinations",
b"consumer_destinations",
"producer_destinations",
b"producer_destinations",
],
) -> None: ...
global___Logging = Logging
|
987,010 | 243dfe6adf8c8c2350156fb94772b94be5efdb59 | from django.contrib import admin
from .models import Vision, Research
# Expose the content models in the Django admin with default ModelAdmin options.
admin.site.register(Vision)
admin.site.register(Research)
987,011 | d76bf2a679b9464bd4006997fb666f97c9a71c0f | from main import *
from flask import Blueprint
blueprint = Blueprint("board", __name__, url_prefix="/board")
# Site navigation passed to every template: a list of sections, each mapping
# a (Korean) section title to an ordered {menu label: URL slug} dict.
category = [
    {"박물관소개": {
        "관장 인사글": "about",
        "관람 안내 및 오시는 길": "location",
        "관련 기사": "news",
        "로고 소개": "logo",
    }},
    {"풀짚공예 전시실": {
        "소장유물 소개": "relic",
        "상설 전시": "expatiation_exhibition",
        "특별 전시": "special_exhibition",
        "체험교육 전시": "experience_exhibition",
    }},
    {"풀짚공예 교육": {
        "풀짚공예란?": "info",
        "만들기 동영상": "video",
        "체험학습": "field_study",
        "일반&전문가 심화과정": "normal_study",
    }},
    {"풀짚 문화": {
        "책 소개": "culture_book",
        "바구니여행": "culture_basket",
        "풀짚갤러리": "pulzip_gallery",
    }},
    {"커뮤니티": {
        "공지사항": "notice",
        "자유게시판": "free",
        "포토갤러리": "gallery",
        "체험예약": "reservation",
    }},
]
@blueprint.route("/list")
def board_list():
    """Render the paginated board list.

    Query params: page (default 1), limit (posts per page, default 10) and
    board_sort (0-7 selecting field/direction; anything else = newest first).

    Fix: the original always executed a default query and then, for every
    board_sort value 0-7, built a second identical query through an 8-branch
    if/elif chain; the sort choice is now a single lookup table and exactly
    one query runs.
    """
    page = request.args.get("page", 1, type=int)
    limit = request.args.get("limit", 10, type=int)
    board_sort = request.args.get("board_sort", -1, type=int)
    board = mongo.db.board
    # NOTE(review): Cursor.count() is deprecated in modern PyMongo
    # (count_documents({}) is the replacement) — kept for compatibility.
    tot_count = board.find({}).count()  # total number of posts
    last_page_num = math.ceil(tot_count / limit)  # total pages
    block_size = 5
    block_num = int((page - 1) / block_size)  # which pagination block we're in
    block_start = int((block_size * block_num) + 1)  # first page of the block
    block_last = math.ceil(block_start + (block_size - 1))  # last page of the block
    # board_sort code -> (field, direction); default is newest first.
    sort_rules = {
        0: ("title", 1), 1: ("title", -1),
        2: ("name", 1), 3: ("name", -1),
        4: ("pubdate", -1), 5: ("pubdate", 1),
        6: ("view", -1), 7: ("view", 1),
    }
    sort_field, sort_dir = sort_rules.get(board_sort, ("pubdate", -1))
    datas = board.find({}).skip((page - 1) * limit).limit(limit).sort(sort_field, sort_dir)
    return render_template("/board/list.html", page=page, limit=limit, board_sort=board_sort, datas=datas, tot_count=tot_count, block_start=block_start, block_last=block_last, last_page_num=last_page_num, category=category)
@blueprint.route("/view/<idx>")
@login_required
def board_view(idx):
    """Show a single post, incrementing its view counter atomically."""
    if idx is None:
        return abort(404)
    page = request.args.get("page")
    board_sort = request.args.get("board_sort")
    # $inc bumps the counter and return_document gives us the updated doc.
    data = mongo.db.board.find_one_and_update(
        {"_id": ObjectId(idx)}, {"$inc": {"view": 1}}, return_document=True)
    if data is None:
        return abort(404)
    result = {
        "id": data.get("_id"),
        "name": data.get("name"),
        "title": data.get("title"),
        "contents": data.get("contents"),
        "pubdate": data.get("pubdate"),
        "view": data.get("view"),
        "writer_id": data.get("writer_id", ""),
    }
    return render_template("/board/view.html", result=result, page=page, board_sort=board_sort, category=category)
@blueprint.route('/write', methods=["GET", "POST"])
@login_required
def board_write():
    """GET: render the write form. POST: insert the post and show it."""
    if request.method != "POST":
        return render_template("/board/write.html", category=category)
    # Publication timestamp as UTC milliseconds.
    now_ms = round(datetime.utcnow().timestamp() * 1000)
    post = {
        "name": request.form.get("name"),
        "title": request.form.get("title"),
        "contents": request.form.get("contents"),
        "pubdate": now_ms,
        "writer_id": session.get("id"),
        "view": 0,
    }
    inserted = mongo.db.board.insert_one(post)
    return redirect(url_for("board.board_view", idx=inserted.inserted_id))
@blueprint.route("/edit/<idx>", methods=["GET", "POST"])
def board_edit(idx):
    """GET: render the edit form for post `idx`; POST: apply the edit.

    Only the author (session "id" == post writer_id) may edit.
    NOTE(review): unlike board_view/board_write this route has no
    @login_required — confirm whether anonymous access is intended.
    """
    if request.method == "GET":
        board = mongo.db.board
        data = board.find_one({"_id": ObjectId(idx)})
        if data is None:
            flash("해당 게시물이 존재하지 않습니다.")
            return redirect(url_for("board.board_list"))
        else:
            if session.get("id") == data.get("writer_id"):
                return render_template("/board/edit.html", data=data, category=category)
            else:
                flash("글 수정 권한이 없습니다.")
                return redirect(url_for("board.board_list"))
    else:
        title = request.form.get("title")
        contents = request.form.get("contents")
        board = mongo.db.board
        data = board.find_one({"_id": ObjectId(idx)})
        # NOTE(review): unlike the GET branch, a missing post here would make
        # data None and crash on .get — verify whether that path is reachable.
        if session.get("id") == data.get("writer_id"):
            board.update_one({"_id": ObjectId(idx)}, {
                "$set": {
                    "title": title,
                    "contents": contents,
                }
            })
            flash("수정되었습니다.")
            return redirect(url_for("board.board_view", idx=idx))
        else:
            flash("글 수정 권한이 없습니다.")
            return redirect(url_for("board.board_list"))
@blueprint.route("/delete/<idx>")
def board_delete(idx):
    """Delete a post if the current session user is its author.

    Fix: a missing post left `data` as None and crashed with AttributeError
    on data.get(); it now flashes the same "post not found" message the
    edit view uses.
    NOTE(review): this route has no @login_required unlike board_view/
    board_write — confirm whether that is intentional.
    """
    board = mongo.db.board
    data = board.find_one({"_id": ObjectId(idx)})
    if data is None:
        flash("해당 게시물이 존재하지 않습니다.")
    elif data.get("writer_id") == session.get("id"):
        board.delete_one({"_id": ObjectId(idx)})
        flash("삭제되었습니다.")
    else:
        flash("삭제 권한이 없습니다.")
    return redirect(url_for("board.board_list"))
|
987,012 | 17b62650f2617ea2f196c3c2e8464d36c4c2a678 | from functools import wraps
from .error import SCBPaymentError
def check_in_kwargs(kwarg_names):
    """
    Decorator factory: verify that the wrapped method receives every name
    in *kwarg_names* as a keyword argument before running it.
    :param kwarg_names: array of kwargs names to check
    :return: the decorating layer
    """
    def layer(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            # Report the first required keyword that is absent.
            missing = [name for name in kwarg_names if name not in kwargs]
            if missing:
                raise SCBPaymentError('"{0}" attrs is required'.format(missing[0]))
            return func(self, *args, **kwargs)
        return wrapper
    return layer
|
987,013 | 1b0d485c0c130cabf5f1146f54da6ce166aa3f81 | import numpy as np
import matplotlib.pyplot as plt
# それっぽいデータを作る処理
from datetime import datetime, timedelta
import scipy
import scipy.stats
def create_dummy_data(days):
    """Build one fake retention record: the date *days* ago plus a decaying
    curve (normal pdf tail) with non-negative random noise added."""
    xs = np.linspace(2, 2 + days, days)
    curve = scipy.stats.norm.pdf(xs, loc=0, scale=4) * 4 + 0.15
    noisy = curve + np.abs(np.random.randn(len(curve))) / 25
    start = datetime.now().date() - timedelta(days=days)
    return {
        "date": start,
        "values": list(noisy)
    }
vals = map(create_dummy_data, np.arange(1, 30))
# NOTE(review): `map` returns a list only on Python 2; on Python 3 this is
# an iterator and the .reverse()/indexing below fail — this script is py2.
vals.reverse()
from matplotlib import dates
# Number of elapsed days to display
max_y = 35
# Convert x from datetime to matplotlib date numbers
x = map(lambda v: dates.date2num(v['date']), vals)
# y starts at 1 (the day after launch)
y = np.arange(1, max_y + 1)
# Build the x/y mesh
Y, X = np.meshgrid(y, x)
def expand_z(v):
    # Zero-pad each day's value list to max_y entries so all rows line up.
    v = v['values']
    v += list(np.zeros(max_y - len(v)))
    return v
# Append zero-filled tails so every row has equal length
z = map(expand_z, vals)
# Convert to a numpy matrix
Z = np.array(z).reshape(len(z), len(z[0]))
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 4))
# Create the plot
# Fix vmax so the color scale does not depend on the data
im = ax.pcolor(X, Y, Z, vmax=0.6)
# Title
ax.set_title(u'Launch Retention')
# y axis
ax.set_ylabel(u'Past Days')
ax.set_ylim(bottom=1)
# x axis
ax.set_xlim(x[0], x[-1])
# Color bar
plt.colorbar(im)
# Ticks
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
minorLocator = MultipleLocator(5)
ax.xaxis.set_minor_locator(dates.DayLocator())
ax.xaxis.set_minor_formatter(dates.DateFormatter('%d'))
ax.xaxis.set_major_locator(dates.MonthLocator())
ax.xaxis.set_major_formatter(dates.DateFormatter('%Y %b'))
ax.xaxis.set_tick_params(which='major', pad=17)
plt.xticks(rotation=0)
987,014 | e878a38f872bf4ed95db52a69be9b1098a4a38b9 | x,y = map(int,input().split())
# Month-length groups: A = 31-day months, B = 30-day months, C = February.
A = [1,3,5,7,8,10,12]
B = [4,6,9,11]
C = [2]
ans = 'No'
# x and y (read above) are month numbers; answer Yes iff both fall in the
# same group, i.e. both months have the same number of days.
if (x in A) and (y in A):
    ans ='Yes'
elif (x in B) and (y in B):
    ans ='Yes'
elif (x in C) and (y in C):
    ans ='Yes'
print(ans)
987,015 | 2274d7ee86ee46e66f8f171f6b458297178c41ce | # https://cryptopals.com/sets/1/challenges/1
hex_string = '49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d'
decoded = hex_string.decode('hex')
base_64_string = decoded.encode('base64')
print(base_64_string)
|
987,016 | a087f803da75bb57d41d401523ac91021deaba21 | # Generated by Django 2.1.2 on 2018-10-26 11:10
from django.db import migrations, models


class Migration(migrations.Migration):
    """Alter `websitechecksettings.website_hash` to CharField(max_length=128)."""

    dependencies = [
        ('watcher', '0010_auto_20181026_1107'),
    ]

    operations = [
        migrations.AlterField(
            model_name='websitechecksettings',
            name='website_hash',
            field=models.CharField(max_length=128),
        ),
    ]
|
987,017 | 2101a1c1dd558c8bcf74d378912e6ecbd09fdede | number = 7
# Guessing game: `number` (defined above) is the secret target; loop until
# the user declines to play.
while True:
    user_input = input("Would you like to play? (Y/n)")
    if user_input == "n":
        break
    user_number = int(input("Guess our number: "))
    if user_number == number:
        print("You guessed correctly!")
    elif (number-user_number) in (1, -1):
        # exactly one off on either side
        print("You were off by one.")
    else:
        print("Sorry, it's wrong.")
|
987,018 | c727440d1bf1185091ff8e2aaaab5bc7d0b96bdf | # Generated by Django 2.2.8 on 2019-12-05 20:01
from django.db import migrations, models


class Migration(migrations.Migration):
    """Add nullable integer field `pr_number` to the `project` model."""

    dependencies = [
        ("api", "0040_project_currently_creating_pr"),
    ]

    operations = [
        migrations.AddField(
            model_name="project",
            name="pr_number",
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
|
987,019 | 86c128372f10522c6f30888f0cc1036ab2afaf14 | import pygame
import time
import random
from classes.Image import Image
import classes.Asset as Asset
from games.diskShooting.classes.Button import Button
from classes.Color import Color
class Start():
    """Skiing mini-game: intro menu, dodge-the-trees game loop, score hand-off.

    NOTE(review): despite living under games/diskShooting imports, every asset
    and caption below belongs to the skiing game — confirm intended location.
    """

    def __init__(self):
        self.clock = pygame.time.Clock()
        self.state = 'intro'
        # decorate game window
        pygame.display.set_caption('Olympische Spelen - Skiing')

    def Start(self, screen):
        """Run the game state machine on `screen`.

        Returns ['score', 'skiing', <dodged count>] when a round ends, or
        'start' when the player returns to the launcher.
        NOTE(review): the method shadows the class name `Start`.
        """
        display_width = 800
        display_height = 600
        black = (0, 0, 0)
        white = (255, 255, 255)
        red = (200, 0, 0)
        green = (0, 200, 0)
        bright_red = (255, 0, 0)
        bright_green = (0, 255, 0)
        skier_width = 73
        skierPng = pygame.image.load(Asset.loadpath('skiing', 'img', 'skier3.png'))
        BackgroundIntro = pygame.image.load(Asset.loadpath('skiing', 'img', 'background_intro.png'))
        gamebackground = pygame.image.load(Asset.loadpath('skiing', 'img', 'gamebackground.png'))
        BoomPng = pygame.image.load(Asset.loadpath('skiing', 'img', 'boom.png'))
        # crash = True

        # Draw the running "dodged" counter (trees avoided so far).
        def poortje_dodged(screen, count):
            font = pygame.font.SysFont(None, 25)
            text = font.render("Dodged: " + str(count), True, black)
            # NOTE(review): the leading `screen,` makes this a tuple
            # expression — only the blit call has an effect.
            screen, screen.gameDisplay.blit(text, (100, 0))

        # Draw a tree ("boom") obstacle.
        def poortje(screen, poortjex, poortjey, poortjew, poortjeh, color):
            screen.gameDisplay.blit(BoomPng, (poortjex, poortjey, poortjew, poortjeh))

        # Draw the skier sprite.
        def skier(screen, x, y):
            screen.gameDisplay.blit(skierPng, (x, y))

        # Draw the in-game background (x/y arguments are unused).
        def background(screen, x, y):
            screen.gameDisplay.blit(gamebackground, (0, 0))

        # Render `text` and return (surface, rect) for positioning.
        def text_objects(text, font):
            textSurface = font.render(text, True, black)
            return textSurface, textSurface.get_rect()

        # One round of the skiing game; returns ['end', dodged] on crash or
        # when the skier leaves the screen.
        def game_loop(screen):
            x = (display_width * 0.45)
            y = (display_height * 0)
            x_change = 0
            poortje_startx = random.randrange(100, display_width - 100)
            poortje_starty = 600
            poortje_speed = 6
            poortje_width = 110
            poortje_height = 98
            dodged = 0
            gameExit = False
            clock = pygame.time.Clock()
            while not gameExit:
                for event in pygame.event.get():
                    if event.type == pygame.QUIT:
                        pygame.quit()
                        quit()
                    if event.type == pygame.KEYDOWN:
                        if event.key == pygame.K_LEFT:
                            x_change = -5
                        elif event.key == pygame.K_RIGHT:
                            x_change = 5
                    if event.type == pygame.KEYUP:
                        if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                            x_change = 0
                x += x_change
                screen.gameDisplay.fill(white)
                # poortje(poortjex, poortjey, poortjew, poortjeh, color)
                background(screen, x, y)
                poortje(screen, poortje_startx, poortje_starty, poortje_width, poortje_height, black)
                poortje_starty -= poortje_speed
                skier(screen, x, y)
                poortje_dodged(screen, dodged)
                # Leaving the screen horizontally ends the round.
                if x > display_width - skier_width or x < 0:
                    return ['end', dodged]
                # Tree scrolled past: respawn it, score a dodge, speed up.
                if poortje_starty < -300:
                    poortje_starty = 600 + poortje_height
                    poortje_startx = random.randrange(100, display_width - 200)
                    dodged += 1
                    poortje_speed += 0.75
                # Collision check against the tree's bounding box.
                if y < poortje_starty + poortje_height and y > poortje_starty:
                    if x > poortje_startx and x < poortje_startx + poortje_width or x + skier_width > poortje_startx and x + skier_width < poortje_startx + poortje_width:
                        return ['end', dodged]
                pygame.display.update()
                clock.tick(50)

        # Intro/menu state: returns 'game', 'return', or 'quit'.
        def game_intro(screen):
            # Main screen text
            logo = Image(Asset.loadpath('skiing', 'img', 'logo.jpg'), [0, 0])
            # NOTE(review): this local `background` shadows the nested helper
            # function of the same name defined above.
            background = Image(Asset.loadpath('skiing', 'img', 'background.jpg'), [0, 0])
            intructions = Image(Asset.loadpath('skiing', 'img', 'background_intro.png'), [150, 50])
            # Buttons
            start_btn = Button()
            quit_btn = Button()
            clock = pygame.time.Clock()
            while True:
                for event in pygame.event.get():
                    if event.type == pygame.QUIT:
                        return 'quit'
                screen.gameDisplay.blit(background.image, background.rect)
                screen.gameDisplay.blit(intructions.image, intructions.rect)
                screen.gameDisplay.blit(logo.image, logo.rect)
                check_start = start_btn.setButton("Start", 150, 450, 100, 50, Color.GREEN.getRGB(),
                                                  Color.DARK_GREEN.getRGB(),
                                                  screen, 'game')
                check_end = quit_btn.setButton("Return", 550, 450, 100, 50, Color.RED.getRGB(), Color.DARK_RED.getRGB(),
                                               screen,
                                               'return')
                # Return results of button clicked
                if check_start != None:
                    return 'game'
                elif check_end != None:
                    return 'return'
                else:
                    pygame.display.update()
                    clock.tick(15)

        # Outer state machine: intro -> game -> end/quit/return.
        while True:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.display.quit()
                    pygame.quit()
                    quit()
                # NOTE(review): unconditional break — only the first queued
                # event is examined each frame; confirm this is intended.
                break
            if self.state == 'intro':
                result = game_intro(screen)
                if result == None:
                    break
                else:
                    self.state = result
            elif self.state == 'game':
                result = game_loop(screen)
                print(result)
                if result == None:
                    break
                else:
                    self.state = result[0]
            elif self.state == 'end':
                # `result` still holds ['end', dodged] from the game branch.
                self.state = 'intro'
                return ['score', 'skiing', result[1]]
            elif self.state == 'quit':
                pygame.display.quit()
                pygame.quit()
            elif self.state == 'return':
                return 'start'
            pygame.display.update()
            self.clock.tick(60)
        pygame.quit()
        quit()
|
987,020 | 5b018926e31282cf487be4f13b505f9774862144 | """
Code by Ricardo Musch - February 2020
Get the latest release at:
https://github.com/RicardoMusch/rtm-tk-nuke-lut-app
"""
import nuke
import os
import sgtk
# List of Luts to Update. Last one will be the default.
# NOTE(review): these must match the names the LUT gizmos are registered
# under in nuke.ViewerProcess — confirm against the registration code.
luts = ["SHOW LUT", "SHOT LUT"]
def update():
for lut in luts:
# Get the Just Created Viewer Process
lut_name = lut
lut = nuke.ViewerProcess.node(lut_name)
for viewer in nuke.allNodes("Viewer"):
viewer["viewerProcess"].setValue(lut["name"].getValue())
# Run LUT Button
lut["update"].execute()
print "Running 'update' knob on the '"+lut_name+"' node"
def loadLut():
logBig("Loading: "+os.path.basename(__file__))
try:
# Get a Shotgun Connection
sg = _getShotgunConnection()
# Get a Shotgun Context
context = _getShotgunEntityContext(sg)
# Get the Just Created Viewer Process
lut = nuke.thisNode()
##################################################################
############# ADD SHOW FUNCTION CODE HERE ########################
##################################################################
"""
Use the 'lut' var to acces the created gizmo and its knobs.
"""
# Get Shot Data
filters = [ ["id", "is", context["id"]] ]
fields = ["sg_cdl_asc_sat", "sg_cdl_asc_sop", "sg_lut"]
sg_shot = sg.find_one("Shot", filters, fields)
log(sg_shot)
dataField = "sg_cdl_asc_sop"
if sg_shot[dataField] != None:
lut["slope"].setValue(_filterSatSop("slope", sg_shot[dataField]))
lut["offset"].setValue(_filterSatSop("offset", sg_shot[dataField]))
lut["power"].setValue(_filterSatSop("power", sg_shot[dataField]))
lut["working_space"].setValue("AlexaV3LogC")
dataField = "sg_cdl_asc_sat"
if sg_shot[dataField] != None:
lut["saturation"].setValue(_filterSatSop("saturation", sg_shot[dataField]))
# Disable CDl if Show LUT
if lut.Class() == "SHOW_LUT":
lut["disable_OCIOCDLTransform"].setValue(True)
else:
lut["disable_OCIOCDLTransform"].setValue(False)
##################################################################
############# END SHOW FUNCTION CODE #############################
##################################################################
# Set Current and more Viewers to LUT we Created
log("Setting all Viewers to use the Lut")
for node in nuke.allNodes("Viewer"):
node["viewerProcess"].setValue(lut_name)
except Exception as e:
print "\n\n"
print "ERROR loading Shot Lut!"
print e
print "\n\n"
def _filterSatSop(datatype, data):
    """Parse one CDL component out of a Shotgun ASC CDL string.

    `data` is formatted like "(r g b)(r g b)(r g b)" for SOP values or
    "(sat)" for saturation.  Returns a list of three floats for
    'slope'/'offset'/'power' and a single float for 'saturation'.
    """
    data = data.replace("(", "")
    if datatype == "slope":
        data = data.split(")")[0]
        data = data.split(" ")
        vals = [float(data[0]), float(data[1]), float(data[2])]
        msg = datatype, vals
        log(msg)
        # BUG FIX: previously returned the raw string list `data` instead of
        # the parsed float list `vals` (unlike the saturation branch), so
        # knob.setValue() received strings rather than numbers.
        return vals
    if datatype == "offset":
        data = data.split(")")[1]
        data = data.split(" ")
        vals = [float(data[0]), float(data[1]), float(data[2])]
        msg = datatype, vals
        log(msg)
        return vals
    if datatype == "power":
        data = data.split(")")[2]
        data = data.split(" ")
        vals = [float(data[0]), float(data[1]), float(data[2])]
        msg = datatype, vals
        log(msg)
        return vals
    if datatype == "saturation":
        data = data.replace("(", "")
        data = data.replace(")", "")
        msg = datatype, data
        log(msg)
        return float(data)
def _findTopNode():
    """Walk input 0 upstream from the current node and return the topmost
    node in the chain — usually a Read node.
    """
    current = nuke.thisNode()
    top = current
    while current != None:
        top = current
        current = current.input(0)
    return top
def _extractVersionName(path):
path = os.path.basename(path)
path = path.lower()
parts = path.split(".")
version_name = None
for part in parts:
if not "#" in part:
if not "%" in part:
if version_name == None:
version_name = part
else:
version_name += "."+part
return version_name
def _getShotgunConnection():
    """Return a Shotgun API handle.

    Uses the current SGTK engine when running inside an interactive session;
    otherwise (e.g. on a render farm) falls back to a standalone
    shotgun_api3 connection configured through environment variables.
    """
    try:
        #### Get Shotgun Connection and Context when in SGTK session
        # get the engine we are currently running in
        current_engine = sgtk.platform.current_engine()
        # get hold of the shotgun api instance used by the engine, (or we could have created a new one)
        sg = current_engine.shotgun
        return sg
    except Exception:
        #### Get Shotgun Connection and Context when not in SGTK session (example: farm)
        # BUG FIX: `sys` was used below without ever being imported at module
        # level, so this fallback path crashed with NameError; import locally.
        import sys
        #####################################################
        logBig("Importing Shotgun API3")
        #####################################################
        shotgun_api3_location = os.environ["SHOTGUN_API3"]
        sys.path.append(shotgun_api3_location)
        import shotgun_api3
        log("Imported the Shotgun Standalone API3")
        #####################################################
        logBig("Connecting to Shotgun API")
        #####################################################
        sg = shotgun_api3.Shotgun(os.environ["SHOTGUN_API_SERVER_PATH"], os.environ["SHOTGUN_API_SCRIPT_NAME"], os.environ["SHOTGUN_API_SCRIPT_KEY"])
        log("Connected to Shotgun!")
        return sg
def _getShotgunEntityContext(sg):
    """Return the entity dict (e.g. a Shot) this script runs against.

    Interactive SGTK sessions read it from the engine context; standalone
    sessions (e.g. farm) reverse-look it up from the top Read node's file
    path via a Shotgun Version record.
    """
    try:
        #### SGTK SESSION
        # get the engine we are currently running in
        current_engine = sgtk.platform.current_engine()
        # Get current Context
        context = current_engine.context
        return context.entity
    except:
        #### STANDALONE SESSION (example: farm)
        # Find Top node to get a path from
        topNode = _findTopNode()
        try:
            filepath = topNode["file"].getValue().replace("\\", "/")
            # Find Version that links to Entity
            filters = [ ["sg_path_to_frames", "is", filepath] ]
            version_fields = ["entity"]
            sg_version = sg.find_one("Version", filters, version_fields)
            if sg_version == None:
                # Find Version that links to Entity - Reverse path separators
                filters = [ ["sg_path_to_frames", "is", filepath.replace("/", "\\")] ]
                sg_version = sg.find_one("Version", filters, version_fields)
            if sg_version == None:
                # Find Version that links to Entity - Version code Match (least precise)
                filters = [ ["sg_path_to_frames", "is", _extractVersionName(filepath) ] ]
                sg_version = sg.find_one("Version", filters, version_fields)
            return sg_version["entity"]
        except Exception as e:
            log("Error Finding Entity Context")
            log(e)
            # NOTE(review): falls through and returns None here; callers
            # index the result, so a lookup failure surfaces later.
def log(msg):
    """Print a message indented by one space."""
    print " "+str(msg)
def logBig(msg):
    """Print a message framed by blank lines and banner rules."""
    print "\n\n"
    print "############################################################"
    print msg
    print "############################################################"
# Run Main Function
loadLut()
log(" ")
987,021 | 246ebc9dab45595ad60794b014a9a1b1b051d771 | """Test that the vacuum analyzer system"""
from math import isclose,pi
import numpy as np
from vacuum_modeling.vacuum_analyzer import VacuumSystem, solve_vac_system,tube_conductance
def test_vacuum_results_with_hand_values():
    """Compare solver pressures against hand-derived estimates (within 1%)."""
    # test that differential pumping works as expected
    S1, S2, Q, L, D = 100.0, 1.0, .1, 1.25, .05
    gas_mass=28
    vac_sys = VacuumSystem(gas_mass_Daltons=gas_mass)
    # Source chamber with gas load Q but no pumping, then two pumped stages.
    vac_sys.add_chamber(S=0.0, Q=Q)
    vac_sys.add_tube(L, D)
    vac_sys.add_chamber(S=S1)
    vac_sys.add_tube(L, D)
    vac_sys.add_chamber(S=S2)
    solve_vac_system(vac_sys)
    # approximate values from vacuum calculations. Should be within 1%
    C=tube_conductance(gas_mass,D,L)
    P1 = Q / C
    P2 = P1 * C / S1
    P3 = P2 * C / S2
    assert isclose(P1, vac_sys.chambers()[0].P, rel_tol=.01)
    assert isclose(P2, vac_sys.chambers()[1].P, rel_tol=.01)
    assert isclose(P3, vac_sys.chambers()[2].P, rel_tol=.01)
def test_same_vals():
    """Regression check: chamber pressures match previously recorded solver
    output for a fixed three-chamber layout."""
    system = VacuumSystem()
    system.add_chamber(S=10.0, Q=1)
    for _ in range(3):
        system.add_tube(.35, .1)
    system.add_chamber(S=.01)
    system.add_tube(3.0, .25)
    system.add_chamber(S=100.0)
    solve_vac_system(system)
    expected = [0.09989873972504781, 0.013653031647444466, 8.760724330472494e-06]
    for P_expected, chamber in zip(expected, system.chambers()):
        assert isclose(P_expected, chamber.P)
def circular_or_linear_system(is_circular):
    """Build and solve a two-chamber system joined by thin tubes.

    The linear variant connects the chambers once (two tubes in series); the
    circular variant closes the loop with a second identical pair of tubes.
    Returns the solved VacuumSystem.
    """
    system = VacuumSystem(is_circular=is_circular)
    system.add_chamber(S=1.0, Q=1)
    for _ in range(2):
        system.add_tube(.5, .01)
    system.add_chamber(S=.01)
    if is_circular:
        for _ in range(2):
            system.add_tube(.5, .01)
    solve_vac_system(system)
    return system
def test_circular_vs_linear():
    """Test that two chambers connected by a single pipe and then by two pipes behave as expected. IE for low
    conductance of the pipes, and high gass low and pumping in chamber 1 but low pumping and no external gas load in
    chamber 2, that pressure in chamber 2 should approximately double when two pipes are used. Also compare to saved
    values"""
    # NOTE(review): variable names are swapped relative to the flag passed in
    # (P_circ holds the is_circular=False result and vice versa), and the last
    # assertion only passes because abs_tol=.01 is loose compared to the
    # pressures involved — worth confirming the intended relationship.
    vac_sys = circular_or_linear_system(False)
    P_circ = [chamber.P for chamber in vac_sys.chambers()]
    P0_circ = [0.9999876873794514, 0.0012312620548606836]
    vac_sys = circular_or_linear_system(True)
    P_straight = [chamber.P for chamber in vac_sys.chambers()]
    P0_straight = [0.999975405344194, 0.0024594655806102796]
    # regression against previously recorded solver output
    assert all(isclose(P, P0) for P, P0 in zip(P_circ, P0_circ))
    assert all(isclose(P, P0) for P, P0 in zip(P_straight, P0_straight))
    assert isclose(P_circ[0], P_straight[0], abs_tol=.01)
    assert isclose(P_circ[1], 2 * P_straight[1], abs_tol=.01)
def test_pressure_profile():
    """Test that the pressure profile along a periodic tube is close the value predicted by theory"""
    # per-unit-area outgassing rate; total load Q = q * (pi*D) * L below
    q = 1e-3
    D = .1
    L = 10.0
    S = 1
    # NOTE(review): 12.4 * D**3 is assumed to match the module's molecular-flow
    # tube conductance prefactor — confirm against tube_conductance().
    c = 12.4 * D ** 3
    C_eff = 12 * c / L
    C = c / L
    S_eff = 1 / (1 / C_eff + 1 / S)
    Q = q * D * pi * L
    # theoretical peak (mid-tube) and average pressures for a periodic cell
    P_max = Q * (1 / (8 * C) + 1 / S)
    P_av = Q / S_eff
    vac_sys = VacuumSystem()
    vac_sys.add_chamber(S=S, Q=0.)
    # ten identical outgassing tubes separated by identical pumped chambers
    for _ in range(10):
        vac_sys.add_tube(L, D, q=q)
        vac_sys.add_chamber(S=S, Q=.0)
    solve_vac_system(vac_sys)
    # component 11 is a tube near the middle of the chain
    tube = vac_sys.components[11]
    assert isclose(np.mean(tube.P),P_av,rel_tol=.1) and isclose(np.max(tube.P),P_max,rel_tol=.1)
|
987,022 | 387a5ba3c768b34fe2bdb256e09351c9e1a35d1f | __author__ = 'cmotevasselani'
class EquipmentConstants:
    """String constants naming equipment slots and item types."""
    # Slots
    RIGHT_HAND = 'right-hand'
    LEFT_HAND = 'left-hand'
    # Equipment
    SWORD = 'sword'
    SHIELD = 'shield'
    DAGGER = 'dagger'
|
987,023 | 42a0010998b81cc8958ee49538b1d71cf9982fa6 | import Const
from Exchange import Exchange
import requests
class Bitstamp(Exchange):
    """Bitstamp exchange adapter: tracks BTC-quoted prices for supported coins.

    `self.prices` maps coin symbol -> {'bid': float, 'ask': float, 'last': float}.
    `self.api_base` / `self.get_coins()` are assumed to come from the Exchange
    base class.
    """

    def __init__(self):
        super().__init__('Bitstamp', 'https://www.bitstamp.net')
        self.prices = {}

    def update_coins(self):
        """Rebuild the tracked-coin table from Bitstamp's trading-pairs API,
        keeping only supported coins with an enabled <coin>/BTC pair."""
        self.prices.clear()
        coins = requests.get(self.api_base+'/api/v2/trading-pairs-info/')
        # BUG FIX: status codes must be compared with ==, not `is` — int
        # identity is a CPython caching detail, not a guarantee.
        if coins.status_code == Const.SC_OK :
            coins = coins.json()
        else :
            print(Const.BOLD+Const.FAIL+'Unable to reach '+self.name+' API'+Const.ENDC)
            return
        for supported in Const.COINS :
            for c in coins :
                if c['name'] == supported+'/'+Const.BTC and c['trading'] == 'Enabled':
                    self.prices[supported] = {}
                    break

    def update_prices(self):
        """Fetch the latest ticker for every tracked coin and record its
        bid/ask/last prices as floats."""
        for c in self.get_coins() :
            ticker = requests.get(self.api_base+'/api/v2/ticker/'+c.lower()+Const.BTC.lower())
            # BUG FIX: == instead of `is` (see update_coins).
            if ticker.status_code == Const.SC_OK :
                ticker = ticker.json()
            else :
                print(Const.BOLD+Const.FAIL+'Unable to reach '+self.name+' API'+Const.ENDC)
                return
            self.prices[c]['bid'] = float(ticker['bid'])
            self.prices[c]['ask'] = float(ticker['ask'])
            self.prices[c]['last'] = float(ticker['last'])
|
987,024 | 45ab991e1c0350af806fe5a39feeb00e1420f77c | # -*- coding: utf-8 -*-
from django.db import migrations, models


class Migration(migrations.Migration):
    """Alter `instance_name` on both invoiceitem and transaction to
    CharField(max_length=64)."""

    dependencies = [
        ('billing', '0023_auto_20150428_1348'),
    ]

    operations = [
        migrations.AlterField(
            model_name='invoiceitem',
            name='instance_name',
            field=models.CharField(max_length=64),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='transaction',
            name='instance_name',
            field=models.CharField(max_length=64),
            preserve_default=True,
        ),
    ]
|
987,025 | 401de1bd6d6c35ab8f8b625086ec8dbd4a9865e2 | from ...base.base_metric import BaseMetric
class Accuracy(BaseMetric):
    """Accuracy metric skeleton — every BaseMetric hook is still a stub."""

    def __init__(self):
        pass

    def compute_value_for_one_batch(self, teacher, pred):
        # TODO: accumulate accuracy for one (teacher, pred) batch pair.
        pass

    def get_mean(self) -> any:
        # NOTE(review): `any` here is the builtin function, not typing.Any —
        # likely a typo to fix once the metric is implemented.
        pass

    def get_name(self) -> str:
        pass

    def clear(self):
        # TODO: reset accumulated state between evaluations.
        pass
|
987,026 | 35db26cb54e852b701025e7439e0fc3aed231e60 | #faça uma classe soma. Está classe terá 2 atributos v1 e v2
# The program should declare objects of the Somar class, read two values,
# and print their sum on screen.
# The class must expose a method that performs the addition.
class Somar:
    """Holds two values and prints their sum via soma()."""

    def __init__(self,v1,v2):
        self.vlr1 = v1
        self.vlr2 = v2

    def soma(self):
        # Prints (does not return) the sum of the two stored values.
        print(self.vlr1 + self.vlr2)
# Read two integers from the user and print their sum using Somar.
valor1 = int(input('Digite um valor: '))
valor2 = int(input('Digite outro valor: '))
s1 = Somar(valor1,valor2)
s1.soma()
|
987,027 | fa1455c64a1fd7dda894c04c510a19d454093678 | # -*- coding: utf-8 -*-
import base64
import hashlib
import hmac
def is_valid_webhook_event_signature(request_body: str, signature_header: str, signature_key: str,
                                     notification_url: str) -> bool:
    """Validate a Square webhook event notification signature.

    Recomputes the HMAC-SHA256 of ``notification_url + request_body`` with
    ``signature_key`` and compares its base64 encoding against the value of
    the ``x-square-hmacsha256-signature`` header in constant time.

    Args:
        request_body: The JSON body of the request.
        signature_header: Value of the ``x-square-hmacsha256-signature`` header.
        signature_key: Webhook subscription signature key from the Square
            Developer portal.
        notification_url: The subscription's notification endpoint URL.

    Returns:
        True when the signature matches (the event came from Square);
        False otherwise, including for an empty request body.

    Raises:
        ValueError: if ``signature_key`` or ``notification_url`` is empty.
    """
    # An empty payload can never be authenticated.
    if not request_body:
        return False
    if not signature_key:
        raise ValueError('signature_key is empty')
    if not notification_url:
        raise ValueError('notification_url is empty')

    # The HMAC covers the notification URL concatenated with the raw body.
    payload_bytes = (notification_url + request_body).encode('utf-8')
    digest = hmac.new(
        key=signature_key.encode('utf-8'),
        msg=payload_bytes,
        digestmod=hashlib.sha256,
    ).digest()

    # Constant-time comparison of the base64 digest against the header.
    expected_b64 = base64.b64encode(digest)
    return hmac.compare_digest(expected_b64, signature_header.encode('utf-8'))
|
987,028 | ef4061e5b5c970e5807133002b50190189fefe63 | import streamlit as st
import pandas as pd
import requests
import json
import matplotlib.pyplot as plt
import plotly.express as px
st.title('Coronavírus no Brasil')

# covid19 dataset from brasil.io; analysis uses city-level rows only.
DATA_URL = 'https://brasil.io/dataset/covid19/caso?format=csv'
# sentinel shown in selectboxes while nothing has been chosen yet
NULL = '---'
data = pd.read_csv(DATA_URL)
data = data.loc[data.place_type == 'city']
st.subheader('Dados Utilizados na análise')
st.write(data)


def mostra_grafico(df, x_label, y_label, titulo):
    """Render a plotly line chart of df (x_label vs y_label) in Streamlit."""
    fig = px.line(df, x=x_label, y=y_label, title=titulo)
    # bare expression: Streamlit "magic" renders the figure
    fig
# mapping: state abbreviation -> full state name
de_para_sigla_estado = {
    'AC': 'Acre',
    'AL': 'Alagoas',
    'AP': 'Amapá',
    'AM': 'Amazonas',
    'BA': 'Bahia',
    'CE': 'Ceará',
    'DF': 'Distrito Federal',
    # NOTE(review): trailing space in 'Espírito Santo ' — it matches the
    # inverse map below, so fixing it here alone would break lookups.
    'ES': 'Espírito Santo ',
    'GO': 'Goiás',
    'MA': 'Maranhão',
    'MT': 'Mato Grosso',
    'MS': 'Mato Grosso do Sul',
    'MG': 'Minas Gerais',
    'PA': 'Pará',
    'PB': 'Paraíba',
    'PR': 'Paraná',
    'PE': 'Pernambuco',
    'PI': 'Piauí',
    'RJ': 'Rio de Janeiro',
    'RN': 'Rio Grande do Norte',
    'RS': 'Rio Grande do Sul',
    'RO': 'Rondônia',
    'RR': 'Roraima',
    'SC': 'Santa Catarina',
    'SP': 'São Paulo',
    'SE': 'Sergipe',
    'TO': 'Tocantins'
}
# Inverse mapping (state name -> abbreviation), derived from the forward map
# above instead of duplicating 27 literal entries so the two can never drift
# out of sync.  The trailing space in 'Espírito Santo ' is preserved exactly
# as in the forward map.
de_para_estado_sigla = {estado: sigla for sigla, estado in de_para_sigla_estado.items()}
# Build state selectbox options: sentinel first, then full state names.
estados = []
estados.append(NULL)
for estado in data['state'].sort_values().unique():
    estados.append(de_para_sigla_estado[estado])
# state selectbox
estado_selecionado = st.selectbox(
    'Escolha o estado',
    estados
)
# show the selected state
# NOTE(review): `is not NULL` compares string identity; it works only because
# st.selectbox returns the very object from the options list — `!=` would be
# the safer idiom.
if estado_selecionado is not NULL:
    'Estado selecionado: ', estado_selecionado
    # keep only rows for the selected state
    data = data.loc[data.state == de_para_estado_sigla[estado_selecionado]]
    st.subheader('Análise do estado selecionado')
    # state rows ordered by date
    df_estado = data.sort_values(by='date', ascending=True)
    # confirmed cases per day for the selected state
    df_estado_confirmados = df_estado.groupby('date')[['confirmed']].sum().reset_index()
    df_estado_confirmados.columns = ['data', f'qtd casos confirmados {estado_selecionado}']
    # deaths per day for the selected state
    df_estado_obitos = df_estado.groupby('date')['deaths'].sum().reset_index()
    df_estado_obitos.columns = ['data', f'qtd de óbitos {estado_selecionado}']
    mostra_grafico(df_estado_confirmados, 'data', f'qtd casos confirmados {estado_selecionado}', f'Casos confirmados em {estado_selecionado}')
    mostra_grafico(df_estado_obitos, 'data', f'qtd de óbitos {estado_selecionado}', f'Casos de óbitos em {estado_selecionado}')
    # city selectbox for the chosen state
    cidades_estado_selecionado = data['city'].sort_values().unique()
    cidades = []
    cidades.append(NULL)
    for cidade in cidades_estado_selecionado:
        cidades.append(cidade)
    cidade_selecionada = st.selectbox(
        'Escolha a cidade',
        cidades
    )
    if cidade_selecionada is not NULL:
        'Cidade selecionada: ', cidade_selecionada
        st.subheader('Análise da cidade selecionada')
    # filter the selected city within the state dataframe
    df_cidade = data.loc[data['city'] == cidade_selecionada]
    # NOTE(review): result of sort_values is discarded (no assignment /
    # inplace) — confirm whether sorting was intended to take effect.
    df_cidade.sort_values(by='date', ascending=True)
    # confirmed cases per day for the filtered city
    df_cidade_confirmados = df_cidade.groupby('date')['confirmed'].sum().reset_index()
    df_cidade_confirmados.columns = ['data', f'qtd casos confirmados {cidade_selecionada}']
    # deaths per day for the filtered city
    df_cidade_obitos = df_cidade.groupby('date')['deaths'].sum().reset_index()
    df_cidade_obitos.columns = ['data', f'qtd de óbitos {cidade_selecionada}']
    # plot confirmed/death counts per day for the filtered city
    if cidade_selecionada is not NULL:
        mostra_grafico(df_cidade_confirmados, 'data', f'qtd casos confirmados {cidade_selecionada}', f'Casos confirmados em {cidade_selecionada}')
        mostra_grafico(df_cidade_obitos, 'data', f'qtd de óbitos {cidade_selecionada}', f'Casos de óbitos em {cidade_selecionada}')
|
987,029 | 8a2fd8586eb73dbf11b1c6b6eb1986d914a9910d | import json
import copy
from selenium import webdriver
def get_growth(channel_id, yno=0):
    """Scrape a channel's monthly subscriber/view growth from socialblade.com
    and print it as Django fixture rows (model "dataServer.growth").

    BUG FIX: the __main__ block below calls this with only channel_id, which
    previously raised TypeError; `yno` now defaults to 0 (existing callers
    that pass yno are unaffected).

    NOTE(review): results are only printed, never returned or persisted.
    """
    options = webdriver.ChromeOptions()
    options.add_argument('--no-sandbox')
    options.add_argument('--headless')
    options.add_argument('--disable-dev-shm-usage')
    options.add_argument("user-agent=Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko")
    driver = webdriver.Chrome('./chromedriver', options=options)
    url = 'https://socialblade.com/youtube/channel/' + channel_id + '/monthly'
    driver.get(url)
    el = '//*[@id="socialblade-user-content"]'
    content = driver.find_element_by_xpath(el).text.split('\n')
    # cnt walks the 7-line record layout: 0=date, 1=skipped, 2=subscriber
    # delta, 3=subscriber total, 4=view delta, 5=view total, 6=record done.
    cnt = 0
    flag = False
    pk = 0
    orm = []
    data = dict()
    data['yno'] = yno
    for line in content:
        # only parse between 'ESTIMATED EARNINGS' and 'Daily Averages'
        if line == 'ESTIMATED EARNINGS':
            flag = True
            continue
        elif line == 'Daily Averages':
            break
        if flag:
            if cnt == 1:
                cnt += 1
                continue
            if cnt == 6:
                # record complete: wrap it as a fixture row and start over
                fin = dict()
                fin['pk'] = pk
                pk += 1
                fin['model'] = "dataServer.growth"
                fin['fields'] = data
                print(fin)
                # deepcopy because `data` keeps being mutated in place
                temp = copy.deepcopy(fin)
                orm.append(temp)
                cnt = 0
                continue
            elif cnt == 0:
                data['recordDate'] = line
                cnt += 1
            else:
                # normalise socialblade formats: strip '+', commas, '--'
                # placeholders and LIVE badges; expand K/M suffixes
                x = line.replace('+', "").replace(",", "").replace('--', '0').replace('LIVE', '')
                total_stars = 0
                if 'K' in x:
                    if len(x) > 1:
                        total_stars = float(x.replace('K', '')) * 1000  # convert k to a thousand
                elif 'M' in x:
                    if len(x) > 1:
                        total_stars = float(x.replace('M', '')) * 1000000  # convert M to a million
                else:
                    total_stars = int(x)  # Less than 1000
                if cnt == 2:
                    data['difSubscriber'] = int(total_stars)
                elif cnt == 3:
                    data['pointSubscriber'] = int(total_stars)
                elif cnt == 4:
                    data['difView'] = int(total_stars)
                elif cnt == 5:
                    data['pointView'] = int(total_stars)
                cnt += 1
    print(orm)
    print(json.dumps(orm, ensure_ascii=False, indent="\t"))
    driver.close()


if __name__ == '__main__':
    get_growth('UChQ-VMvdGrYZxviQVMTJOHg')
|
987,030 | 16f54600e5de80b03f4062976273d10a2244cfa9 | from django.db import models, reset_queries
from .feeds import URLGenerator
class Url(models.Model):
id = models.AutoField(primary_key=True)
url = models.TextField()
key = models.CharField(max_length=255, null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
def save(self, *args, **kwargs) -> None:
if not self.id:
super().save(*args, **kwargs)
geneator = URLGenerator()
self.key = geneator.generate_unique_key(self.id)
return super().save(*args, **kwargs)
|
987,031 | 8234c1eb0b49ae5e83aad881925730a42d217910 | #
# [42] Trapping Rain Water
#
# https://leetcode.com/problems/trapping-rain-water
#
# Hard (36.81%)
# Total Accepted:
# Total Submissions:
# Testcase Example: '[]'
#
#
# Given n non-negative integers representing an elevation map where the width
# of each bar is 1, compute how much water it is able to trap after
# raining.
#
#
#
# For example,
# Given [0,1,0,2,1,0,1,3,2,1,2,1], return 6.
#
#
#
#
# The above elevation map is represented by array [0,1,0,2,1,0,1,3,2,1,2,1]. In
# this case, 6 units of rain water (blue section) are being trapped. Thanks
# Marcos for contributing this image!
#
class Solution(object):
    def trap(self, height):
        """Amount of rain water trapped by the elevation map `height`,
        computed with two converging pointers tracking the best wall seen
        from each side.

        :type height: List[int]
        :rtype: int
        """
        if not height:
            return 0
        lo, hi = 0, len(height) - 1
        best_left, best_right = height[lo], height[hi]
        trapped = 0
        while lo < hi:
            if height[lo] > height[hi]:
                # Right side is the limiting wall: water above height[hi]
                # up to best_right is trapped.
                trapped += best_right - height[hi]
                hi -= 1
                best_right = max(best_right, height[hi])
            else:
                trapped += best_left - height[lo]
                lo += 1
                best_left = max(best_left, height[lo])
        return trapped
if __name__ == "__main__":
    # Smoke tests mirroring the LeetCode examples.
    s = Solution()
    assert s.trap([0,1,0,2,1,0,1,3,2,1,2,1]) == 6
    assert s.trap([4,2,3]) == 1
|
987,032 | 92cb26c32349d448b887fa4c1ebc5039e9ce68b5 | from os.path import exists, join, dirname
from os import symlink
def init(conf, indir):
    """Symlink wq.app's bundled js/css/scss assets into the project tree.

    `conf` is a dict-like mapping of asset-type name ('js'/'css'/'scss') to
    the project's directory name for it; `indir` is the project root.
    Existing links/directories are never overwritten.
    """
    for name in ('js', 'css', 'scss'):
        basedir = join(indir, conf.get(name, name))
        if not exists(basedir):
            continue
        # Project lib directory (e.g. myapp/js/lib)
        projpath = join(basedir, 'lib')
        # wq source directory (e.g. wq.app/js)
        wqpath = join(dirname(dirname(__file__)), name)
        if not exists(projpath):
            # e.g myapp/js/lib -> wq.app/js
            symlink(wqpath, projpath)
        elif exists(join(wqpath, 'wq')) and not exists(join(projpath, 'wq')):
            # lib/ already exists: only link the wq subfolder,
            # e.g myapp/js/lib/wq -> wq.app/js/wq
            symlink(join(wqpath, 'wq'), join(projpath, 'wq'))
|
987,033 | ef3d545dd0f38638f294e1e72fd614c853b7fe38 | import os
import os.path
import getopt
import sys
import ConfigParser
import datetime
import logging
from time import sleep
from dateutil.parser import *
from lib.db import DB
from lib.backup import Backup
class UmaticsSync:
"""UmaticsSync is an universal backup utility with different storage
engines and the ability to work with multiple backup revisions. Each
job is processed in a defined interval.
# Normal backup for a specific job
$ ./umaticssync.py -j documents
# Restore job with name documents and revision number 32
$ ./umaticssync.py -j documents -r 32
# Run as daemon and execute every job in it's interval
$ ./umaticssync.py"""
conf_dir = os.path.expanduser('~/.umaticssync/')
opts = None
job_list = None
sync_list = None
sync_file = None
backup_list_fp = None
backup_size = 0
db = None
def __init__(self):
self.opts = self._parse_arg_list()
# toggle debug mode
if self.opts.has_key('d'):
logging.basicConfig(level=logging.DEBUG)
# overwrite default config path
if self.opts.has_key('c'):
self.conf_dir = self.opts['c']
logging.info('Using %s as config path' % self.conf_dir)
# check if config path exists
if not os.path.isdir(self.conf_dir):
os.mkdir(self.conf_dir)
logging.error(
'No configuration available, created %s' % self.conf_dir
)
sys.exit()
# backup jobs
backup_list = os.path.join(self.conf_dir, 'backup_list')
self.job_list = ConfigParser.ConfigParser()
self.job_list.read(backup_list)
self.db = DB(os.path.join(self.conf_dir, 'files.db'))
# restore
if self.opts.has_key('r') and self.opts.has_key('j'):
logging.info(
'Starting restore mode on job %s with rev %s'
% (self.opts['j'], self.opts['r'])
)
self.restore(self.opts['j'], self.opts['r'])
# backup (job-based)
elif self.opts.has_key('j'):
logging.info('Starting backup mode on job %s' % self.opts['j'])
self._get_backup_list(self.opts['j'])
# backup daemon
else:
logging.info('Starting backup daemon')
try:
while True:
self._get_backup_list()
self._save_sync_list()
sleep(60)
except KeyboardInterrupt:
self._save_sync_list()
logging.info('Exiting')
sys.exit()
# save sync file
if self.sync_list:
self._save_sync_list()
def _save_sync_list(self):
"""Saves the current sync list to the file system."""
fp = open(self.sync_file, 'w')
self.sync_list.write(fp)
fp.close()
def _parse_arg_list(self):
"""Transform list of argument tuples to dict."""
arg_list = {}
for arg in getopt.getopt(sys.argv[1:], 'c:r:j:d')[0]:
arg_list[arg[0][1:]] = arg[1]
return arg_list
def _get_backup_list(self, job=None):
"""Get a list of backup jobs and execute the specific job every
given interval."""
# sync list
self.sync_file = os.path.join(self.conf_dir, 'sync')
self.sync_list = ConfigParser.ConfigParser()
self.sync_list.read(self.sync_file)
# fallback to one single job
if job:
jobs = [job, ]
else:
jobs = self.job_list.sections()
for job in jobs:
interval = self.job_list.getint(job, 'interval')
job_dict = self._get_job_dict(job)
job_dict['name'] = job
try:
ls = parse(self.sync_list.get(job, 'last_sync'))
# job is in interval range and is not currently running
if (datetime.datetime.now() - ls) > \
datetime.timedelta(0, interval, 0) \
and lbs != 1:
logging.info('[%s] Starting backup process' % job)
backup = Backup(job_dict, self.db)
backup.backup()
self._set_sync_option(
job, 'last_sync', datetime.datetime.now()
)
except:
logging.info('[%s] Starting backup process' % job)
if not self.sync_list.has_section(job):
self.sync_list.add_section(job)
backup = Backup(job_dict, self.db)
backup.backup()
self._set_sync_option(job, 'last_sync', datetime.datetime.now())
def _set_sync_option(self, job, option, value):
"""Set valiable in a section called job with given value."""
if not self.sync_list.has_section(job):
self.sync_list.add_section(job)
self.sync_list.set(job, option, value)
def _get_job_dict(self, job):
"""Transform tuple construct into dict."""
jobs = {}
for job in self.job_list.items(job):
jobs[job[0]] = job[1]
return jobs
    def restore(self, job, revision):
        """Restore backup based on job and revision."""
        # Build the job's option dict and tag it with its own name so the
        # Backup worker knows which job it belongs to.
        job_dict = self._get_job_dict(job)
        job_dict['name'] = job
        # start restore process
        backup = Backup(job_dict, self.db)
        backup.restore(revision)
if __name__ == '__main__':
UmaticsSync() |
987,034 | 560b6053167a13a563e6e7d51ec7b4aa6d6776af | # import os to use os.path.join
# import panda to read csv file
# import cv2 to resize images to shape (64, 64)
# import torch.utils.data to use dataloader of Pytorch
import os
import pandas as pd
import cv2
import numpy as np
from torch.utils.data import Dataset, DataLoader
# create dataloader class for train set
class TrafficSignTrain(Dataset):
    """PyTorch dataset over the traffic-sign training split.

    Reads ../../Dataset/train.csv once at construction; images are decoded
    lazily, one per __getitem__ call.
    """
    def __init__(self):
        # CSV listing each image's directory and its class id.
        train_csv = os.path.join("..", "..", "Dataset", "train.csv")
        self.csv = pd.read_csv(train_csv)
    def __getitem__(self, idx):
        # Paths in the CSV are relative to the dataset root, so anchor
        # them under ../../Dataset before reading.
        relative_dir = str(self.csv.loc[idx, "Directory"])
        full_dir = os.path.join("..", "..", "Dataset", relative_dir)
        target = self.csv.loc[idx, "Class"]
        # cv2 decodes as BGR; convert to RGB, then resize to the fixed
        # 64x64 network input size.
        img = cv2.imread(full_dir)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, (64, 64))
        # Scale pixels into [0, 1) and move channels first (C, H, W) to
        # match PyTorch's tensor layout convention.
        img = img / 256
        img = np.transpose(img, (2, 0, 1))
        return {'image': img, 'classId': target}
    def __len__(self):
        return len(self.csv)
987,035 | 2de9f4e8e055063308dbfd629e0612a5ec3a49c0 | # https://st2.fileurl.link/file/825529672950506/
import urllib.request
# One-shot interactive downloader: prompt for a URL plus a file name and
# save the resource as an .mp4 on a hard-coded Desktop path.
url = input("Enter link: ")
url_name = input("name for vid: ")
# NOTE(review): destination is fixed to one user's machine; building it
# from os.path.expanduser("~") would make this portable.
video = "C:\\Users\\wazih\\Desktop\\"+ url_name + ".mp4"
urllib.request.urlretrieve(url, video)
987,036 | ce94c2340bcd2bd615253254235b0888f61ee4d8 | # -*- coding: utf-8 -*-
import copy
import arrow
# Canonical ticker record template; Ticker.format() deep-copies this so every
# normalized snapshot starts from the full field set with neutral defaults.
DEFAULT_DATA = dict(
    ex='',
    contract='',
    last=0,
    change_percentage=0,
    funding_rate=0,
    funding_rate_indicative=0,
    mark_price=0,
    index_price=0,
    total_size=0,
    volume_24h=0,
    volume_24h_usd=0,
    volume_24h_btc=0,
    quanto_base_rate=0,
    time=0,
)
class Ticker(object):
    # Target collection name for persisted ticker records.
    collection = 'ticker'
    @staticmethod
    def format(data):
        '''
        Normalize a raw exchange ticker dict into the storage schema.

        :param data: raw ticker fields keyed as in DEFAULT_DATA
        :return: a fresh dict (deep copy of DEFAULT_DATA) with coerced values
        '''
        new_data = copy.deepcopy(DEFAULT_DATA)
        new_data['ex'] = str(data['ex'])
        new_data['contract'] = str(data['contract']).lower()
        new_data['last'] = float(data['last'])
        new_data['change_percentage'] = float(data['change_percentage'])
        new_data['funding_rate'] = float(data['funding_rate'])
        # NOTE(review): coerced to str although DEFAULT_DATA defaults this
        # field to 0 and the sibling rate fields are floats — confirm
        # whether str() here is intentional.
        new_data['funding_rate_indicative'] = str(data['funding_rate_indicative'])
        new_data['mark_price'] = float(data['mark_price'])
        new_data['index_price'] = float(data['index_price'])
        new_data['total_size'] = float(data['total_size'])
        new_data['volume_24h'] = float(data['volume_24h'])
        new_data['volume_24h_btc'] = float(data['volume_24h_btc'])
        new_data['volume_24h_usd'] = float(data['volume_24h_usd'])
        # Falsy values ('' / 0 / None) are stored as-is rather than
        # float-coerced, preserving the "no quanto rate" marker.
        if data['quanto_base_rate']:
            new_data['quanto_base_rate'] = float(data['quanto_base_rate'])
        else:
            new_data['quanto_base_rate'] = data['quanto_base_rate']
        # arrow.get handles both epoch numbers and ISO strings; store an
        # aware datetime object.
        new_data['time'] = arrow.get(data['time']).datetime
        return new_data
|
987,037 | dde2a6dbcd51ba452ae4e9761e9108649b93b2d3 | class Solution:
def isMatch(self, s: str, p: str) -> bool:
string, pattern = [], []
string[:0], pattern[:0] = s, p
string.insert(0, 0)
pattern.insert(0, 0)
s, p = len(string), len(pattern)
dp = [[False for _ in range(p)] for __ in range(s)]
dp[0][0] = True
for i in range(p):
if pattern[i] is '*' and dp[0][i-2]: dp[0][i] = True
for i in range(1, s):
for j in range(1, p):
if pattern[j] is string[i] or pattern[j] is '.':
dp[i][j] = dp[i-1][j-1]
elif pattern[j] is '*' and (pattern[j-1] is string[i] or pattern[j-1] is '.'):
dp[i][j] = dp[i][j-2] or dp[i-1][j]
elif pattern[j] is '*' and not (pattern[j-1] is string[i] or pattern[j-1] is '.'):
dp[i][j] = dp[i][j-2]
return dp[s-1][p-1]
|
987,038 | 53b2e78aa9cae63be9749eae2de5d6135160c667 | #encoding=utf-8
from django.apps import AppConfig
class ProduccionConfig(AppConfig):
    """Django app configuration for the `produccion` app."""
    name = 'produccion'
    # Accented display name shown in the Django admin.
    verbose_name = u"Producción"
    def ready(self, *args, **kwargs):
        # Imported for side effects only: importing .signals registers its
        # receivers with Django's signal dispatcher once the app is ready.
        from .signals import *
|
987,039 | ce882e339c126d03b328c1ba2baa55684b88ee4c | # Copyright (C) 2018 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Admin dashboard page smoke tests."""
# pylint: disable=no-self-use
# pylint: disable=invalid-name
# pylint: disable=too-few-public-methods
# pylint: disable=protected-access
import random
import re
import pytest
from lib import base, constants, url, users
from lib.constants import objects, messages, roles
from lib.constants.element import AdminWidgetCustomAttributes
from lib.entities import entities_factory
from lib.page import dashboard
from lib.service import admin_webui_service
from lib.utils import selenium_utils
class TestAdminDashboardPage(base.Test):
  """Tests for admin dashboard page."""
  _role_el = constants.element.AdminWidgetRoles
  _event_el = constants.element.AdminWidgetEvents
  @pytest.fixture(scope="function")
  def admin_dashboard(self, selenium):
    """Open Admin Dashboard URL and
    return AdminDashboard page objects model."""
    selenium_utils.open_url(url.Urls().admin_dashboard)
    return dashboard.AdminDashboard(selenium)
  @pytest.mark.smoke_tests
  def test_roles_widget(self, admin_dashboard):
    """Check count and content of role scopes."""
    admin_roles_tab = admin_dashboard.select_roles()
    expected_dict = self._role_el.ROLE_SCOPES_DICT
    actual_dict = admin_roles_tab.get_role_scopes_text_as_dict()
    assert admin_dashboard.tab_roles.member_count == len(expected_dict)
    # Fix: the failure message previously passed `expected_dict` twice,
    # hiding the actual value (cf. test_custom_roles_widget below).
    assert expected_dict == actual_dict, (
        messages.AssertionMessages.
        format_err_msg_equal(expected_dict, actual_dict))
  @pytest.mark.smoke_tests
  def test_events_widget_tree_view_has_data(self, admin_dashboard):
    """Confirms tree view has at least one data row in valid format."""
    admin_events_tab = admin_dashboard.select_events()
    list_items = [item.text for item in admin_events_tab.get_events()]
    assert list_items
    items_with_incorrect_format = [
        item for item in list_items if not
        re.compile(self._event_el.TREE_VIEW_ROW_REGEXP).match(item)]
    assert len(items_with_incorrect_format) in [0, 1]
    if len(items_with_incorrect_format) == 1:
      # A line with incorrect format is created during DB migration.
      # We decided it's OK.
      assert items_with_incorrect_format[0].startswith(
          "by\n{}".format(users.MIGRATOR_USER_EMAIL))
    expected_header_text = self._event_el.WIDGET_HEADER
    actual_header_text = admin_events_tab.widget_header.text
    assert expected_header_text == actual_header_text
  @pytest.mark.smoke_tests
  def test_check_ca_groups(self, admin_dashboard):
    """Check that full list of Custom Attributes groups is displayed
    on Admin Dashboard panel.
    """
    ca_tab = admin_dashboard.select_custom_attributes()
    expected_ca_groups_set = set(
        [objects.get_normal_form(item) for item in objects.ALL_CA_OBJS])
    actual_ca_groups_set = set(
        [item.text for item in ca_tab.get_items_list()])
    assert expected_ca_groups_set == actual_ca_groups_set
  @pytest.mark.smoke_tests
  @pytest.mark.parametrize(
      "ca_type",
      AdminWidgetCustomAttributes.ALL_CA_TYPES
  )
  def test_add_global_ca(self, admin_dashboard, ca_type):
    """Create different types of Custom Attribute on Admin Dashboard."""
    def_type = objects.get_normal_form(random.choice(objects.ALL_CA_OBJS))
    expected_ca = entities_factory.CustomAttributeDefinitionsFactory().create(
        attribute_type=ca_type, definition_type=def_type)
    ca_tab = admin_dashboard.select_custom_attributes()
    ca_tab.add_custom_attribute(ca_obj=expected_ca)
    actual_cas = ca_tab.get_custom_attributes_list(ca_group=expected_ca)
    # 'actual_ca': multi_choice_options (None)
    self.general_contain_assert(expected_ca, actual_cas,
                                "multi_choice_options")
  def test_create_new_person_w_no_role(self, selenium):
    """Check newly created person is on Admin People widget"""
    expected_person = entities_factory.PeopleFactory().create(
        system_wide_role=roles.NO_ROLE)
    actual_person = admin_webui_service.PeopleAdminWebUiService(
        selenium).create_new_person(expected_person)
    self.general_equal_assert(expected_person, actual_person)
  @pytest.mark.smoke_tests
  def test_custom_roles_widget(self, admin_dashboard):
    """Check count and content of roles scopes."""
    expected_set = set(
        [objects.get_normal_form(item) for
         item in objects.ALL_OBJS_W_CUSTOM_ROLES]
    )
    actual_set = \
        admin_dashboard.select_custom_roles().get_objects_text_as_set()
    assert admin_dashboard.tab_custom_roles.member_count == len(expected_set)
    assert expected_set == actual_set, (
        messages.AssertionMessages.
        format_err_msg_equal(expected_set, actual_set))
|
987,040 | e86834faf5fe19688e2c69c8e831246fc4389884 | #training resource - https://atrium.ai/resources/build-and-deploy-a-docker-containerized-python-machine-learning-model-on-heroku/
#loading datasets from sklearn
from sklearn import datasets
#to build accuracy we split the dataset into train and test sets
from sklearn.model_selection import train_test_split
#build model using KNeighbors
from sklearn import neighbors
from sklearn.metrics import accuracy_score
import pickle
iris = datasets.load_iris()
#separating features and target labels into different arrays
x = iris.data
y= iris.target
#splitting data into train and test; no random_state is fixed, so the split
#(and the printed accuracy) varies from run to run
x_train, x_test, y_train, y_test = train_test_split(x,y, test_size= 0.3)
knn = neighbors.KNeighborsClassifier()
#training model for prediction on test dataset to calculate accuracy
knn.fit(x_train,y_train)
predictions = knn.predict(x_test)
#testing for the accuracy of the predictions on the held-out split
print("Accuracy Score: ", accuracy_score(y_test,predictions))
#export the fitted model so future predictions don't require retraining;
#NOTE: pickle files must only ever be loaded from trusted sources
with open('model.pkl', 'wb') as model_pkl:
    #store classification model into pickle file
    pickle.dump(knn, model_pkl)
|
987,041 | 3fdd2ff6b75e8f7fc60e6c3f4525e15836039967 | from flask import render_template, url_for, redirect, request, Blueprint
from flask_login import login_user, current_user, logout_user, login_required
from app import db, bcrypt
from app.models import User, Post
from app.users.forms import (RegistrationForm, LoginForm, UpdateAccountForm,
RequestResetForm, ResetPasswordForm)
from app.users.utils import save_img, send_reset_email
users = Blueprint('users', __name__)
@users.route("/register", methods=["GET", "POST"])
def register():
    """Create a new account; already-authenticated visitors are bounced
    to their logged-in page."""
    if current_user.is_authenticated:
        return redirect(url_for('main.logged_in', user=current_user.username))
    form = RegistrationForm()
    if form.validate_on_submit():
        # Never store the plain-text password; bcrypt-hash it first.
        hashed_password = bcrypt.generate_password_hash(
            form.password.data).decode('utf-8')
        user = User(username=form.username.data,
                    email=form.email.data, password=hashed_password)
        db.session.add(user)
        db.session.commit()
        # NOTE(review): server-side print only — the user gets no flash
        # message confirming the account was created.
        print('your account created ')
        return redirect(url_for('users.login'))
    return render_template("register.html", form=form)
@users.route("/login", methods=["GET", "POST"])
def login():
    """Authenticate an existing user by email/password and start a session."""
    if current_user.is_authenticated:
        return redirect(url_for('main.logged_in', user=current_user.username))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        # bcrypt compares the stored hash against the submitted password.
        if user and bcrypt.check_password_hash(user.password, form.password.data):
            login_user(user, remember=form.remember.data)
            return redirect(url_for('main.logged_in', user=user.username))
        else:
            # NOTE(review): a failed login falls through silently — the form
            # re-renders with no feedback to the user.
            pass
    return render_template('login.html', form=form)
@users.route("/logout")
def logout():
    """End the current session and return to the public landing page."""
    logout_user()
    return redirect(url_for('main.index'))
@users.route("/<user>/profile", methods=["GET", "POST"])
@login_required
def profile(user):
    """Show and update the logged-in user's profile.

    NOTE(review): the `user` URL parameter is never used — all reads and
    writes go through `current_user`, so any username in the URL edits the
    session's own profile.
    """
    form = UpdateAccountForm()
    if form.validate_on_submit():
        # Picture is optional; only replace it when a new one was uploaded.
        if form.picture.data:
            pic_file = save_img(form.picture.data)
            current_user.image_file = pic_file
        current_user.username = form.username.data
        current_user.email = form.email.data
        db.session.commit()
        print('your account has been updated')
        # Redirect (POST/redirect/GET) so a refresh doesn't resubmit.
        return redirect(url_for('users.profile', user=current_user.username))
    elif request.method == 'GET':
        # Pre-fill the form with the current values.
        form.username.data = current_user.username
        form.email.data = current_user.email
    image_file = url_for(
        'static', filename='profile_pics/'+current_user.image_file)
    return render_template("profile.html", image_src=image_file, form=form)
@users.route("/<logged_user>/posts/<user>")
@login_required
def user_post(user, logged_user):
    """Render every post authored by *user*.

    `logged_user` only shapes the URL; it is not used in the body.
    """
    # Idiom: single comprehension instead of a manual append loop.
    # NOTE(review): this still loads every post and filters in Python; a
    # joined query on the author relationship would push the filter to SQL.
    user_posts = [post for post in Post.query.all()
                  if post.author.username == user]
    return render_template('usersPost.html', posts=user_posts)
@users.route("/reset_password", methods=["GET", "POST"])
def reset_request():
    """First step of password reset: email the user a tokenized reset link."""
    if current_user.is_authenticated:
        return redirect(url_for('main.logged_in', user=current_user.username))
    form = RequestResetForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        # NOTE(review): unless RequestResetForm validates that the email
        # exists, `user` can be None here and send_reset_email(None) will
        # fail — confirm the form's validators.
        send_reset_email(user)
        # email has been sent message
        return redirect(url_for('users.login'))
    return render_template('reset_request.html', form=form)
@users.route("/reset_password/<token>", methods=["GET", "POST"])
def reset_token(token):
    """Second step of password reset: verify the emailed token and, on a
    valid form POST, store the new bcrypt-hashed password."""
    if current_user.is_authenticated:
        return redirect(url_for('main.logged_in', user=current_user.username))
    # Token is signed/expiring; None means invalid or expired.
    user = User.verify_reset_token(token)
    if user is None:
        pass # invalid or expired token message
        return redirect(url_for('users.reset_request'))
    form = ResetPasswordForm()
    if form.validate_on_submit():
        hashed_password = bcrypt.generate_password_hash(
            form.password.data).decode('utf-8')
        user.password = hashed_password
        db.session.commit()
        # NOTE(review): message says "created" but this path resets a password.
        print('your account created ')
        return redirect(url_for('users.login'))
    return render_template('reset_token.html', form=form)
|
987,042 | 37ce26ab016274145c0523c476b9052d49d17f1c | #!/usr/bin/env python3
#!/usr/bin/env bash
# subprojects=(fmt GSL tinyxml2)
# for subproject in "${subprojects[@]}"
# do
# pushd "subprojects/${subproject}"
# git remote update
# git checkout origin/master
# git status
# popd
# done
import subprocess
subprojects = ['fmt', 'GSL', 'tinyxml2']
for subproject in subprojects:
|
987,043 | 71531760834cc440156aea09e8af95d499c19cf7 | import os
import cv2
import random
import albumentations as A
import xml.etree.ElementTree as ET
from glob import glob
from tqdm import tqdm
from lxml.etree import Element, SubElement
def write_xml(save_path, bboxes, labels, filename, height, width, format):
    """Write one Pascal-VOC style annotation XML for an image.

    `format` ("pascal_voc" / "albumentations" / "yolo") selects how the raw
    bbox values are converted to absolute corner coordinates; pascal_voc
    boxes are used as-is.

    NOTE(review): the `filename` parameter is unused — both the <filename>
    element and the output path use the literal "(unknown)" string below.
    NOTE(review): `format` shadows the builtin of the same name.
    """
    root = Element("annotation")
    folder = SubElement(root, "folder")
    folder.text = "JPEGImages"
    file_name = SubElement(root, "filename")
    file_name.text = f'(unknown).jpg'
    size = SubElement(root, "size")
    w = SubElement(size, "width")
    w.text = str(width)
    h = SubElement(size, "height")
    h.text = str(height)
    depth = SubElement(size, "depth")
    depth.text = "3"
    if labels:
        for label, bbox in zip(labels, bboxes):
            obj = SubElement(root, 'object')
            name = SubElement(obj, 'name')
            name.text = label
            pose = SubElement(obj, 'pose')
            pose.text = 'Unspecified'
            truncated = SubElement(obj, 'truncated')
            truncated.text = '0'
            difficult = SubElement(obj, 'difficult')
            difficult.text = '0'
            bndbox = SubElement(obj, 'bndbox')
            xmin, ymin, xmax, ymax = bbox[0], bbox[1], bbox[2], bbox[3]
            if format == "albumentations":
                # Normalized corner coords -> absolute pixels (+0.5 rounds).
                xmin = int(xmin * width + 0.5)
                ymin = int(ymin * height + 0.5)
                xmax = int(xmax * width + 0.5)
                ymax = int(ymax * height + 0.5)
            elif format == "yolo":
                # Normalized center/size -> absolute corners.
                xmax = int((bbox[0]*width) + (bbox[2] * width)/2.0)
                xmin = int((bbox[0]*width) - (bbox[2] * width)/2.0)
                ymax = int((bbox[1]*height) + (bbox[3] * height)/2.0)
                ymin = int((bbox[1]*height) - (bbox[3] * height)/2.0)
            # print(xmin, ymin, xmax, ymax)
            node_xmin = SubElement(bndbox, 'xmin')
            node_xmin.text = str(int(xmin))
            node_ymin = SubElement(bndbox, 'ymin')
            node_ymin.text = str(int(ymin))
            node_xmax = SubElement(bndbox, 'xmax')
            node_xmax.text = str(int(xmax))
            node_ymax = SubElement(bndbox, 'ymax')
            node_ymax.text = str(int(ymax))
    # NOTE(review): `root` is an lxml Element but it is wrapped in the
    # stdlib xml.etree ElementTree here — confirm the mix serializes as
    # intended (lxml has its own etree.ElementTree).
    tree = ET.ElementTree(root)
    tree.write(f"{save_path}/(unknown).xml")
def visualize(image, bboxes):
    """Return a copy of *image* with every bbox drawn as a red rectangle."""
    annotated = image.copy()
    for box in bboxes:
        top_left = (int(box[0]), int(box[1]))
        bottom_right = (int(box[2]), int(box[3]))
        cv2.rectangle(annotated, top_left, bottom_right, color=(0, 0, 255), thickness=3)
    return annotated
def augmentation(image, bboxes, labels):
    """Apply the module-level albumentations `transform` pipeline.

    Returns the transformed (image, bboxes, labels) triple.
    NOTE(review): depends on the global `transform` assigned in the
    __main__ block; calling this before that assignment raises NameError.
    """
    transformed = transform(image=image, bboxes=bboxes, labels=labels)
    t_image, t_bboxes, t_labels = transformed["image"], transformed["bboxes"], transformed["labels"]
    return t_image, t_bboxes, t_labels
def refine_coordinates(height, width, x, y, w, h):
    """Convert a YOLO-normalized box (center x/y plus width/height) into
    absolute [xmin, ymin, xmax, ymax] corner coordinates."""
    center_x = x * width
    center_y = y * height
    half_w = (w * width) / 2.0
    half_h = (h * height) / 2.0
    return [int(center_x - half_w), int(center_y - half_h),
            int(center_x + half_w), int(center_y + half_h)]
def record_xml_process(save_dir, txt_files):
    """Convert YOLO label .txt files into Pascal-VOC XMLs plus copied and
    visualized JPEGs.

    For each label file with a matching frame under {save_dir}/Frames, reads
    its YOLO boxes, converts them to corner coordinates, and writes the
    annotation, a copy of the image, and a box-overlay visualization.

    Relies on the module-level `classes` list for id -> name lookup.
    """
    print(len(txt_files))
    for index in tqdm(range(len(txt_files))):
        file = txt_files[index]
        file_name = file.split('/')[-1].split('.')[0]
        image_file = f"{save_dir}/Frames/{file_name}.jpg"
        # Guard clause instead of nesting the whole body in an if.
        if not os.path.isfile(image_file):
            continue
        image = cv2.imread(image_file)
        labels, bboxes = [], []
        # Fix: context manager closes the label file (the original leaked
        # the handle returned by open()).
        with open(file, "r") as data:
            for line in data:
                parts = line.strip().split()
                label = int(parts[0])
                x, y, w, h = map(float, parts[1:])
                xmin, ymin, xmax, ymax = refine_coordinates(image.shape[0], image.shape[1], x, y, w, h)
                labels.append(classes[label])
                bboxes.append([xmin, ymin, xmax, ymax])
        write_xml(f"{save_dir}/Annotations", bboxes, labels, f"{index:>09}", image.shape[0], image.shape[1], format="pascal_voc")
        cv2.imwrite(f"{save_dir}/JPEGImages/{index:>09}.jpg", image)
        result = visualize(image, bboxes)
        cv2.imwrite(f"{save_dir}/Results/{index:>09}.jpg", result)
if __name__ == "__main__":
    path = "/home/ubuntu/Datasets/BR/total"
    # Square side used by the resize pipeline below.
    image_size = 384
    # Class-id -> name lookup consumed by record_xml_process via the global.
    classes = ["Baskin_robbins"]
    transform = A.Compose([
        A.Resize(image_size, image_size, p=1)
    ], bbox_params=A.BboxParams(format="pascal_voc", label_fields=["labels"]))
    txt_files = sorted(glob(f"{path}/exp/labels/*.txt"))
    # NOTE(review): output dirs are only created when BOTH JPEGImages and
    # Annotations are absent — a partially-created state skips makedirs
    # and later imwrite/write calls can fail.
    if not os.path.isdir(f"{path}/JPEGImages") and not os.path.isdir(f"{path}/Annotations"):
        os.makedirs(f"{path}/JPEGImages")
        os.makedirs(f"{path}/Annotations")
        os.makedirs(f"{path}/Results")
    # Shuffle undoes the sorted() above, randomizing processing order.
    random.shuffle(txt_files)
    record_xml_process(path, txt_files)
987,044 | 19448c04b8135ce7c7d506070717021dca137ec0 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from datetime import date
# Parse an ISO-8601 date string into a datetime.date object.
dataInicio = date.fromisoformat('2020-01-26')
print(dataInicio)
print(type(dataInicio))
# strftime renders it as day/month/two-digit-year text (a plain str).
Mudou = dataInicio.strftime('%d/%m/%y')
print(Mudou)
print(type(Mudou))
|
987,045 | c5cdec634159fe260553f2359ccf69fe72a03fb7 | # from prm_core.models import *
# from rest_framework.views import APIView, Response
# from django.contrib.auth.models import AbstractUser,Group, Permission
#
#
# class getPermission(APIView):
# """
# """
# def get(self, request):
#
# # start = request.GET.get('start', 0)
# data = Permission.objects.all().values()
#
# return Response({'status': '1', 'data': data})
#
#
# class getRole(APIView):
#
# def get(self, request):
#
# role_id = request.GET.get('role_id', None)
#
# if role_id is not None:
# return Response({
# 'status': 1,
# 'data': {
# 'permissions': list(map(lambda x: x.id, Roles.objects.get(id=role['id']).permissions.all())),
# 'group': list(map(lambda x: x.id, Roles.objects.get(id=role['id']).group.all()))
# }
# })
# else:
# data = Roles.objects.all().values()
# for role in data:
# per_ids = list(map(lambda x: x.id, Roles.objects.get(id=role['id']).permissions.all()))
# role.setdefault('permissions', per_ids)
# group_ids = list(map(lambda x: x.id, Roles.objects.get(id=role['id']).group.all()))
# role.setdefault('group': group_ids)
#
# return Response({'status': '1', 'data': data})
#
#
# class getUser(APIView):
#
# def get(self, request):
# data = User.objects.all().values()
#
# return Response({'status': 1, 'data': data})
#
#
# class getGroup(APIView):
#
# def get(self, request):
# data = Group.objects.all().values()
# for group in data:
# user_ids = list(map(lambda x:x.id, Group.objects.get(id=group['id']).user_set.all()))
# group.setdefault('user', user_ids)
#
# return Response({'status': '1', 'data': data})
#
# class roleAddGroup(APIView):
#
# def get(self, request):
# role_id = request.GET.get('role_id') #int类型
# group_ids = request.GET.get('group_ids') #list类型
#
# if role_id is None or group_ids is None or len(group_ids) < 1:
# return Response({'status': '2', 'data': 'parameter error'})
#
# for group_id in group_ids:
# Roles.objects.get(id=role_id).group.add(Group.objects.get(id=group_id)) #建立关系 将组添加到角色里
# #将角色里的权限和组建立关系
# Group.objects.get(id=group_id).permissions.add(*Roles.objects.get(id=role['id']).permissions.all())
#
# return Response({'status': '1', 'data': True})
#
#
# class roleRemoveGroup(APIView):
#
# def get(self, request):
# role_id = request.GET.get('role_id') #int类型
# group_ids = request.GET.get('group_ids') #list类型
#
# if role_id is None or group_ids is None or len(group_ids) < 1:
# return Response({'status': '2', 'data': 'parameter error'})
#
# for group_id in group_ids:
# Roles.objects.get(id=role_id).group.remove(Group.objects.get(id=group_id))
# Group.objects.get(id=group_id).permissions.remove(*Roles.objects.get(id=role['id']).permissions.all())
#
# return Response({'status': '1', 'data': True})
#
#
# class roleAddPermission(APIView):
#
# def get(self, request):
# role_id = request.GET.get('role_id')
# pids = request.GET.get('pids')
#
# if role_id is None or pids is None or len(pids) < 1:
# return Response({'status': '2', 'data': 'parameter error'})
#
# Roles.objects.get(id=role_id).permissions.add(*Permission.objects.filter(id__in=pids))
#
# return Response({'status': '1', 'data': True})
#
#
# class roleRemovePermission(APIView):
#
# def get(self, request):
# role_id = request.GET.get('role_id')
# pids = request.GET.get('pids')
#
# if role_id is None or pids is None or len(pids) < 1:
# return Response({'status': '2', 'data': 'parameter error'})
#
# Roles.objects.get(id=role_id).permissions.remove(*Permission.objects.filter(id__in=pids))
#
# return Response({'status': '1', 'data': True})
#
#
# class addRoles(APIView):
#
# def get(self, request):
# name = request.GET.get('name')
# pids = request.GET.get('pids')
# group_ids = request.GET.get('group_ids')
#
# if name is None:
# return Response({'status': '2', 'data': 'parameter error'})
#
# role_object = Roles(name = name)
# role_object.save()
# if pids:
# role_object.add(*Permission.objects.filter(id__in=pids))
# role_object.save()
# if group_ids:
# role_object.add(*Group.objects.filter(id__in=group_ids))
# role_object.save()
#
# return Response({'status': '1', 'data': {'id': role_object.id}})
#
# class removeRoles(APIView):
#
# def get(self, request):
# rids = request.GET.get('rids')
# Roles.objects.filter(id__in=rids).delete()
# return Response({'status': '1', 'data': True})
|
987,046 | ba1a519127d64787487127fbba0c2a7141b37d74 | # -*- coding: utf-8 -*-
from .twxproperty import TWX_Property
class TWX_Template():
    """In-memory description of a ThingWorx template: its properties plus
    which of them are simulated and which are simulated *and* followed."""
    def __init__(self, name, **kwargs):
        self.name = name
        # Mapping of property name -> TWX_Property.
        self.allProperties = kwargs.get('allProperties', {})
        # Property names that are simulated but not followed.
        self.simulateList = kwargs.get('simulateList', [])
        # Property names that are simulated and followed.
        self.followedList = kwargs.get('followedList', [])
        # Defaults to the local name unless the remote side differs.
        self.remoteTemplateName = kwargs.get('remoteTemplateName', name)
    def set_remote_template_name(self, templatename):
        self.remoteTemplateName = templatename
    def set_all_properties(self, all_properties):
        self.allProperties = all_properties
    def set_simulate_list(self, simulate_list):
        self.simulateList = simulate_list
    def set_followed_list(self, followed_list):
        self.followedList = followed_list
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        return (f'{self.name} has {len(self.allProperties)} properties, \n'
                f' will simulate:{self.simulateList}, followed:{self.followedList}')
987,047 | d2ae1daf5cd4fdd594a6f1b376fa0d3623c67a13 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Sotfmax.py
#
# Copyright 2016 DC2 <dc2@UASLP-DC2>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
"""Softmax."""
scores = [3.0/10, 1.0/10, 0.2/10]
import numpy as np
def softmax(x):
    """Compute softmax values for each sets of scores in x."""
    # Exponentiate once, then normalise column-wise (axis 0) so each
    # column of a 2-D score array sums to 1.
    exps = np.exp(x)
    return exps / exps.sum(axis=0)
print(softmax(scores))
# Plot softmax curves
import matplotlib.pyplot as plt

x = np.arange(-2.0, 6.0, 0.1)
# Three score rows: a rising ramp, a constant 1.0 and a constant 0.2.
scores = np.vstack([x, np.ones_like(x), 0.2 * np.ones_like(x)])

plt.plot(x, softmax(scores).T, linewidth=2)
plt.show()

import os
# Fix: was the Python 2 statement `print os.path.exists (...)`, a syntax
# error under Python 3 even though the rest of the file uses print() calls.
print(os.path.exists("notMNIST_large.tar.gz"))
|
987,048 | 354cbf65c7a70074edd01d05b2dd762cbffc216c | # -*- coding: utf-8 -*-
"""
Solucion de la ecuacion de calor usando un esquema implicito
@author: Nicolas Guarin-Zapata
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from diferencias import resolver_implicito
niter = 10000
nx = 50
alpha = 1.0
fuente = lambda x: 1
x = np.linspace(-1, 1, nx)
np.random.seed(201)
u_ini = 0.5 * (1 + x) + 0.1 * np.random.normal(size=nx)
u_a = 0.0
u_b = 1.0
u_ini[0] = u_a
u_ini[-1] = u_b
k_iter = 10
dx = x[1] - x[0]
dt = k_iter * dx**2/alpha
t = np.linspace(0, niter*dt, niter)
U = resolver_implicito(niter, u_ini, alpha, dt, x, fuente)
#%% Animacion
max_val = max(np.max(U), -np.min(U))
fig, ax = plt.subplots()
line, = ax.plot(x, U[0, :])
def update(data):
    """FuncAnimation callback: swap the next temperature profile into the line."""
    line.set_ydata(data)
    # Trailing comma makes this a 1-tuple: FuncAnimation expects an
    # iterable of updated artists.
    return line,
ani = animation.FuncAnimation(fig, update, U, interval=niter//100,
repeat=True)
plt.grid()
plt.xlabel('x')
plt.ylabel('y(x)')
plt.title('Temperatura en una varilla')
plt.show() |
987,049 | 20634085137ec945a9317c7b78ed7a1a01bbafd4 | """
Not yet functional.
"""
from argparse import Namespace
from medikit.events import subscribe
from medikit.feature import Feature
from medikit.structs import Script
DEFAULT_NAME = '$(shell echo $(PACKAGE) | tr A-Z a-z)'
DOCKER = 'docker'
ROCKER = 'rocker'
class DockerConfig(Feature.Config):
    """Configuration for the docker feature: remote image coordinates plus
    the builder (plain docker or rocker) that shapes the make targets."""
    def __init__(self):
        self._registry = None
        self._user = None
        self._name = DEFAULT_NAME
        # Plain docker is the default builder.
        self.use_default_builder()
    def set_remote(self, registry=None, user=None, name=DEFAULT_NAME):
        """Set the registry/user/name triple composing the image path."""
        self._registry = registry
        self._user = user
        self._name = name
    def _get_default_variables(self):
        # Makefile variables shared by both builders.
        return dict(
            DOCKER='$(shell which docker)',
            DOCKER_BUILD='$(DOCKER) build',
            DOCKER_BUILD_OPTIONS='',
            DOCKER_PUSH='$(DOCKER) push',
            DOCKER_PUSH_OPTIONS='',
            DOCKER_RUN='$(DOCKER) run',
            DOCKER_RUN_COMMAND='',
            DOCKER_RUN_OPTIONS='',
        )
    def _get_default_image_variables(self):
        # Image coordinates; tag tracks the package version.
        return dict(
            DOCKER_IMAGE=self.image,
            DOCKER_TAG='$(VERSION)',
        )
    def use_default_builder(self):
        """Target plain `docker build/push/run` (called from __init__)."""
        self.builder = DOCKER
        self._variables = [
            self._get_default_variables(),
            self._get_default_image_variables(),
        ]
        self.scripts = Namespace(
            build=Script('$(DOCKER_BUILD) $(DOCKER_BUILD_OPTIONS) -t $(DOCKER_IMAGE):$(DOCKER_TAG) .'),
            push=Script('$(DOCKER_PUSH) $(DOCKER_PUSH_OPTIONS) $(DOCKER_IMAGE):$(DOCKER_TAG)'),
            run=Script(
                '$(DOCKER_RUN) $(DOCKER_RUN_OPTIONS) --interactive --tty -p 8000:8000 $(DOCKER_IMAGE):$(DOCKER_TAG) $(DOCKER_RUN_COMMAND)'
            ),
            shell=Script('DOCKER_RUN_COMMAND="/bin/bash" $(MAKE) docker-run'),
        )
    def use_rocker_builder(self):
        """Switch to rocker: adds rocker variables and rewrites the
        build/push scripts in place (run/shell from the default builder
        are kept)."""
        self.builder = ROCKER
        self._variables = [
            self._get_default_variables(),
            self._get_default_image_variables(),
            {
                'ROCKER':
                '$(shell which rocker)',
                'ROCKER_BUILD':
                '$(ROCKER) build',
                'ROCKER_BUILD_OPTIONS':
                '',
                'ROCKER_BUILD_VARIABLES':
                '--var DOCKER_IMAGE=$(DOCKER_IMAGE) --var DOCKER_TAG=$(DOCKER_TAG) --var PYTHON_REQUIREMENTS_FILE=requirements-prod.txt',
            },
        ]
        self.scripts.build.set('$(ROCKER_BUILD) $(ROCKER_BUILD_OPTIONS) $(ROCKER_BUILD_VARIABLES) .')
        self.scripts.push.set('ROCKER_BUILD_OPTIONS="$(ROCKER_BUILD_OPTIONS) --push" $(MAKE) docker-build')
    @property
    def variables(self):
        # Yield (name, value) pairs, each variable group sorted by name.
        for variables in self._variables:
            yield from sorted(variables.items())
    @property
    def image(self):
        # Compose registry/user/name, skipping unset (None) components.
        return '/'.join(filter(None, (self._registry, self._user, self._name)))
class DockerFeature(Feature):
    """Medikit feature wiring DockerConfig into the generated project."""
    Config = DockerConfig
    @subscribe('medikit.feature.make.on_generate')
    def on_make_generate(self, event):
        """Inject docker variables and the phony docker-* make targets."""
        docker_config = event.config['docker']
        for var, val in docker_config.variables:
            event.makefile[var] = val
        # Targets
        event.makefile.add_target('docker-build', docker_config.scripts.build, phony=True)
        event.makefile.add_target('docker-push', docker_config.scripts.push, phony=True)
        event.makefile.add_target('docker-run', docker_config.scripts.run, phony=True)
        event.makefile.add_target('docker-shell', docker_config.scripts.shell, phony=True)
    @subscribe('medikit.on_end')
    def on_end(self, event):
        """Render .dockerignore, docker-compose.yml and the builder-specific
        Dockerfile/Rockerfile templates into the project."""
        docker_config = event.config['docker']
        self.render_file_inline(
            '.dockerignore', '''
**/__pycache__
*.egg-info
.cache
.git
.idea
/Dockerfile
/Projectfile
/Rockerfile
node_modules
static
''', event.variables
        )
        self.render_file_inline(
            'docker-compose.yml', '''
version: '3'
volumes:
#   postgres_data: {}
#   rabbitmq_data: {}
services:
#   postgres:
#     image: postgres:10
#     ports:
#       - 5432:5432
#     volumes:
#       - postgres_data:/var/lib/postgresql/data
#   rabbitmq:
#     image: rabbitmq:3
#     ports:
#       - 5672:5672
#     volumes:
#       - rabbitmq_data:/var/lib/rabbitmq
'''
        )
        if docker_config.builder == DOCKER:
            self.render_file_inline('Dockerfile', '''
FROM python:3
''')
        elif docker_config.builder == ROCKER:
            self.render_file_inline(
                'Rockerfile', '''
FROM python:3

# Mount cache volume to keep cache persistent from one build to another
MOUNT /app/.cache
WORKDIR /app

# Create application user
RUN useradd --home-dir /app --group www-data app \
 && pip install -U pip wheel virtualenv \
 && mkdir /env \
 && chown app:www-data -R /app /env

# Add and install python requirements in a virtualenv
USER app
RUN virtualenv -p python3 /env/
ADD setup.py *.txt /app/
RUN /env/bin/pip install -r {{ '{{ .PYTHON_REQUIREMENTS_FILE }}' }}

# Add everything else
USER root
ADD . /app
# IMPORT /static /app
# IMPORT /assets.json /app
RUN chown app:www-data -R /app

# Entrypoint
USER app
CMD /env/bin/gunicorn config.wsgi --bind 0.0.0.0:8000 --workers 4

PUSH {{ '{{ .DOCKER_IMAGE }}:{{ .DOCKER_TAG }}' }}
'''
            )
        else:
            raise NotImplementedError('Unknown builder {}'.format(docker_config.builder))
__feature__ = DockerFeature
|
987,050 | e3b4c4cd60893b64f0554baa380985821f962863 | # Generated by Django 2.0.3 on 2018-03-30 09:04
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the `constable` table with
    # name/phone/pnr char columns. Avoid hand-editing applied migrations.
    dependencies = [
        ('police', '0002_auto_20180328_1811'),
    ]
    operations = [
        migrations.CreateModel(
            name='constable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('phone', models.CharField(max_length=100)),
                ('pnr', models.CharField(max_length=100)),
            ],
        ),
    ]
|
987,051 | 1d422d979cf265f2c8c09b7f0dbca007b7d04da8 | from mypackage import db
class StoreModel(db.Model):
    """SQLAlchemy model for a store owning a collection of items."""
    __tablename__ = 'stores'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80))
    # one-to-many: ItemModel rows gain a `stores` backref; lazy='dynamic'
    # keeps `items` a query object until .all() is called.
    items = db.relationship('ItemModel', backref='stores', lazy='dynamic')
    def __init__(self, name):
        self.name = name
    def json(self):
        """Serialize the store and its items to a JSON-ready dict."""
        # Idiom: comprehension instead of map(lambda ...).
        return {"stores": self.name,
                "items": [item.json() for item in self.items.all()]}
    @classmethod
    def find_by_name(cls, name):
        """Return the first store named *name*, or None."""
        return cls.query.filter_by(name=name).first()
    def save_to_stores(self):
        """Add this row to the session and commit."""
        db.session.add(self)
        db.session.commit()
|
987,052 | c08d5d5d17bd77281c9162e43b51e37bdb1bf4fd | import datetime
from django.conf import settings
from django.test import TestCase
from timepiece import utils
from timepiece.tests import factories
from timepiece.reports.utils import generate_dates
class ReportsTestBase(TestCase):
    """Shared fixture base for the timepiece report tests.

    Creates two regular users, a superuser, a billable and a non-billable
    activity, paid-leave projects (sick/vacation), and five projects whose
    names deliberately collide so name-grouping code is exercised.
    NOTE(review): ``self.log_time`` used below is assumed to be provided by
    a mixin/subclass — confirm against the test suite.
    """

    def setUp(self):
        super(ReportsTestBase, self).setUp()
        self.user = factories.User()
        self.user2 = factories.User()
        self.superuser = factories.Superuser()
        self.devl_activity = factories.Activity(billable=True)
        self.activity = factories.Activity()
        self.sick = factories.Project()
        self.vacation = factories.Project()
        # Paid-leave projects are identified by pk via settings.
        settings.TIMEPIECE_PAID_LEAVE_PROJECTS = {
            'sick': self.sick.pk,
            'vacation': self.vacation.pk,
        }
        self.leave = [self.sick.pk, self.vacation.pk]
        self.p1 = factories.BillableProject(name='1')
        self.p2 = factories.NonbillableProject(name='2')
        self.p4 = factories.BillableProject(name='4')
        self.p3 = factories.NonbillableProject(name='1')
        self.p5 = factories.BillableProject(name='3')
        self.default_projects = [self.p1, self.p2, self.p3, self.p4, self.p5]
        # Dates span multiple ISO weeks so weekly truncation is exercised.
        self.default_dates = [
            utils.add_timezone(datetime.datetime(2011, 1, 3)),
            utils.add_timezone(datetime.datetime(2011, 1, 4)),
            utils.add_timezone(datetime.datetime(2011, 1, 10)),
            utils.add_timezone(datetime.datetime(2011, 1, 16)),
            utils.add_timezone(datetime.datetime(2011, 1, 17)),
            utils.add_timezone(datetime.datetime(2011, 1, 18)),
        ]

    def make_entries(self, user=None, projects=None, dates=None,
                     hours=1, minutes=0):
        """Make several entries to help with reports tests"""
        if not user:
            user = self.user
        if not projects:
            projects = self.default_projects
        if not dates:
            dates = self.default_dates
        # One entry of (hours, minutes) per project per date.
        for project in projects:
            for day in dates:
                self.log_time(project=project, start=day,
                              delta=(hours, minutes), user=user)

    def bulk_entries(self, start=datetime.datetime(2011, 1, 2),
                     end=datetime.datetime(2011, 1, 4)):
        """Log 2h/day for user and 1h/day for user2 across all projects
        (including the sick project) between start and end inclusive."""
        start = utils.add_timezone(start)
        end = utils.add_timezone(end)
        dates = generate_dates(start, end, 'day')
        projects = [self.p1, self.p2, self.p2, self.p4, self.p5, self.sick]
        self.make_entries(projects=projects, dates=dates,
                          user=self.user, hours=2)
        self.make_entries(projects=projects, dates=dates,
                          user=self.user2, hours=1)

    def check_generate_dates(self, start, end, trunc, dates):
        """Assert generate_dates(start, end, trunc) yields exactly *dates*
        (comparison is date-only; datetimes are truncated)."""
        for index, day in enumerate(generate_dates(start, end, trunc)):
            if isinstance(day, datetime.datetime):
                day = day.date()
            self.assertEqual(day, dates[index].date())
|
987,053 | 8ab8e012f9a20841b12cc8427cab463192612bef | from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
__all__ = ['ToastNotifier']
# #############################################################################
# ########## Libraries #############
# ##################################
# standard library
import logging
import threading
import os, ctypes
from pkg_resources import Requirement
from pkg_resources import resource_filename
from Queue import Empty
from time import sleep
# 3rd party modules
from win32api import GetModuleHandle
from win32api import PostQuitMessage
from win32con import COLOR_WINDOW
from win32con import CS_HREDRAW
from win32con import CS_VREDRAW
from win32con import CW_USEDEFAULT
from win32con import IDI_APPLICATION
from win32con import IMAGE_ICON
from win32con import LR_DEFAULTSIZE
from win32con import LR_LOADFROMFILE
from win32con import WM_LBUTTONUP
from win32con import WM_RBUTTONUP
from win32con import WM_DESTROY
from win32con import WM_USER
from win32con import WS_OVERLAPPED
from win32con import WS_SYSMENU
from win32gui import CreateWindow
from win32gui import DestroyWindow
from win32gui import LoadIcon
from win32gui import LoadImage
from win32gui import NIF_ICON
from win32gui import NIIF_ICON_MASK
from win32gui import NIF_INFO
from win32gui import NIF_MESSAGE
from win32gui import NIF_TIP
from win32gui import NIM_ADD
from win32gui import NIM_DELETE
from win32gui import NIM_MODIFY
from win32gui import PumpWaitingMessages
from win32gui import RegisterClass
from win32gui import UnregisterClass
from win32gui import Shell_NotifyIcon
from win32gui import UpdateWindow
from win32gui import WNDCLASS
from multiprocessing import Queue, Process
# ############################################################################
# ########### Prevate functions ####
# ##################################
def startSubprocess(icon_path, MessageQ):
    """Worker entry point: build the tray-icon handler and pump its queue
    until the "kys" sentinel arrives.  (Despite the name, ToastNotifier
    runs this in a *thread*, not a subprocess.)"""
    n = NotificationSubprocess(icon_path, MessageQ)
    n.event_loop()
# ############################################################################
# ########### Classes ##############
# ##################################
class ToastNotifier(object):
    '''Public interface for showing Windows toast notifications.

    Spawns a background thread running the win32 tray-icon event loop and
    forwards host names to it over a multiprocessing Queue.
    '''
    def __init__(self, IconPath):
        # IconPath: .ico file used for the notification-area icon.
        self.msgQ = Queue()
        self._thread1 = threading.Thread(target=startSubprocess, args=(IconPath, self.msgQ,))
        self._thread1.start()

    def show_toast(self,ToastMessage):
        """Queue a host name to be logged/shown by the worker thread."""
        self.msgQ.put(ToastMessage)

    def endProcess(self):
        """Ask the worker loop to exit ("kys" is the shutdown sentinel)."""
        self.msgQ.put("kys")
class Mbox(object):
    """Thin wrapper around the Win32 MessageBoxW dialog."""
    def __init__(self):
        self.visible = 0  # 1 while a dialog is believed to be open

    def show(self, title, text, style):
        """Show a message box; MessageBoxW blocks until it is dismissed."""
        if self.visible:
            self.destroy()
        self.window = ctypes.windll.user32.MessageBoxW(0,text,title,style)
        self.visible = 1

    def destroy(self):
        # NOTE(review): only clears the bookkeeping flag; it does not close
        # the native dialog — confirm this is intended.
        self.visible = 0
class NotificationSubprocess(object):
    '''Tray-icon worker: shows a notification-area icon, logs reported
    hosts, and displays the log in a message box when the icon is clicked.

    adapted from: https://github.com/jithurjacob/Windows-10-Toast-Notifications
    '''

    def __init__(self, icon_path, msg_q):
        """Create the hidden message window and the tray icon.

        icon_path -- .ico file for the tray icon; falls back to the stock
                     application icon if loading fails.
        msg_q     -- queue of host names polled by event_loop().
        """
        self.visible = 0
        self.log = []        # most-recent-first list of reported hosts
        self._thread = None  # thread currently showing the history box
        self.msg_q = msg_q
        self.message_box = Mbox()
        # pywin32 calls these handlers with (hwnd, msg, wparam, lparam).
        message_map = {
            WM_DESTROY: self.onDestroy,
            WM_USER + 20: self.onTaskbarNotify,
        }
        wc = WNDCLASS()
        hinst = wc.hInstance = GetModuleHandle(None)
        icon_flags = LR_LOADFROMFILE | LR_DEFAULTSIZE
        try:
            self.hicon = LoadImage(hinst, os.path.realpath(icon_path),
                                   IMAGE_ICON, 0, 0, icon_flags)
        except Exception as e:
            print(str(e))
            self.hicon = LoadIcon(0, IDI_APPLICATION)
        wc.lpszClassName = str("Trustbase_notification")
        wc.style = CS_VREDRAW | CS_HREDRAW
        wc.hbrBackground = COLOR_WINDOW
        wc.lpfnWndProc = message_map  # could also specify a wndproc.
        classAtom = RegisterClass(wc)
        # Create the (never shown) window that receives tray callbacks.
        style = WS_OVERLAPPED | WS_SYSMENU
        self.hwnd = CreateWindow(classAtom, "MITM_alert", style,
                                 0, 0, CW_USEDEFAULT, CW_USEDEFAULT,
                                 0, 0, hinst, None)
        UpdateWindow(self.hwnd)
        self.notification_id = 1
        self.show()

    def show(self, nid=None):
        """Display the taskbar icon"""
        flags = NIF_ICON | NIF_MESSAGE
        if nid is None:
            nid = (self.hwnd, 0, flags, WM_USER + 20, self.hicon)
        if self.visible:
            self.hide()
        Shell_NotifyIcon(NIM_ADD, nid)
        self.visible = 1

    def hide(self):
        """Hide the taskbar icon"""
        if self.visible:
            nid = (self.hwnd, 0)
            Shell_NotifyIcon(NIM_DELETE, nid)
            self.visible = 0

    def event_loop(self):
        """Poll the queue forever; "kys" is the shutdown sentinel.

        NOTE(review): this is a busy spin (no blocking get, no sleep); kept
        as-is to preserve the original shutdown semantics.
        """
        while 1:
            try:
                host = self.msg_q.get(False)  # non-blocking; raises Empty
                if host == "kys":
                    return
                self.show_toast(host)
            except Empty:
                pass
            # PumpWaitingMessages()

    def show_toast(self, host="this website"):
        """Record *host* newest-first, bounded to the 20 most recent.

        The balloon-toast code below was deliberately disabled; the log is
        shown on tray-icon click instead.
        """
        self.log.insert(0, host)
        if len(self.log) > 20:
            del self.log[20:]
        # flags = NIF_ICON | NIF_MESSAGE | NIF_INFO
        # nid = (self.hwnd, 0, flags, WM_USER+20, self.hicon, "tool tip", "Your traffic to {} is being monitored by your employer\nClick on the system tray icon to see a complete list".format(host), 9, "TrustBase", NIIF_ICON_MASK)
        # self.show(nid)

    def onDestroy(self, hwnd=None, msg=None, wparam=None, lparam=None):
        """WM_DESTROY handler: tear down UI state and end the message loop.

        Fixes: (1) the handler now accepts the (hwnd, msg, wparam, lparam)
        arguments Windows dispatches — the zero-argument version raised
        TypeError on WM_DESTROY; (2) PostQuitMessage is the name imported
        from win32api — the old ``win32gui.PostQuitMessage`` was a
        NameError because ``win32gui`` itself was never imported.
        """
        self.message_box.destroy()
        self.hide()
        PostQuitMessage(0)

    def onTaskbarNotify(self, hwnd, msg, wparam, lparam):
        """Left-click on the tray icon shows the monitored-host history."""
        if lparam == WM_LBUTTONUP:
            self.show_history()
        return 1

    def show_history(self):
        """Show the logged hosts in a threaded (non-blocking) message box."""
        if self._thread != None and self._thread.is_alive():
            pass  # learn how to close the active message box
        if len(self.log) == 0:
            self._thread = threading.Thread(target=self.message_box.show, args=("TrustBase Log", 'No traffic has been monitored by your employer this session', 0))
        else:
            # Oldest-first for display.
            reverse = self.log[::-1]
            self._thread = threading.Thread(target=self.message_box.show, args=("TrustBase Log", 'Your traffic to the following sites has been monitored by your employer:\n- ' + '\n- '.join(reverse), 0))
        self._thread.start()
'''
def main():
n = ToastNotifier("TrustBaseIcon.ico")
for i in range(1, 5):
n.showTost("site " + str(i))
sleep(2)
if __name__ == '__main__':
#freeze_support()
main()'''
|
987,054 | 1c273967aadd12ca32e7a1e4313e5a6ef424aa8b | from flask import Flask, request, jsonify
import distilbert_model as model
app = Flask(__name__)
@app.route('/')
def hello():
    """Health-check endpoint confirming the server is up."""
    return 'Congrats! Server is working'
# get the json data
@app.route('/get_sentiment', methods = ['POST'])
def get_sentiment():
    """Run sentiment prediction on the posted JSON's 'Review' field."""
    # force=True parses the body as JSON regardless of Content-Type.
    tx = request.get_json(force = True)
    text = tx['Review']
    sent = model.get_prediction(text)
    return jsonify(result = sent)
if __name__ == '__main__':
app.run(host = '0.0.0.0', port = 5000, debug = True, use_reloader = True)
|
987,055 | 3b9ef36c5b9a9886d9123dd2510da7d4eaa8ab70 | import sys
import json
import os.path
import colorsys
from PIL import Image,ImageDraw
import numpy as np
def run():
    """Recolour a 16x16 source image onto the 1024x1024 skin template.

    Loads the hue-location tables (skintable.json / rendertable.json),
    reads the desired colour for every hue bucket from the input image,
    compensates for the per-face brightness shading, and writes the
    result to out/<input filename>.

    Raises Exception for a wrongly sized input, an unmapped hue, or a
    pixel brighter than the face's brightness budget allows.
    """
    # Input image path from argv, with a test default.
    if len(sys.argv) > 1:
        imgpath = sys.argv[1]
    else:
        imgpath = "assets/test/tree.png"
    inpImg = Image.open(imgpath)
    if inpImg.size != (16, 16):
        raise Exception("invalid size")

    # JSON object keys are strings; both tables are keyed by integer hue.
    # (with-blocks replace the manual open/close pairs.)
    with open("skintable.json", "r") as f:
        skinHueLocs = {int(k): v for k, v in json.load(f).items()}
    with open("rendertable.json", "r") as f:
        renderHueLocs = {int(k): v for k, v in json.load(f).items()}

    output = Image.new('RGBA', (1024, 1024), (255, 0, 0, 0))
    pixels = output.load()

    # Per-face brightness multipliers baked into the render; hoisted out of
    # the loop (it is loop-invariant).
    faceBrightnessModifier = {
        "L": 0.686,
        "F": 0.396,
        "R": 0.443,
        "B": 0.988,
        "U": 0.914,
        "D": 0,  # not actually 0 but not visible
    }

    for skinHue, outputList in skinHueLocs.items():
        try:
            renderpos = renderHueLocs[skinHue]
        except KeyError:  # narrowed from a bare except
            raise Exception(f"{outputList[0]} is not mapped")
        (px, py) = renderpos[0]
        loc = (px, py)
        desiredcol = inpImg.getpixel(loc)
        (r, g, b, a) = desiredcol
        (h, s, v) = colorsys.rgb_to_hsv(r / 255., g / 255., b / 255.)
        for x, y in outputList:
            face = getFace(x, y)
            vModifier = faceBrightnessModifier[face]
            # Undo the face shading so the rendered pixel matches.
            nv = v / vModifier
            if nv > 1.001:
                raise Exception(f"(x,y)={loc} is too bright, max V for this location is {vModifier*100}% in HSV")
            nv = min(nv, 1)
            (nr, ng, nb) = colorsys.hsv_to_rgb(h, s, nv)
            requiredColor = (int(nr * 256), int(ng * 256), int(nb * 256), a)
            pixels[x, y] = requiredColor

    fname = os.path.basename(imgpath)
    # Replaces the bare `try: os.mkdir("out") except:""` which swallowed
    # every error (including KeyboardInterrupt).
    os.makedirs("out", exist_ok=True)
    output.save(f"out/{fname}")
def getFace(x, y):
    """Map a texture coordinate to the cube face it belongs to.

    The 1024x1024 template is divided into 128x128 cells: row 1 holds
    L/F/R/B in columns 4-7, row 0 holds U/D in columns 5-6.  Any other
    coordinate raises Exception.
    """
    cell = 128
    col = x // cell
    row = y // cell
    if row == 1 and 4 <= col < 8:
        return "LFRB"[col - 4]
    if row == 0 and 5 <= col < 7:
        return "UD"[col - 5]
    raise Exception(f"(x,y)={(x,y)} is not mapped to a face, how did you even manage this?")
run() |
987,056 | 9b0255f8dc03dcffc156826fb195f44e649484ef | import numpy as np
import numpy.matlib
import scipy.io as sio
# scale the data to the desired range (default is 0:1)...will work on matrices down columns (i.e. matlab style)
def scaleData(data, minVal=0, maxVal=1):
    """Rescale each column of *data* linearly into [minVal, maxVal].

    Works column-wise (MATLAB style).  Accepts any array-like; now also
    works on 1-D input (the old repmat version required 2-D).

    Fixes/improvements: removed the unused ``maxData`` local and replaced
    the deprecated ``np.matlib.repmat`` tiling with NumPy broadcasting.
    NOTE: a constant column has zero range and still produces NaN/inf,
    matching the original behaviour.

    Returns the scaled array (float).
    """
    data = np.asanyarray(data)
    # Shift so each column starts at 0, then scale its peak-to-peak range.
    shifted = data - np.amin(data, axis=0)
    scaled = shifted / np.ptp(shifted, axis=0) * (maxVal - minVal) + minVal
    return scaled
# zscore and return mean and std as well (matrix or column vector input with row == samples/trials, columns==feature)
def zscore(data):
    """Z-score *data* column-wise (rows = samples, columns = features).

    Uses the population standard deviation (ddof=0), as before.
    Replaces the deprecated ``np.matlib.repmat`` tiling with broadcasting,
    which also makes 1-D input work.

    Returns (zdata, mean_data, std_data).
    """
    data = np.asanyarray(data)
    mean_data = np.mean(data, axis=0)
    std_data = np.std(data, axis=0)
    # Broadcasting subtracts/divides per column without explicit tiling.
    zdata = (data - mean_data) / std_data
    return zdata, mean_data, std_data
def readMatData(file_name_in, field_name, file_name_out):
    """Load *field_name* from a MATLAB .mat file and cache it as .npy.

    file_name_in  -- path to the .mat file.
    field_name    -- variable name inside the .mat file.
    file_name_out -- destination for np.save (``.npy`` appended by NumPy).

    Returns the field as a numpy array.
    """
    print("reading and converting:", file_name_in)
    mat_contents = sio.loadmat(file_name_in)
    data = np.array(mat_contents[field_name])
    # Persist in NumPy's native format for fast reloads.
    np.save(file_name_out, data)
    return data
def binOrientations(labels, num_bins):
    """Bin orientation labels (degrees) into num_bins discrete categories.

    labels   -- 1-D array of orientations; bin b covers (b*w, (b+1)*w]
                with w = 180/num_bins (bin 0 also catches values <= w).
    num_bins -- must divide 180 evenly; otherwise an error is printed and
                an empty tuple is returned (kept for compatibility).

    Labels above 180 fall into no bin and stay 0, as before.
    Returns an array of bin indices.
    """
    if 180 % num_bins != 0:
        print("error num_bins must go evenly into 180")
        return ()
    step = 180 / num_bins
    upper_edges = np.arange(step, 180 + step, step)
    binned_labels = np.zeros(labels.shape[0])
    for trial, label in enumerate(labels):
        for b, edge in enumerate(upper_edges):
            if b == 0:
                if label <= edge:
                    binned_labels[trial] = b
                    break
            elif upper_edges[b - 1] < label <= edge:
                binned_labels[trial] = b
    return binned_labels
# make basis functions. input in radians.
def make_basis_function(x, mu, num_chans):
    """Raised-cosine basis function centred at *mu* (radians).

    The exponent is num_chans rounded down to the nearest even number
    (num_chans - num_chans % 2), the usual choice for an evenly tiling
    channel basis.
    """
    exponent = num_chans - (num_chans % 2)
    return np.cos(x - mu) ** exponent
987,057 | 4470a85b2a5261b77efbec6e726bddb110a327b0 | # -*- coding: utf-8 -*-
"""
Created on Sat May 16 12:17:30 2020
@author: Achuth MG
"""
import PyPDF2
pdffileobj = open('C://Users//kotre/Desktop/Chartered-Data-Scientists-Curriculum-2020 (1).pdf','rb')
pdfreader=PyPDF2.PdfFileReader(pdffileobj)
print(pdfreader.getNumPages())
pageobj=pdfreader.getPage(1)
text_in_page = pageobj.extractText()
print(text_in_page)
folder_name = text_in_page.lower().split(',')
folder_name1= [x.strip(' ') for x in folder_name]
folder_name2= [x.replace('\n','') for x in folder_name1]
replacethese=['(',')','\n','-']
folder_name3= [x.replace('(','') for x in folder_name2]
folder_name3= [x.replace(')','') for x in folder_name3]
folder_name3= [x.replace('-','') for x in folder_name3]
folder_name3= [x.replace(':','') for x in folder_name3]
import os
main_path='F:\GitHub Repo\FinestStatsModel\FinestStatsModel\Python'
os.chdir(main_path)
os.path.join(main_path,folder_name3[2])
paths=[]
for foldernames in folder_name3:
paths.append(os.path.join(main_path,foldernames))
for path in paths:
os.makedirs(path)
|
987,058 | 648a1bb966db74db0625e4aa5ada5f4ae29c400f | #!/usr/bin/env python
## This file is part of Scapy
## This program is published under a GPLv2 license
"""
TLS client used in unit tests.
Start our TLS client, send our send_data, and terminate session with an Alert.
Optional cipher_cuite_code and version may be provided as hexadecimal strings
(e.g. c09e for TLS_DHE_RSA_WITH_AES_128_CCM and 0303 for TLS 1.2).
Reception of the exact send_data on the server is to be checked externally.
"""
import sys, os, time
import multiprocessing
basedir = os.path.abspath(os.path.join(os.path.dirname(__file__),"../../"))
sys.path=[basedir]+sys.path
from scapy.layers.tls.automaton_cli import TLSClientAutomaton
from scapy.layers.tls.handshake import TLSClientHello
send_data = cipher_suite_code = version = None
def run_tls_test_client(send_data=None, cipher_suite_code=None, version=None):
    """Run a one-shot TLS client automaton against the local test server.

    version and cipher_suite_code are hex strings (e.g. "0303", "c09e");
    version "0002" selects the legacy SSLv2 automaton instead.  After the
    payload, b"stop_server" and b"quit" terminate both ends.
    """
    if version == "0002":
        t = TLSClientAutomaton(data=[send_data, b"stop_server", b"quit"], version="sslv2")
    else:
        ch = TLSClientHello(version=int(version, 16), ciphers=int(cipher_suite_code, 16))
        t = TLSClientAutomaton(client_hello=ch, data=[send_data, b"stop_server", b"quit"], debug=1)
    t.run()
from travis_test_server import run_tls_test_server
def test_tls_client(suite, version, q):
    """Run a full client/server exchange for one (suite, version) pair.

    Starts the echo test server in a subprocess, waits for its ready
    signal, drives the client, then forwards the server's result and the
    server process exit code through *q*.
    """
    msg = ("TestC_%s_data" % suite).encode()
    # Run server
    q_ = multiprocessing.Manager().Queue()
    th_ = multiprocessing.Process(target=run_tls_test_server, args=(msg, q_))
    th_.start()
    # Synchronise threads: the server puts True once it is listening.
    assert q_.get() is True
    time.sleep(1)
    # Run client
    run_tls_test_client(msg, suite, version)
    # Wait for server (60 s budget before declaring a hang).
    th_.join(60)
    if th_.is_alive():
        th_.terminate()
        raise RuntimeError("Test timed out")
    # Return values
    q.put(q_.get())
    q.put(th_.exitcode)
|
987,059 | 77724fcd40696209198497a80e74c85f646906c7 | from Implementation import *
from Suduku_entropy import *
import numpy as np
x_y_joint_distribution = np.array([[1/8, 1/16, 1/16, 1/4],
[1/16, 1/8, 1/16, 0],
[1/32, 1/32, 1/16, 0],
[1/32, 1/32, 1/16, 0]])
w_given_x_y = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
z_given_x_y = np.array([[0, 1, 1, 1],
[1, 0, 1, 1],
[1, 1, 0, 1],
[1, 1, 1, 0]])
x_probability_distribution = np.sum(x_y_joint_distribution, axis=0)
y_probability_distribution = np.sum(x_y_joint_distribution, axis=1)
w_probability_distribution = [5/16, 11/16]
z_probability_distribution = [11/16, 5/16]
x_w_joint_distribution = np.array([[1/8, 1/8, 1/16, 0],
[1/8, 1/8, 3/16, 1/4]])
y_w_joint_distribution = np.array([[1/8, 1/8, 1/16, 0],
[3/8, 1/8, 1/16, 1/8]])
w_z_joint_distribution = np.array([[0, 5/16],
[11/16, 0]])
y_z_joint_distribution = np.array([[3/8, 1/8, 1/16, 1/8],
[1/8, 1/8, 1/16, 0]])
x_y_w_joint_distribution = np.array([
[
[1/8, 0],
[0, 1/16],
[0, 1/32],
[0, 1/32]
],
[
[0, 1/16],
[1/8, 0],
[0, 1/32],
[0, 1/32]
],
[
[0, 1/16],
[0, 1/16],
[1/16, 0],
[0, 1/16]
],
[
[0, 1/4],
[0, 0],
[0, 0],
[0, 0]
]
])
w_z_x_joint_distribution = np.array([
[
[0, 0, 0, 0],
[1/8, 1/8, 1/16, 0]
],
[
[1/8, 1/8, 3/16, 1/4],
[0, 0, 0, 0]
]
])
print("1. H(X) = " + str(entropy(x_probability_distribution)))
print(" H(Y) = " + str(entropy(y_probability_distribution)))
print(" H(W) = " + str(entropy(w_probability_distribution)))
print(" H(Z) = " + str(entropy(z_probability_distribution)) + "\n")
print("2. H(X,Y) = " + str(joint_entropy(x_y_joint_distribution)))
print(" H(X,W) = " + str(joint_entropy(x_w_joint_distribution)))
print(" H(Y,W) = " + str(joint_entropy(y_w_joint_distribution)))
print(" H(W,Z) = " + str(joint_entropy(w_z_joint_distribution)) + "\n")
print("3. H(X|Y) = " + str(conditional_entropy(x_y_joint_distribution)))
print(" H(W|X) = " + str(conditional_entropy(x_w_joint_distribution.transpose())))
print(" H(Z|W) = " + str(conditional_entropy(w_z_joint_distribution)))
print(" H(W|Z) = " + str(conditional_entropy(np.transpose(w_z_joint_distribution))) + "\n")
print("4. H(X,Y|W) = " + str(cond_joint_entropy(x_y_w_joint_distribution)))
print(" H(W,Z|X) = " + str(cond_joint_entropy(w_z_x_joint_distribution)) + "\n")
print("5. I(X;Y) = " + str(mutual_information(x_y_joint_distribution)))
print(" I(X;W) = " + str(mutual_information(x_w_joint_distribution)))
print(" I(Y;Z) = " + str(mutual_information(y_z_joint_distribution)))
print(" I(W;Z) = " + str(mutual_information(w_z_joint_distribution)) + "\n")
print("6. I(X;Y|W) = " + str(cond_mutual_information(x_y_w_joint_distribution)))
print(" I(W;Z|X) = " + str(cond_mutual_information(w_z_x_joint_distribution)) + "\n")
print("13. H(single_square) = " + str(single_square_entropy()) + "\n")
square = [[0, 2, 0],
[8, 0, 0],
[0, 3, 0]]
print("14. H(square) = " + str(square_entropy(square)) + "\n")
sudoku_grid = np.load("sudoku.npy")
print("15. H(grid) = " + str(sudoku_entropy(sudoku_grid)) + "\n")
|
987,060 | ddd4ddb0b201709666a2067c23fae24c682556c1 | import re
import random
import jieba
import jieba.posseg as pos
jieba.add_word("放假", tag="n")
def get_rule_list(rule):
    """Extract the literal core of each rule pattern, longest first.

    Each key of *rule* looks like ``'?x<literal>?y'``; the text between
    the leading ``x`` and the next ``?`` is what pat_match searches for in
    the user's utterance.  Longest-first ordering makes the most specific
    rule win.

    rule -- dict mapping pattern strings to response-template lists.
    Returns the extracted literals sorted by length, descending.

    Improvements: the hand-written O(n^2) bubble sort is replaced with the
    stable built-in sorted() (identical ordering, including ties), and the
    regex is a raw string so ``\?`` is not a deprecated escape.
    """
    cores = [''.join(re.findall(r'x(.*?)\?', key)) for key in rule.keys()]
    return sorted(cores, key=len, reverse=True)
def pat_match(saying, rule, rule_list):
    """Answer *saying* using the first rule literal found inside it.

    Picks a random response template for the matching rule, splits the
    utterance on the literal, extracts the first noun (jieba POS tag 'n')
    from each fragment as the ?x / ?y slot fillers, and substitutes them
    into the template.  Returns '' when no rule literal matches.
    """
    n_list = []
    answer = ''
    for i in rule_list:
        if i in saying:
            # Choose a random response template for the matched rule.
            for k in list(rule.keys()):
                if i in k:
                    answer += ''.join(random.choice(list(rule[k])))
                    break
            for s in saying.split(i):
                if s == '':
                    # Empty fragment: placeholder keeps slot alignment.
                    n_list.append('noun')
                    continue
                for w, f in pos.cut(s):
                    if f == 'n':
                        n_list.append(w)
                        break
    # Map slot names to the captured nouns and fill the template.
    pat = dict(zip(['?x', '?y'], n_list))
    for p in pat.keys():
        answer = answer.replace(p, pat[p])
    return answer
rules = {'?x我想?y': ['你觉得?y有什么意义呢?', '为什么你想?y?', '你可以想想你很快就可以?y了。'],
'?x我想要?y': ['不,你不想要?y', '你写完作业才能?y']}
def main():
    """Demo driver: run two sample utterances through the rule matcher."""
    rule_list = get_rule_list(rules)
    saying = '老师,我想放假。'
    print(saying)
    answer = pat_match(saying, rules, rule_list)
    print(answer)
    saying1 = '我想要出去玩。'
    print(saying1)
    answer1 = pat_match(saying1, rules, rule_list)
    print(answer1)
if __name__ == '__main__':
main()
|
987,061 | 5d33ca50114ab5be0764237c359b513a3b7dc282 | import json
from models import *
from database import *
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for, jsonify
)
#success response
def success(obj):
    """Wrap *obj* in the standard success envelope (code 0), HTTP 200."""
    return jsonify({'code':0,'msg':'success','data':obj}), 200
#failed response
def failed(code,msg):
    """Return an error envelope with an app-level code/msg.

    NOTE(review): HTTP status is always 200 even on failure; clients must
    inspect the 'code' field.
    """
    return jsonify({'code':code,'msg':msg}), 200
#get user info
def user_info():
    """Return the RegUser row for the session's logged-in email, or None."""
    email = session.get("login_user")
    if email == None:
        return None
    row = db.session.query(RegUser).filter_by(email=email).first()
    return row
#get all tags
def get_tags(id):
    """Return the tag *names* attached to question *id*.

    NOTE(review): the parameter shadows the builtin ``id`` and this is a
    near-duplicate of get_tags_info (which returns full dicts); kept
    unchanged for interface compatibility.
    """
    rows = db.session.query(QuestionTagRel).filter_by(question_id=id).all()
    tag_ids = []
    for item in rows:
        tag_ids.append(item.tag_id)
    tags = db.session.query(Tags).filter(Tags.id.in_(tag_ids)).all()
    items = []
    for tag in tags:
        items.append(tag.name)
    return items
#get one tag info
def get_tags_info(id):
    """Return the full tag dicts (column_dict) attached to question *id*.

    Same lookup as get_tags but serializes whole rows instead of names.
    """
    rows = db.session.query(QuestionTagRel).filter_by(question_id=id).all()
    tag_ids = []
    for item in rows:
        tag_ids.append(item.tag_id)
    tags = db.session.query(Tags).filter(Tags.id.in_(tag_ids)).all()
    items = []
    for tag in tags:
        items.append(tag.column_dict())
    return items
#get tag info by tag name
def get_tag_by_name(name):
    """Return the first Tags row with this exact name, or None."""
    rows = db.session.query(Tags).filter_by(name=name).first()
    return rows
#get all category
def get_category_list():
    """Return every Category row."""
    return db.session.query(Category).all();
#get all tags
def get_tag_list():
    """Return every Tags row."""
    return db.session.query(Tags).all();
#convert object list to dict list
def convert_dict(rows):
    """Serialize each ORM row via its column_dict() method."""
    return [row.column_dict() for row in rows]
987,062 | a4bfb72529025b462ed384d82b994182ecf5957a | import pytest
import duckdb
class TestExplain(object):
    """Smoke tests: every accepted spelling of the explain-type argument
    (default, string in any case, enum attribute, raw int) returns the
    query plan as a string."""
    def test_explain_basic(self):
        # Default explain type.
        res = duckdb.sql('select 42').explain()
        assert isinstance(res, str)

    def test_explain_standard(self):
        # STANDARD accepted as lower/upper string, enum alias, enum, int 0.
        res = duckdb.sql('select 42').explain('standard')
        assert isinstance(res, str)
        res = duckdb.sql('select 42').explain('STANDARD')
        assert isinstance(res, str)
        res = duckdb.sql('select 42').explain(duckdb.STANDARD)
        assert isinstance(res, str)
        res = duckdb.sql('select 42').explain(duckdb.ExplainType.STANDARD)
        assert isinstance(res, str)
        res = duckdb.sql('select 42').explain(0)
        assert isinstance(res, str)

    def test_explain_analyze(self):
        # ANALYZE accepted in the same five spellings (int 1).
        res = duckdb.sql('select 42').explain('analyze')
        assert isinstance(res, str)
        res = duckdb.sql('select 42').explain('ANALYZE')
        assert isinstance(res, str)
        res = duckdb.sql('select 42').explain(duckdb.ANALYZE)
        assert isinstance(res, str)
        res = duckdb.sql('select 42').explain(duckdb.ExplainType.ANALYZE)
        assert isinstance(res, str)
        res = duckdb.sql('select 42').explain(1)
        assert isinstance(res, str)

    def test_explain_df(self):
        # Explain also works when the relation scans a pandas DataFrame.
        pd = pytest.importorskip("pandas")
        df = pd.DataFrame({'a': [42]})
        res = duckdb.sql('select * from df').explain('ANALYZE')
        assert isinstance(res, str)
|
987,063 | e5942fba96c4239c1e919c83b597c9712991d9aa | import requests
from lxml import etree
url = "http://www.36yeye.com/search.asp"
datas = {"type":"vedio","searchword":"教师".encode("gbk")}
responses = requests.post(url,data=datas)
responses_html = etree.HTML(responses.content.decode("gbk"))
page_elements = responses_html.xpath('//div[@class="pagination"]/ul/li')
page_url = page_elements[-2].xpath('./a/@href')[0] #page_url 搜索的url
page_max = page_elements[-2].xpath('./a/text()')[0] #page_max 页码的最大数
'''
@funcName: dowload_vedios
@function: 输入url以返回视频的url与title,并存入列表vedios_tmp
@Time : 2018-08-21
'''
def dowload_vedios(url):
    """Scrape one GBK-encoded search-result page.

    Returns a list of {'url': ..., 'title': ...} dicts, one per
    div.video_box found on the page.
    NOTE(review): the name keeps its original typo ("dowload") because the
    module-level loop calls it by that name.
    """
    vedios_tmp = []
    responses = requests.get(url)
    responses_html = etree.HTML(responses.content.decode("gbk"))
    vedio_tmps = responses_html.xpath('//div[@class="video_box"]')
    # print(responses.content.decode("gbk"))
    for vedio_tmp in vedio_tmps:
        vedios = {}
        vedios["url"] = vedio_tmp.xpath('./a/@href')[0]
        vedios["title"] = vedio_tmp.xpath('./a/img/@title')[0]
        vedios_tmp.append(vedios)
    return vedios_tmp
    # print(vedios_tmp)
'''
@funcName: get_vedios
@function: 输入视频的url与title,以下载视频并保存
@Time: 2018-08-21
'''
def get_vedios(url,title):
    """Download the video behind a detail-page URL to '<title>.mp4'.

    The real media URL is scraped out of an inline <script> block;
    the slicing below assumes the page's script layout is stable —
    NOTE(review): confirm the [8:-2] slice against a live page.
    """
    r = requests.get(url)
    # print(r.content.decode("gbk"))
    r_element = etree.HTML(r.content.decode("gbk"))
    vedio_url = r_element.xpath('//script[@type="text/javascript"]/text()')[0].split(";")[-3].strip()[8:-2]
    print(title)
    r_vedio = requests.get(vedio_url)
    # Whole response is buffered in memory before writing.
    with open(title + ".mp4","wb") as f:
        f.write(r_vedio.content)
        f.close()
for i in range(2,int(page_max) + 1):
url_tmp = url + page_url[:6] + str(i) + page_url[7:].strip("0")
vedio_boxs = dowload_vedios(url_tmp)
for vedio_box in vedio_boxs:
get_vedios(vedio_box['url'],vedio_box['title'])
print("next")
|
987,064 | e6118bc8036f7e6051af60122c32395253f16686 | # -*- coding: utf-8 -*-
__author__ = 'Javier Andrés Mansilla'
__email__ = 'javimansilla@gmail.com'
__version__ = '0.1.0'
import logging
def basicConfig(**kwargs):
    """Configure the root logger with INFO level and a project-standard
    format; any keyword argument overrides these defaults before being
    passed through to logging.basicConfig."""
    defaults = {
        'level': logging.INFO,
        'format': "%(name)s %(levelname)s %(asctime)s - %(message)s",
    }
    defaults.update(kwargs)
    logging.basicConfig(**defaults)
|
987,065 | c59c4d1e2f3e81341d40170a5de9760c83804753 | import shelve
berkas = open('L200190180.txt', 'r')
F = shelve.open('kegiatan1.data')
print (F['Data'][2])
print (F['Data'][0])
print (F['Data'][1])
|
987,066 | 0c9ab97bc28b7a02d57cb3f1c8312a7e96c78f20 | from flask import Flask
import requests
import string
import os
import urllib
# reinitialising my file
f = open("listing.txt", "w")
f.write("")
f.close()
# request the list of metadata and convert it to string then split the content in to list
resp= requests.get("http://169.254.169.254/latest/meta-data/")
getcont = resp.content
getcont =getcont.decode('utf-8')
splitcont = getcont.split('\n')
# initializing my variables
conttodic = {1 :""}
indx=1
# printing the list on the screen and saving it to dictionary (converting it from list)
for item in splitcont:
conttodic [indx] = item
print(str(indx) + "-" + conttodic[indx])
indx +=1
# saving the list in to a file (optinal)
i=1
for wrtcont in splitcont:
f = open("listing.txt", "a")
f.write(str(i) + " " + wrtcont + "\n")
i +=1
f.close()
while True:
try :
metaindex = int(input("Please enter the number of the Metadata that you wish to fetch from the list above or ctrl+c to exit: "))
if metaindex > 0 and metaindex <= len(splitcont):
temp = conttodic[metaindex]
fulurl = "http://169.254.169.254/latest/meta-data/{}"
resp2= requests.get(fulurl.format(temp))
getcont2 = resp2.content
getcont2 = getcont2.decode('utf-8')
print(getcont2)
else:
print( metaindex , "is out of the index range ")
except KeyboardInterrupt:
print('\n')
break
except:
print ("please enter an integer or ctrl+c to exit: ")
|
987,067 | db9c8bbc85909701296c2564c723abe729a578eb | from django.db.models import Count
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from api.models import City, CityFact, CityImage, CityVisitLog
from api.modules.city.serializers import AllCitiesSerializer, CitySerializer, CityImageSerializer, CityFactSerializer, \
CityVisitSerializer
@api_view(['GET'])
def get_all_cities(request, no_of_cities=8):
    """
    Returns a list of cities with maximum number of logs (visits)
    :param request:
    :param no_of_cities: (default count: 8)
    :return: 200 successful
    """
    # Rank cities by how many CityVisitLog rows ('logs') point at them.
    cities = City.objects.annotate(visit_count=Count('logs')).order_by('-visit_count')[:no_of_cities]
    serializer = AllCitiesSerializer(cities, many=True)
    return Response(serializer.data)
@api_view(['GET'])
def get_city(request, city_id):
    """
    Returns a city on the basis of city id, recording the visit as a
    side effect.
    :param request:
    :param city_id:
    :return: 404 if invalid city id is sent
    :return: 200 successful
    """
    try:
        city = City.objects.get(pk=city_id)
    except City.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    # Best-effort visit logging: a failure to record must never break the
    # lookup itself, so any exception is deliberately swallowed.
    # (Fix: the caught exception was previously bound to an unused name.)
    try:
        city_visit_log = CityVisitLog(city=city, user=request.user)
        city_visit_log.save()
    except Exception:
        pass
    serializer = CitySerializer(city)
    return Response(serializer.data)
@api_view(['GET'])
def get_city_by_name(request, city_prefix):
    """
    Returns a list of cities that starts with the given city prefix
    (case-insensitive, capped at 5 results)
    :param request:
    :param city_prefix:
    :return: 200 successful
    """
    cities = City.objects.filter(city_name__istartswith=city_prefix)[:5]
    serializer = AllCitiesSerializer(cities, many=True)
    return Response(serializer.data)
@api_view(['GET'])
def get_all_city_images(request, city_id):
    """
    Returns a list of all the images for a given city id; an unknown id
    simply yields an empty list.
    :param request:
    :param city_id:
    :return: 200 successful
    """
    # QuerySet.filter() never raises DoesNotExist, so the previous
    # try/except returning 404 was dead code — behaviour is unchanged.
    city_images = CityImage.objects.filter(city=city_id)
    serializer = CityImageSerializer(city_images, many=True)
    return Response(serializer.data)
@api_view(['GET'])
def get_all_city_facts(request, city_id):
    """
    Returns a list of all the facts for a given city id; an unknown id
    simply yields an empty list.
    :param request:
    :param city_id:
    :return: 200 successful
    """
    # QuerySet.filter() never raises DoesNotExist, so the previous
    # try/except returning 404 was dead code — behaviour is unchanged.
    city_facts = CityFact.objects.filter(city=city_id)
    serializer = CityFactSerializer(city_facts, many=True)
    return Response(serializer.data)
@api_view(['GET'])
def get_city_visits(request):
    """
    Returns a list of cities visited by a user, most-visited first
    :param request:
    :return: 404 if invalid user not authenticated
    :return: 200 successful
    """
    city_visits = CityVisitLog.objects.filter(user=request.user).values('city_id').annotate(
        visit_count=Count('city')).order_by('-visit_count')
    # NOTE(review): one City query per visited city (N+1); fine for small
    # result sets, consider in_bulk() if this grows.
    for visit in city_visits:
        visit['city'] = City.objects.get(pk=visit['city_id'])
    serializer = CityVisitSerializer(city_visits, many=True)
    return Response(serializer.data)
|
987,068 | deb8761f8ee53f3ae682e4c961a405048613f306 | from django.conf import settings
from sentry.utils.services import LazyServiceWrapper
from .base import StringIndexer
backend = LazyServiceWrapper(
StringIndexer,
settings.SENTRY_METRICS_INDEXER,
settings.SENTRY_METRICS_INDEXER_OPTIONS,
)
backend.expose(locals())
from typing import TYPE_CHECKING
if TYPE_CHECKING:
bulk_record = StringIndexer().bulk_record
record = StringIndexer().record
resolve = StringIndexer().resolve
reverse_resolve = StringIndexer().reverse_resolve
|
987,069 | 314e7e7c48ed5dc6a4085cc9ecd3c42ff5ac8b80 | # Original algorithm by gdkchan
# Ported and improved (a tiny bit) by Stella/AboodXD
BCn_formats = [0x42, 0x43, 0x44, 0x49, 0x4a, 0x4b, 0x4c]
bpps = {0x25: 4, 0x38: 4, 0x3d: 4, 0x3c: 2, 0x3b: 2, 0x39: 2, 1: 1, 0xd: 2,
0x42: 8, 0x43: 16,0x44: 16, 0x49: 8, 0x4a: 8, 0x4b: 16, 0x4c: 16}
xBases = {1: 4, 2: 3, 4: 2, 8: 1, 16: 0}
padds = {1: 64, 2: 32, 4: 16, 8: 8, 16: 4}
def roundSize(size, pad):
    """Round *size* up to the next multiple of *pad* (pad must be a
    power of two)."""
    remainder = size & (pad - 1)
    if remainder:
        # Clear the low bits, then bump to the next multiple.
        size = (size & ~(pad - 1)) + pad
    return size
def pow2RoundUp(v):
    """Round *v* up to the next power of two (32-bit smear trick);
    returns *v* unchanged when it is already a power of two."""
    v -= 1
    # Smear the top set bit downward so every lower bit becomes 1.
    v |= (v + 1) >> 1
    for shift in (2, 4, 8, 16):
        v |= v >> shift
    return v + 1
def isPow2(v):
    """True iff *v* is a positive power of two (falsy for 0)."""
    if not v:
        return v
    # Exactly one bit set <=> clearing the lowest set bit yields zero.
    return not (v & (v - 1))
def countZeros(v):
    """Count trailing zero bits in the low 32 bits of *v*; returns 32
    when none of those bits are set (e.g. v == 0)."""
    for bit in range(32):
        if v & (1 << bit):
            return bit
    return 32
def deswizzle(width, height, format_, data):
    """Convert *data* from the GPU block-swizzled layout to linear order.

    width/height are texel dimensions; for BCn formats they are converted
    to 4x4-block counts first.  Bytes-per-element comes from the bpps
    table.  Elements whose source or target offset would fall outside
    *data* are silently skipped.  Returns a new bytearray.
    """
    pos_ = 0
    bpp = bpps[format_]
    origin_width = width
    origin_height = height
    if format_ in BCn_formats:
        # BCn textures are addressed per 4x4 block, not per texel.
        origin_width = (origin_width + 3) // 4
        origin_height = (origin_height + 3) // 4
    # xb/yb: number of low-order coordinate bits getAddr may interleave.
    xb = countZeros(pow2RoundUp(origin_width))
    yb = countZeros(pow2RoundUp(origin_height))
    hh = pow2RoundUp(origin_height) >> 1;
    # Heuristic: drop one y bit for heights just above a power of two.
    if not isPow2(origin_height) and origin_height <= hh + hh // 3 and yb > 3:
        yb -= 1
    # Row pitch is padded per-bpp.
    width = roundSize(origin_width, padds[bpp])
    result = bytearray(data)
    xBase = xBases[bpp]
    for y in range(origin_height):
        for x in range(origin_width):
            pos = getAddr(x, y, xb, yb, width, xBase) * bpp
            if pos_ + bpp <= len(data) and pos + bpp <= len(data):
                result[pos_:pos_ + bpp] = data[pos:pos + bpp]
            pos_ += bpp
    return result
def swizzle(width, height, format_, data):
    """Convert linear row-major texture bytes into swizzled (GPU-tiled) order.

    Exact inverse of deswizzle(): identical addressing, with the source and
    destination offsets exchanged.
    """
    pos_ = 0
    bpp = bpps[format_]  # bytes per element (pixel, or 4x4 block for BCn)
    origin_width = width
    origin_height = height
    if format_ in BCn_formats:
        # BCn stores 4x4 pixel blocks; address blocks instead of pixels.
        origin_width = (origin_width + 3) // 4
        origin_height = (origin_height + 3) // 4
    # log2 of the power-of-two-rounded dimensions: bit budget per axis.
    xb = countZeros(pow2RoundUp(origin_width))
    yb = countZeros(pow2RoundUp(origin_height))
    hh = pow2RoundUp(origin_height) >> 1;
    # Same tall-image heuristic as deswizzle().
    if not isPow2(origin_height) and origin_height <= hh + hh // 3 and yb > 3:
        yb -= 1
    # Row pitch rounded up to the tiling alignment required for this bpp.
    width = roundSize(origin_width, padds[bpp])
    result = bytearray(data)
    xBase = xBases[bpp]
    for y in range(origin_height):
        for x in range(origin_width):
            # Swizzled destination offset fed from linear source offset pos_.
            pos = getAddr(x, y, xb, yb, width, xBase) * bpp
            if pos + bpp <= len(data) and pos_ + bpp <= len(data):
                result[pos:pos + bpp] = data[pos_:pos_ + bpp]
            pos_ += bpp
    return result
def getAddr(x, y, xb, yb, width, xBase):
    """Map linear element coordinates (x, y) to a swizzled element index.

    Interleaves low-order x and y bits (xCnt/yCnt bits per round, with the y
    run doubling each round), then appends the remaining high bits row-major
    using the padded `width` pitch.  xb/yb are each axis' bit budgets; xBase
    is the bpp-dependent size of the first x run.
    """
    xCnt = xBase
    yCnt = 1
    xUsed = 0
    yUsed = 0
    address = 0
    # Interleave until xBase + 2 x-bits are consumed or the x budget runs out.
    while (xUsed < xBase + 2) and (xUsed + xCnt < xb):
        xMask = (1 << xCnt) - 1
        yMask = (1 << yCnt) - 1
        # Note: << binds looser than +, so these shift by (xUsed + yUsed [+ xCnt]).
        address |= (x & xMask) << xUsed + yUsed
        address |= (y & yMask) << xUsed + yUsed + xCnt
        x >>= xCnt
        y >>= yCnt
        xUsed += xCnt
        yUsed += yCnt
        # Next round: one x bit (while budget remains), double the y run.
        xCnt = max(min(xb - xUsed, 1), 0)
        yCnt = max(min(yb - yUsed, yCnt << 1), 0)
    # Remaining high bits laid out row-major inside the padded pitch.
    address |= (x + y * (width >> xUsed)) << (xUsed + yUsed)
    return address
|
987,070 | 69608e002cdabe2d13338c3285477cae31e0994d | #!/usr/bin/python3
import random

# Print four independent uniform random integers from [1, 10],
# space-separated on a single line.
samples = [str(random.randint(1, 10)) for _ in range(4)]
print(' '.join(samples))
987,071 | aa9765d793f00d7b9d71c79a80b68d9ada59f177 | import sys
import imp
import os
# ld MEMORY-region template; the name keeps the historical "LAYPUT" typo
# because it is part of this module's importable surface.
MEMORY_LAYPUT = '''
MEMORY
{
ram : ORIGIN = 0x%x, LENGTH = 0x%x
}
'''

# SECTIONS layout used when no MEMORY region is given: .text is placed at an
# explicit address taken from the symbol table.
LDS_FORMAT1 = '''
SECTIONS
{
. = 0x%(text_addr)x;
.text : { *(.text) }
.data : { *(.data) }
%(other_sections)s
}
'''

# SECTIONS layout used together with a MEMORY region named "ram".
LDS_FORMAT2 = '''
SECTIONS
{
.text : { *(.text) } > ram
.data : { *(.data) } > ram
%(other_sections)s
}
'''

# One extra section pinned at a fixed address, used for code hooks.
HOOK_SECTION_FORMAT = '''
. = 0x%(hook_addr)x;
.%(hook_section_name)s = .;
'''

def generate_lds(symbols, hook_sections, memory_layout=None):
    '''
    param symbols: a dict of {symbol : address}
    param hook_sections: a dict of {hook_section_name : hook_addr}
        (docstring fixed: the code iterates name -> address, not the reverse)
    param (optional) memory_layout: a tuple of (orig, length)
    returns: ld script
    '''
    # .items() instead of the Python-2-only .iteritems(): identical iteration
    # behaviour, and the function now works on Python 3 as well.
    other_sections = '\n\n'.join(
        HOOK_SECTION_FORMAT % {'hook_addr' : hook_addr,
                               'hook_section_name' : hook_section_name}
        for hook_section_name, hook_addr in hook_sections.items())
    if memory_layout is None:
        lds = LDS_FORMAT1 % {'text_addr' : symbols['my_text_address'],
                             'other_sections' : other_sections}
    else:
        lds = MEMORY_LAYPUT % memory_layout
        lds += LDS_FORMAT2 % {'other_sections' : other_sections}
    # Emit "name = 0xADDR;" symbol assignments ahead of the layout.
    symbols_text = '\n'.join('%s = 0x%x;' % (sym_name, sym_addr)
                             for sym_name, sym_addr in symbols.items())
    lds = symbols_text + lds
    return lds
def main(config_file_name, output_lds_file_name):
    '''
    param config_file_name: a python file with globals of symbols
    param output_lds_file_name: where to save the ld script
    '''
    # imp.load_source is deprecated (and removed in Python 3.12); kept here
    # for the legacy environment this script targets.
    config = imp.load_source(os.path.basename(config_file_name).split('.py')[0],
                             config_file_name)
    # NOTE(review): a config-provided memory_layout is never forwarded to
    # generate_lds here -- confirm whether that is intentional.
    lds = generate_lds(config.symbols, config.hook_sections)
    # 'wb' with a str payload only works on Python 2; use 'w' if ported.
    with open(output_lds_file_name, 'wb') as lds_file:
        lds_file.write(lds)
|
987,072 | 58466ee3935c4aec18684b164a07bbb3552b0327 | #!/usr/bin/env vpython
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Script for determining which tests are unexpectedly passing.
This is particularly of use for GPU tests, where flakiness is heavily suppressed
but may be forgotten to be unsuppressed once the root cause is fixed.
This script depends on the `rdb` tool, which is available as part of depot
tools, and the `bq` tool, which is available as part of the Google Cloud SDK
https://cloud.google.com/sdk/docs/quickstarts.
Example usage:
find_unexpected_passing_tests.py \
--builder <builder to check, can be repeated, optional> \
--num-samples <number of builds to query, optional> \
--project <billing project>
Concrete example:
find_unexpected_passing_tests.py \
--builder "Win10 FYI x64 Release (NVIDIA)" \
--num-samples 10 \
--project luci-resultdb-dev
The --project argument can be any project you are associated with in the
Google Cloud console https://console.cloud.google.com/ (see drop-down menu in
the top left corner) that has sufficient permissions to query BigQuery.
"""
import argparse
import json
import os
import subprocess
QUERY_TEMPLATE = """\
WITH builds AS (
SELECT
id,
start_time,
builder.bucket,
builder.builder
FROM
`cr-buildbucket.chromium.builds`
WHERE
builder.builder = "{builder}"
# Ignore branch builders
AND (builder.bucket = "ci" OR builder.bucket = "try")
# Optimization
AND create_time >= TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL 30 DAY)
)
SELECT * FROM builds ORDER BY start_time DESC LIMIT {num_samples}
"""
DEFAULT_BUILDERS = [
# CI
'Linux FYI Release (Intel HD 630)',
'Linux FYI Release (NVIDIA)',
'Mac FYI Release (Intel)',
'Mac FYI Retina Release (AMD)',
'Win10 FYI x64 Release (Intel HD 630)',
'Win10 FYI x64 Release (NVIDIA)',
]
# ninja build-target prefixes that add no information when reporting.
NINJA_TARGET_PREFIXES = [
    'chrome/test:telemetry_gpu_integration_test',
]

# Telemetry test-class prefixes stripped from test IDs for readability.
TEST_SUITE_PREFIXES = [
    '/gpu_tests.gpu_process_integration_test.GpuProcessIntegrationTest.',
    '/gpu_tests.hardware_accelerated_feature_integration_test.',
    '/gpu_tests.info_collection_test.InfoCollectionTest.',
    '/gpu_tests.pixel_integration_test.PixelIntegrationTest.',
    '/gpu_tests.trace_integration_test.TraceIntegrationTest.',
    ('/gpu_tests.webgl_conformance_integration_test.'
     'WebGLConformanceIntegrationTest.'),
]

def TryStripTestId(test_id):
    """Tries to strip off unnecessary information from a ResultDB test ID.

    Args:
        test_id: A ResultDB testId value.

    Returns:
        |test_id| with unnecessary information stripped off if possible.
    """
    stripped = test_id.replace('ninja://', '')
    # Remove target prefixes first, then suite prefixes (same order as before).
    for known_prefix in NINJA_TARGET_PREFIXES + TEST_SUITE_PREFIXES:
        stripped = stripped.replace(known_prefix, '')
    return stripped
def PrintUnexpectedPasses(unexpected_passes, args):
    """Prints out unexpected pass query results.

    Args:
        unexpected_passes: The output of GetUnexpectedPasses().
        args: The parsed arguments from an argparse.ArgumentParser.
    """
    # NOTE: Python 2 code (print statements, dict.iteritems).
    for builder, passes in unexpected_passes.iteritems():
        passed_all = {}   # suite -> tests that passed in every sampled build
        passed_some = {}  # suite -> (test, num_passes) passing in a subset
        for suite, tests in passes.iteritems():
            for test, num_passes in tests.iteritems():
                if num_passes == args.num_samples:
                    passed_all.setdefault(suite, []).append(test)
                else:
                    passed_some.setdefault(suite, []).append((test, num_passes))
        # Alphabetize for readability.
        for tests in passed_all.values():
            tests.sort()
        for tests in passed_some.values():
            tests.sort()
        print '##### %s #####' % builder
        if passed_all:
            print '----- Tests that passed in all runs -----'
            for suite, tests in passed_all.iteritems():
                print '%s:' % suite
                for test in tests:
                    print ' %s' % test
            print ''
        if passed_some:
            print '----- Tests that passed in some runs -----'
            for suite, tests in passed_some.iteritems():
                print '%s:' % suite
                for (test, num_passes) in tests:
                    print ' %s: %d/%d' % (test, num_passes, args.num_samples)
        print '\n\n'
def ConvertGpuToVendorName(gpu):
    """Converts a given GPU dimension string to a GPU vendor.

    E.g. a GPU containing "8086" will be mapped to "Intel".

    Args:
        gpu: A string containing a GPU dimension

    Returns:
        A string containing the GPU vendor.
    """
    if not gpu:
        return 'No GPU'
    # PCI vendor IDs, checked in the same order as before.
    for vendor_id, vendor_name in (('8086', 'Intel'),
                                   ('10de', 'NVIDIA'),
                                   ('1002', 'AMD')):
        if vendor_id in gpu:
            return vendor_name
    return gpu

def GetTestSuiteFromVariant(variant):
    """Gets a human-readable test suite from a ResultDB variant.

    Args:
        variant: A dict containing a variant definition from ResultDB

    Returns:
        A string containing the test suite.
    """
    suite_name = variant.get('test_suite', 'default_suite')
    vendor = ConvertGpuToVendorName(variant.get('gpu'))
    return '%s on %s on %s' % (suite_name, vendor, variant.get('os'))
def GetUnexpectedPasses(builds):
    """Gets the unexpected test passes from the given builds.

    Args:
        builds: The output of GetBuildbucketIds().

    Returns:
        A dict in the following form:
        {
            builder (string): {
                suite variant (string): {
                    test (string): num_passes (int),
                },
            },
        }
    """
    retval = {}
    for builder, buildbucket_ids in builds.iteritems():
        print 'Querying ResultDB for builder %s' % builder
        cmd = [
            'rdb',
            'query',
            '-json',
            '-u',  # Only get data for unexpected results.
        ]
        for bb_id in buildbucket_ids:
            cmd.append('build-%s' % bb_id)
        # rdb is noisy on stderr; only stdout carries the JSON payload.
        with open(os.devnull, 'w') as devnull:
            stdout = subprocess.check_output(cmd, stderr=devnull)
        # stdout should be a newline-separated list of JSON strings.
        for str_result in stdout.splitlines():
            result = json.loads(str_result)
            if 'testExoneration' not in result:
                continue
            # Keep only exonerations explicitly marked as unexpected passes.
            if ('Unexpected passes' not in result['testExoneration']
                ['explanationHtml']):
                continue
            test_suite = GetTestSuiteFromVariant(
                result['testExoneration']['variant']['def'])
            test_id = TryStripTestId(result['testExoneration']['testId'])
            # Count one pass per (builder, suite, test) triple.
            retval.setdefault(builder, {}).setdefault(test_suite,
                                                      {}).setdefault(test_id, 0)
            retval[builder][test_suite][test_id] += 1
    return retval
def GetBuildbucketIds(args):
    """Gets the Buildbucket IDs for the given args.

    Args:
        args: The parsed arguments from an argparse.ArgumentParser.

    Returns:
        A dict of builder (string) to list of Buildbucket IDs (string).
    """
    retval = {}
    for builder in args.builders:
        print 'Querying BigQuery for builder %s' % builder
        query = QUERY_TEMPLATE.format(builder=builder, num_samples=args.num_samples)
        cmd = [
            'bq',
            'query',
            '--format=json',
            '--project_id=%s' % args.project,
            '--use_legacy_sql=false',
            query,
        ]
        # bq is noisy on stderr; only stdout's JSON matters.
        with open(os.devnull, 'w') as devnull:
            stdout = subprocess.check_output(cmd, stderr=devnull)
        query_results = json.loads(stdout)
        # Fail fast when the query matched no builds for this builder.
        assert len(query_results)
        for result in query_results:
            retval.setdefault(builder, []).append(result['id'])
    return retval
def ParseArgs():
    """Build and validate this script's command-line arguments."""
    parser = argparse.ArgumentParser(
        description='Script to find tests which are unexpectedly passing, i.e. '
        'whose test suppressions can probably be removed/relaxed.')
    parser.add_argument('--project',
                        required=True,
                        help='A billing project to use for BigQuery queries.')
    parser.add_argument('--builder',
                        action='append',
                        dest='builders',
                        default=[],
                        help='A builder to query results from. Can be specified '
                        'multiple times to use multiple builders. If omitted, '
                        'will use a default set of builders.')
    parser.add_argument('--num-samples',
                        type=int,
                        default=100,
                        help='The number of recent builds to query.')
    args = parser.parse_args()
    # NOTE: assert is stripped under python -O, so this validates in debug runs only.
    assert args.num_samples > 0
    # Empty --builder list falls back to the default builder set.
    args.builders = args.builders or DEFAULT_BUILDERS
    return args
def main():
    """Query recent builds per builder, then report suppressed tests that passed."""
    args = ParseArgs()
    builds = GetBuildbucketIds(args)
    unexpected_passes = GetUnexpectedPasses(builds)
    PrintUnexpectedPasses(unexpected_passes, args)
if __name__ == '__main__':
main()
|
987,073 | 7720dae211df98d13df5a37fbbcb74157644c05a | from django.conf.urls import patterns, include, url
from django.contrib import admin
# Legacy Django (<1.8) URLconf using patterns() with dotted-string view names:
# admin plus a backend API for auth, news, product and culture management.
urlpatterns = patterns('',
    url(r'^admin/', include(admin.site.urls)),
    # Session endpoints
    url(r'^backend/loginAction/', 'backend.views.loginAction'),
    url(r'^backend/quitAction/', 'backend.views.quitAction'),
    # News CRUD
    url(r'^backend/getNewsList/', 'backend.views.getNewsList'),
    url(r'^backend/addNews/', 'backend.views.addNews'),
    url(r'^backend/editNews/', 'backend.views.editNews'),
    url(r'^backend/deleteNews/', 'backend.views.deleteNews'),
    url(r'^backend/getNewsById/', 'backend.views.getNewsById'),
    # Product CRUD
    url(r'^backend/getProductList/', 'backend.views.getProductList'),
    url(r'^backend/addProduct/', 'backend.views.addProduct'),
    url(r'^backend/editProduct/', 'backend.views.editProduct'),
    url(r'^backend/deleteProduct/', 'backend.views.deleteProduct'),
    url(r'^backend/getProductById/', 'backend.views.getProductById'),
    # Culture pages (read/update only)
    url(r'^backend/getCulList/', 'backend.views.getCulList'),
    url(r'^backend/editCul/', 'backend.views.editCul'),
    url(r'^backend/getCulById/', 'backend.views.getCulById'),
)
|
987,074 | c166f0379d819bc40e4afe84e0024e7c543510c1 | from os import environ
from pathlib import Path
import numpy as np
import torch
data_root_path = Path(environ.get('DATA_PATH', './data'))
def random_mask_from_state(x):
    """Return a uniform random {0, 1} mask shaped and placed like *x*."""
    mask_shape = x.shape
    return torch.randint(low=0, high=2, size=mask_shape, device=x.device)
def mask_idx_to_mask(n, i):
    """Expand integer index/indices *i* into n-bit binary masks (MSB first)."""
    idx = np.asarray(i)
    assert np.all(idx < 2 ** n)
    weights = 2 ** np.arange(n - 1, -1, -1)
    # Bit b of idx equals (idx mod 2^(b+1)) floor-divided by 2^b.
    return (idx[..., None] % (weights * 2)) // weights
def mask_to_mask_idx(mask):
    """Collapse binary mask(s) (last axis = bits, MSB first) into integers."""
    bits = np.asarray(mask)
    weights = 2 ** np.arange(bits.shape[-1] - 1, -1, -1)
    return (bits * weights).sum(-1)
def test_mask_idx_to_mask():
    # Scalar indices expand to their MSB-first 3-bit patterns; a list input
    # yields one row per index.
    assert mask_idx_to_mask(3, 0).tolist() == [0, 0, 0]
    assert mask_idx_to_mask(3, 1).tolist() == [0, 0, 1]
    assert mask_idx_to_mask(3, 2).tolist() == [0, 1, 0]
    assert mask_idx_to_mask(3, 7).tolist() == [1, 1, 1]
    assert mask_idx_to_mask(3, [1, 2]).tolist() == [[0, 0, 1], [0, 1, 0]]

def test_mask_to_mask_idx():
    # Inverse direction: masks collapse back to integer indices.
    assert mask_to_mask_idx([0, 0, 0]).tolist() == 0
    assert mask_to_mask_idx([[1, 0, 0], [1, 1, 1]]).tolist() == [4, 7]
|
987,075 | 1eec195a1c7c6642bc9e96609e68ca1874b2c135 | import SocketServer
import socket
class EchoHandler(SocketServer.BaseRequestHandler):
    # Python 2 TCP handler that echoes every received chunk back to the client.
    def handle(self):
        print "Got connection from : " , self.client_address
        # Non-empty seed so the length test passes on the first iteration;
        # recv() returning '' (peer closed the socket) ends the loop.
        data = "sandeep"
        while len(data):
            data = self.request.recv(1024)
            print "Client Send: " + data
            self.request.send(data)
        print "Client left"
# Single-threaded echo server bound to all interfaces on port 9000; handles
# one client at a time (TCPServer is not concurrent).
serverAddr = ("0.0.0.0", 9000)
server = SocketServer.TCPServer(serverAddr, EchoHandler)
server.serve_forever()
#Is this server multi-threaded?
#Code up the multi-threaded version of the Socket server
|
987,076 | 92de29fc65463d958007f3db582f32469cc6e4c4 | import time
import os.path
from pyvirtualdisplay import Display
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# Pylint code to name mapping: https://github.com/janjur/readable-pylint-messages
from behave import * # pylint:disable=wildcard-import,unused-wildcard-import
from webdriver_manager.chrome import ChromeDriverManager
def setup_chrome():
    """Start a Chrome webdriver behind a virtual X display, root-safe.

    Chromedriver logs verbosely to ~/chromedriver.log (overwritten per run).
    """
    # Needed to give the headless Chrome something to "draw" to
    display = Display(visible=0, size=(800, 600))
    display.start()
    # Needed because we are the root user, and root needs `--no-sandbox`
    # as per http://chromedriver.chromium.org/help/chrome-doesn-t-start
    options = webdriver.ChromeOptions()
    options.add_argument("--no-sandbox")
    # Add debugging options, logs for each run will overwrite the file here
    service_log_path = os.path.join(os.path.expanduser("~"), "chromedriver.log")
    service_args = ['--verbose']
    return webdriver.Chrome(ChromeDriverManager().install(),
                            options=options,
                            service_args=service_args,
                            service_log_path=service_log_path)
@given(u'we are browsing amazon.com')
def navigate_step_impl(context):
    """Behave step: launch Chrome, open amazon.com, stash the driver on context."""
    browser = setup_chrome()
    context.browser = browser
    browser.get("https://www.amazon.com")
    # Fixed settle time; an explicit WebDriverWait would be more robust.
    time.sleep(4)
    assert "Amazon.com" in browser.title
@when(u'we search for "{target}"')
def search_step_impl(context, target):
    """Behave step: type *target* into the search box and submit with Enter."""
    browser = context.browser
    searchbox = browser.find_element_by_name("field-keywords")
    searchbox.clear()
    searchbox.send_keys(target)
    searchbox.send_keys(Keys.RETURN)
    # Fixed wait for the results page to load.
    time.sleep(4)
    assert "Amazon.com" in browser.title
@then(u'we should see a "{result}"')
def result_step_impl(context, result):
    """Behave step: assert *result* appears anywhere in the current page source."""
    browser = context.browser
    assert result in browser.page_source
987,077 | 7fa161da0010b392af7b449c5590c1146f97de90 | #! /usr/bin/env python
# -*- coding:utf-8 -*-
"""
北京中学信息爬取
"""
import csv
import requests
import pymongo
from gevent.pool import Pool
import json
import re
from copy import deepcopy
from lxml import etree
import sys
HEADERS = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36',
'Upgrade-Insecure-Requests': '1',
'Host': 'map.beijing.gov.cn'
}
connect = pymongo.MongoClient('127.0.0.1', 27017)
def run(start_url):
    """Fetch the Beijing district list, then scrape each district's schools.

    One greenlet per district, capped at 20 concurrent tasks.
    """
    task_pool = Pool(20)
    headers = deepcopy(HEADERS)
    headers['X-Requested-With'] = 'XMLHttpRequest'
    response = requests.get(url=start_url, headers=HEADERS)
    content = response.content
    region_info = json.loads(str(content, encoding='utf-8'))
    # Bug fix: "&reg" in "&regionId" had been mangled into the (R) symbol by
    # an HTML-entity decode, which silently dropped the regionId parameter.
    base_url = 'https://map.beijing.gov.cn/api/place_list_for_category.json?categoryId=zx&regionId={0}'
    for region in region_info:
        url = base_url.format(region['regionId'])
        task_pool.apply_async(region_handle, args=(url, headers))
    task_pool.join()
def region_handle(url, headers):
    """Download one district's school list JSON and dispatch detail scraping."""
    response = requests.get(url=url, headers=headers)
    data = json.loads(str(response.content, encoding='utf-8'))
    school_handle(data)
def school_handle(schools):
    """Fan out one greenlet per school (max 20 concurrent detail fetches)."""
    task_pool = Pool(20)
    for school in schools:
        task_pool.apply_async(school_detail_handle, args=(school,))
    task_pool.join()
def school_detail_handle(school):
    """Fetch one school's detail page, parse its attributes, store to MongoDB.

    Extracts nature / school section / school size from the last table row of
    the detail page and merges them into the school dict before insertion.
    """
    print(school)
    base_url = 'https://map.beijing.gov.cn/place?placeId={0}&categoryId=zx'
    url = base_url.format(school['placeId'])
    response = requests.get(url, headers=HEADERS)
    content = response.content
    html = etree.HTML(content)
    rows = html.xpath('//table//tr[last()]')
    # Guard: pages without the attribute table used to raise IndexError here.
    text = rows[0].xpath('./td/text()') if rows else []
    if text:
        content = text[0]
        # NOTE(review): group() keeps the Chinese label prefix in the stored
        # value; group(1) may have been intended -- kept as-is to preserve
        # the existing data format.
        nature = re.search(r'性质:(.*?)(?=\()', content)
        school_section = re.search(r'学段:(.*?)(?=\()', content)
        school_size = re.search(r'学校规模:(.*?)(?=\()', content)
        if nature:
            school['nature'] = nature.group()
        if school_section:
            school['school_section'] = school_section.group()
        if school_size:
            school['school_size'] = school_size.group()
    # Collection.insert() was removed in pymongo 4; insert_one() is the
    # supported single-document equivalent.
    connect['Data']['School'].insert_one(school)
if __name__ == '__main__':
run("https://map.beijing.gov.cn/api/district_list_for_category.json?categoryId=zx")
# school_detail_handle({'placeId': '5ba765b97e4e7316d93853ae'})
|
987,078 | 6ac88feab39667fbd791cb195fd0e3684f7db41a | import xlsxwriter
from use_def import handle_text
from use_class import *
from codon_table import *
from codon import *
#from enc import *
from rscu import RSCU
#from basic_index import BASIC_INDEX
import sys
# Usage: script <coding-sequence file>
file = sys.argv[1]
f = open(file,"r")
#f = open("cds","r")
#file = "cds"
gene_text = f.read()
# Parse the raw text into gene objects (project helper from use_def).
gene_objs = handle_text(gene_text)
codon_table_obj = CODON_TABLE()
codon_table_name = "Standard_codons_table"
# Codon groups by base composition (S = strong G/C, W = weak A/T) at the
# first two positions, split by the third (wobble) base.
# NOTE(review): the names look swapped -- every "wwu" codon ends in C and
# every "wwc" codon ends in T(U); confirm against the intended P2 formula.
ssu = ['CCT','CGT','GCT','GGT']
wwu = ['AAC','ATC','TAC','TTC']
ssc = ['CCC','CGC','GCC','GGC']
wwc = ['AAT','ATT','TAT','TTT']
# Pool all coding sequences into a single pseudo-gene and compute RSCU
# (relative synonymous codon usage) over the pooled codons.
huge_sequence = ''
for gene_obj in gene_objs:
    huge_sequence+=gene_obj.sequence
huge_gene = GENE(huge_sequence,'huge gene')
huge_gene.rscu_obj = RSCU(huge_gene,codon_table_obj,codon_table_name)
huge_gene.rscu_obj.compute()
# Sum RSCU values within each codon group.
SSU = 0
WWU = 0
SSC = 0
WWC = 0
for codon in ssu:
    SSU+=huge_gene.rscu_obj.rscu_dict[codon]
for codon in wwu:
    WWU+=huge_gene.rscu_obj.rscu_dict[codon]
for codon in ssc:
    SSC+=huge_gene.rscu_obj.rscu_dict[codon]
for codon in wwc:
    WWC+=huge_gene.rscu_obj.rscu_dict[codon]
# P2-style codon-bias index from the group sums.
p2 = (WWC+SSU)/(WWC+WWU+SSC+SSU)
print(p2)
f.close()
|
987,079 | 64607ebda6c5a8318bf2a4a9891d076d2f7253f4 | from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
import random
# Load the ASCII point cloud, skipping the two header lines; each data row is
# space-separated fields starting with x y z followed by three color values.
pt_cloud = []
with open("pt_cloud_experiment.asc") as file:
    next(file)
    next(file)
    for row in file:
        pt_cloud.append(row.strip().split(" "))
pt_cloud = np.array(pt_cloud, dtype=np.float64)
# Downsample to 200k random points (sampling indices from 1, skipping row 0).
pt_cloud = pt_cloud[random.sample(range(1, pt_cloud.shape[0]), 200000), :]
#col = np.array(pt_cloud[:, 3:6], dtype="U10")
fig = plt.figure()
ax = fig.gca(projection='3d')
# Columns 3:6 are passed directly as RGB colors; assumes they are already
# scaled to [0, 1] -- TODO confirm against the source file.
s = ax.scatter(pt_cloud[:, 0], pt_cloud[:, 1], pt_cloud[:, 2], c=pt_cloud[:, 3:6], s=2)
plt.show()
print(pt_cloud.shape)
|
987,080 | 2a1dfdc7dbe3ae29a9db7c04f2748c07e47c28d4 | import re
# Sample phone numbers in assorted US formats.
phone_list = ["555-555-5555","555 555 5555","555.555.5555",
              "(555) 555-5555","(555)555-5555","(555)555.5555"]
# Strip every non-digit character to normalize each number.
pattern = r'\D'
for phone in phone_list:
    phone_num = re.sub(pattern, "", phone)
    # Python 2 print statement.
    print "Phone Num : ", phone_num
|
987,081 | 4d858878bb6ec3ea37361e003ddea433cf86aa36 | #!/usr/bin/env python
#######################################################################
# This file is part of Fusuma website management system.
#
# Copyright (c) hylom <hylomm at gmail.com>, 2008.
#
# This file is released under the GPL.
#
# $Id: TemplateMan.py,v 1.6 2009/01/04 18:27:48 hylom Exp $
#######################################################################
"""
This is Fusuma's Template management module.
"""
__revision__ = "$Revision: 1.6 $"
import sys
import os
import re
from string import Template
VERSION = "0.0.1"
VERSION_DATE = VERSION + " 09/26/2008"
VERSION_SPLIT = tuple(VERSION.split('.'))
class TemplateMan(object):
"""
This is Fusuma's template management class.
"""
def get_template(self, template_name):
"""
return template string.
@param template_name: template name string
@type template_name: string
"""
if( template_name in self._template_of):
templ = self._template_of[template_name]
return Template( self._proc_macros(templ) )
return Template("")
def _proc_macros(self, string):
"""
preprocess template macros.
"""
return self._macro_insert.sub( self._proc_macro_insert, string )
def _proc_macro_insert(self, matchobj):
return self._template_of.get(matchobj.group(1), matchobj.group(0))
    def add_template(self, key, template):
        """Register (or overwrite) a raw template string under *key*."""
        self._template_of[key] = template
    def __init__(self):
        """
        Initialize Template Manager.

        Builds the built-in template table and compiles the macro regexes.
        """
        # Raw templates keyed by name; filled below, then bound to the instance.
        template_of = {}
        # [%include(...)%] is recognized but currently never expanded (unused).
        self._macro_include = re.compile(r"\[%include\((.*?)\)%\]")
        # [%insert(name)%] is expanded by _proc_macros / _proc_macro_insert.
        self._macro_insert = re.compile(r"\[%insert\((.*?)\)%\]")
        #const
        # --- HTTP / HTML boilerplate fragments ---
        template_of["http_header"] = """Content-type: text/html; charset=utf-8;
"""
        template_of["html_header"] = """
<?xml version="1.0" encoding="UTF_8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="ja" lang="ja">
<head>
<title>${title}</title>
<meta http-equiv="content-script-type" content="text/javascript" />
<meta http-equiv="Content-Style-Type" content="text/css" />
${heads}
</head>
<body>
"""
        template_of["html_footer"] = """
</body></html>
"""
        template_of["info_loginerror"] = """
<div class="login-error">
<p>login error</p>
</div>
"""
        # --- Full pages assembled via [%insert(...)%] macros ---
        template_of["import_rss"] = """[%insert(http_header)%]
[%insert(html_header)%]
[%insert(general_header)%]
<hr>
${body}
<hr>
[%insert(html_footer)%]
"""
        template_of["filer"] = """[%insert(http_header)%]
[%insert(html_header)%]
[%insert(general_header)%]
<hr>
<form method="post" enctype="multipart/form-data" action="${SCRIPT_NAME}/filer/${file_rurl}">
<div class="create">
<input type="text" name="newfilename" class="newfilename" size="60" />
<input type="submit" name="op" value="newfile" />
<input type="submit" name="op" value="newdir" />
</div>
</form>
<hr>
${filer_body}
<hr>
[%insert(html_footer)%]
"""
        template_of["editor"] = """[%insert(http_header)%]
[%insert(html_header)%]
[%insert(general_header)%]
<hr>
<form method="post" enctype="multipart/form-data" action="${SCRIPT_NAME}/editor/${PATH}">
<div class="textarea">
<textarea name="text" class="text" cols="80" rows="20" wrap="soft">${text}</textarea>
</div>
<div class="buttons">
<input type="submit" name="op" value="save" />
</div>
</form>
<hr>
<a href="${FILER_URL}">back to filer</a>
[%insert(html_footer)%]
"""
        #########################################################################
        # Editor variants for flavour and CSS files (same layout, different
        # form action endpoints).
        template_of["editor_flavours"] = """[%insert(http_header)%]
[%insert(html_header)%]
[%insert(general_header)%]
<hr>
<form method="post" enctype="multipart/form-data" action="${SCRIPT_NAME}/editor_flavours/${PATH}">
<div class="textarea">
<textarea name="text" class="text" cols="80" rows="20" wrap="soft">${text}</textarea>
</div>
<div class="buttons">
<input type="submit" name="op" value="save" />
</div>
</form>
<hr>
<a href="${FILER_URL}">back to filer</a>
[%insert(html_footer)%]
"""
        template_of["editor_css"] = """[%insert(http_header)%]
[%insert(html_header)%]
[%insert(general_header)%]
<hr>
<form method="post" enctype="multipart/form-data" action="${SCRIPT_NAME}/editor_css/${PATH}">
<div class="textarea">
<textarea name="text" class="text" cols="80" rows="20" wrap="soft">${text}</textarea>
</div>
<div class="buttons">
<input type="submit" name="op" value="save" />
</div>
</form>
<hr>
<a href="${FILER_URL}">back to filer</a>
[%insert(html_footer)%]
"""
        #########################################################################
        # Login page with optional challenge/response support (client-side JS).
        template_of["login"] = """[%insert(http_header)%]
[%insert(html_header)%]
<script type="text/javascript" src="/jq/jquery-1.3.2.min.js"></script>
<script type="text/javascript" src="/jq/jquery.sha1.js"></script>
<script type="text/javascript" src="/js/login.js"></script>
${error_message}
<div class="login-form">
<form action="${SCRIPT_NAME}/login/" method="post" id="fsm-login-form">
<input type="hidden" name="return_to" value="/" />
<input type="hidden" name="login" value="1" />
<input type="hidden" name="cr_id" value="${cr_id}" />
<input type="hidden" name="cr_key" value="${cr_key}" />
<input type="hidden" name="cr_auth" value="off" />
<div id="login-form-loginname">
<span>login:</span><input type="text" name="loginname" value="" />
</div>
<div id="login-form-password">
<span>password:</span><input type="password" name="password" />
</div>
<div id="login-form-preserve">
<span><input type="checkbox" id="preserve" name="preserve" /><label for="preserve">preserve login status</label></span>
</div>
<div id="login-form-usecr">
<span><input type="checkbox" id="use_challenge" name="use_challenge" disabled="disabled" /><label for="use_challenge">use challenge & response auth.</label></span>
</div>
<div id="login-form-submit">
<input type="submit" value="login" />
</div>
</form>
</div>
[%insert(html_footer)%]
"""
        # Post-login redirect page (meta refresh to the target URL).
        template_of["login_succeed"] = """
<html>
<head>
<meta http-equiv="refresh" content="0;url=${url}">
</head>
</html>
"""
        template_of["root"] = """[%insert(http_header)%]
[%insert(html_header)%]
[%insert(header_bar)%]
[%insert(html_footer)%]
"""
        # Navigation bar shown on every management page.
        template_of["header_bar"] = """
<div id="header-bar">
<ul>
<li><span class="username">${user_name}</span></li>
<li><a href="#" id="link-to-stories">stories</a></li>
<li><a href="${SCRIPT_NAME}/new_story/" id="link-to-new-story">new story</a></li>
<li><a href="#" id="link-to-settings">settings</a></li>
<li><a href="${SCRIPT_NAME}/filer/" id="link-to-filer">filer</a></li>
<li><a href="${SCRIPT_NAME}/filer_css/" id="link-to-filer">css editor</a></li>
<li><a href="${SCRIPT_NAME}/filer_flavours/" id="link-to-filer">flavour editor</a></li>
<li><a href="${SCRIPT_NAME}/fetch_rss/" id="link-to-logout">Fetch RSS</a></li>
<li><a href="${SCRIPT_NAME}/lcomment/" id="link-to-lcomment">LiteComment</a></li>
<li><a href="${SCRIPT_NAME}/logout/" id="link-to-logout">logout</a></li>
</ul>
</div>
"""
        # "general_header" aliases the navigation bar.
        template_of["general_header"] = template_of["header_bar"]
        self._template_of = template_of
|
987,082 | 823beed0983176628e2bd097312f71ad2733d5bd | # Generated by Django 3.1.1 on 2020-11-06 17:33
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Repoint Account.followers / Account.following at AUTH_USER_MODEL with
    distinct reverse accessors (u_followers / u_following)."""

    dependencies = [
        ('account', '0003_auto_20201106_2118'),
    ]

    operations = [
        migrations.AlterField(
            model_name='account',
            name='followers',
            field=models.ManyToManyField(related_name='u_followers', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='account',
            name='following',
            field=models.ManyToManyField(related_name='u_following', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
987,083 | 590e6a53b4eaf541daf4dacbcf79fab4671b1236 | from datetime import datetime
from flask import request, render_template, session, redirect, url_for, abort
from enums.enums import UserRole, EventStatus, Gender
from models.event import Event
from models.forms.edit_settings_form import EditSettingsForm, EditSettingsNoPasswordForm
from models.forms.event_form import EventForm
from models.forms.register_form import RegisterForm
from models.user import User
from services.errors import ESError
from services.invite_service import InviteService
from services.media_service import MediaService
from services.pass_card_service import PassCardService
from services.reward_service import RewardService
from utils.db_wrapper import DBWrapper
from view.config import Config as ViewConfig
from view.loc import Loc
from utils.view_data_factory import ProfileViewCreator
class Controller:
    def __init__(self, app, db):
        """Wire the Flask app, the DB wrapper and the domain services together."""
        self.app = app
        self.db = DBWrapper(db)
        self.invites = InviteService(self.db)
        self.media = MediaService(app)
        self.rewards = RewardService(self.db, app)
        self.pass_cards = PassCardService(self.db)
    @classmethod
    def _getUser(cls):
        # Current user as identified by the session's "login" key.
        return User.query.filter_by(login=session["login"]).first()
    @classmethod
    def _getEvent(cls, event_id):
        # Event lookup helper; returns None for unknown ids.
        return Event.query.filter_by(id=event_id).first()
    def registerInvite(self):
        """Signup step 1: validate an invite code and stash it in the session."""
        if request.method == 'POST':
            if 'invite' in request.form:
                invite = request.form["invite"]
                if self.invites.hasInvite(invite):
                    session["invite"] = invite
                    return redirect(url_for("register"))
                else:
                    # Invalid or already-used invite (user-facing Russian text).
                    error = "Неправильный или использованный инвайт"
                    return render_template("register_invite.html", error=error)
        return render_template("register_invite.html")
    def registerUser(self):
        """Signup step 2: consume the invite, create the user, log them in."""
        if "invite" not in session:
            # No validated invite in the session -- restart at the invite step.
            return redirect(url_for("register_invite"))
        invite = session["invite"]
        form = RegisterForm(request.form)
        login = str(form.login.data)
        user = User.query.filter_by(login=login).first()
        if user is not None:
            # Login already taken (user-facing Russian text).
            error = "Такой юзер уже существует"
            return render_template("register.html", error=error)
        if not self.invites.tryUseInvite(invite):
            error = "Неправильный или использованный инвайт"
            return render_template("register.html", error=error)
        else:
            session.pop("invite")
        if form.validate():
            # NOTE(review): password is stored in plain text -- consider hashing.
            user = User(
                login=form.login.data,
                password=str(form.password.data),
                gender=form.gender.data
            )
            # Default avatar depends on gender.
            user.image_big = "img/male256.png"
            if user.gender == "Female":
                user.image_big = "img/female256.png"
            self.db.add(user)
            self.db.commit()
            session["logged_in"] = True
            session["login"] = user.login
            session["admin"] = user.role == UserRole.ADMIN.name
            return redirect(url_for('index'))
        return render_template('register.html')
    def loginUser(self):
        """Check posted credentials and populate the session on success."""
        login = request.form['login']
        password_candidate = request.form['password']
        user = User.query.filter_by(login=login).first()
        if user is not None:
            password = user.password
            if not password:
                # Fall back to the one-off unregistered password.
                # NOTE(review): raises AttributeError when unregistered_password
                # is None -- confirm it always exists for password-less accounts.
                unregistered_password = user.unregistered_password.password
                password = unregistered_password
            # NOTE(review): plain-text password comparison -- consider hashing.
            if password == password_candidate:
                # Promote the unregistered password to a permanent one.
                user.password = password
                if user.unregistered_password:
                    self.db.delete(user.unregistered_password)
                self.db.commit()
                session["logged_in"] = True
                session["login"] = login
                session["admin"] = user.role == UserRole.ADMIN.name
                return redirect(url_for("index"))
            else:
                error = "Wrong password"
                return render_template("login.html", error=error)
        else:
            error = "No user"
            return render_template("login.html", error=error)
    def logoutUser(self):
        """Drop the whole session (login, admin flag, pending invite) and go to login."""
        session.clear()
        return redirect(url_for('login'))
    def getIndex(self):
        """The index page just redirects to the current user's own profile."""
        user = self._getUser()
        return redirect(url_for('profile', user_name=user.login))
    def get_user_profile(self, user_name):
        """Render a user's profile; unknown names fall back to the viewer's own."""
        user = User.query.filter_by(login=user_name).first()
        myself = self._getUser()
        if user is None:
            return redirect(url_for('profile', user_name=myself.login))
        view = ProfileViewCreator.create(myself, user)
        config = ViewConfig(True)
        # Locale is hard-coded to Russian.
        return self._renderUserTemplate('profile.html', view=view, config=config, loc=Loc("ru"))
    def getAllUsers(self):
        """List every registered user."""
        users = User.query.all()
        return self._renderUserTemplate('all_users.html', users=users)
    def getCreatedEvents(self):
        """List the current user's own events, ordered by start date."""
        user = self._getUser()
        events = user.events_created.order_by('date_start')
        return self._renderUserTemplate('events.html', events=events, created=True)
    def getPublishedEvents(self):
        """List all published events, ordered by start date."""
        events = Event.query.filter_by(published=True).order_by('date_start')
        return self._renderUserTemplate('events.html', events=events)
    def getParticipateEvents(self):
        """List events the user joined plus those they are wait-listed for."""
        user = self._getUser()
        events_participate = user.events_participate.all()
        events_wait = user.events_wait.all()
        events = events_participate + events_wait
        return self._renderUserTemplate('events.html', events=events, participate=True)
    def getInvites(self):
        """Show invite codes; POST mints a new batch (100 codes, 10 uses each
        -- presumed meaning of the two arguments, confirm in InviteService)."""
        if request.method == 'POST':
            self.invites.createInvites(100, 10)
            return redirect(url_for('invites'))
        invites = self.invites.getInvites()
        return self._renderUserTemplate('invites.html', invites=invites)
def createEvent(self):
    """GET: show the creation form. POST: validate it and persist a new Event.

    On successful creation redirects to the new event's page; any invalid
    POST falls through to the events list.
    """
    if request.method == 'GET':
        return self._renderUserTemplate("event_create.html", image_big="img/event.png")
    if request.method == 'POST':
        form = EventForm(request.form)
        # "rewards" arrives as a repeated form field; only non-empty values
        # are appended to the WTForms field list before validation.
        rewards = request.form.getlist("rewards")
        for r in rewards:
            if r:
                form.rewards.append_entry(r)
        if form.validate():
            user = self._getUser()
            event = Event(
                author_id=user.id,
                title=form.title.data,
                description_short=form.description_short.data,
                description=form.description.data,
                date_start=form.date_start.data,
                max_participants=form.max_participants.data,
                best_player_reward=form.best_player_reward.data,
                image_big="img/event.png"
            )
            # Rewards are stored as integers parsed from the form strings.
            event.rewards = []
            rewards = form.rewards.data
            for r in rewards:
                event.rewards.append(int(r))
            self.db.add(event)
            self.db.commit()
            return redirect(url_for("event", event_id=event.id))
    return redirect(url_for("events"))
def editEvent(self, event_id):
    """GET: show the edit form for *event_id*. POST: apply validated changes.

    The POST path re-reads the event id from the form payload (not the URL)
    and silently redirects to the events list on any validation failure.
    """
    if request.method == 'GET':
        event = self._getEvent(event_id)
        if event:
            return self._renderUserTemplate("event_edit.html", event=event)
        else:
            return redirect(url_for("events"))
    if request.method == 'POST':
        if "event_id" not in request.form:
            return redirect(url_for("events"))
        # The form's event_id overrides the one from the route.
        event_id = request.form["event_id"]
        form = EventForm(request.form)
        rewards = request.form.getlist("rewards")
        for r in rewards:
            if r:
                form.rewards.append_entry(r)
        if form.validate():
            event = Event.query.filter_by(id=event_id).first()
            if event:
                event.title = form.title.data
                event.description_short = form.description_short.data
                event.description = form.description.data
                event.date_start = form.date_start.data
                event.max_participants = form.max_participants.data
                event.best_player_reward = form.best_player_reward.data
                # Replace the reward list wholesale with the parsed ints.
                event.rewards = []
                rewards = form.rewards.data
                for r in rewards:
                    event.rewards.append(int(r))
                self.db.commit()
                return redirect(url_for("event", event_id=event.id))
    return redirect(url_for("events"))
def deleteEvent(self):
    """Delete the event named in the POSTed form and redirect appropriately.

    404 when no event_id was posted; if the event does not exist, redirect
    back to its (now dead) page rather than the list.
    """
    if 'event_id' not in request.form:
        return abort(404)
    event_id = request.form["event_id"]
    event = self._getEvent(event_id)
    if event:
        self.db.delete(event)
        self.db.commit()
        return redirect(url_for("events"))
    return redirect(url_for("event", event_id=event_id))
def getEvent(self, event_id):
    """Render the detail page for one event; 404 when it does not exist."""
    found = self._getEvent(event_id)
    if found is not None:
        return self._renderUserTemplate('event.html', event=found)
    return abort(404)
def participateEvent(self, event_id):
    """On POST, put the current user onto the event's wait list.

    Users are never added straight to the participant list here — an
    organiser promotes them via addParticipantToEvent.
    """
    if request.method == 'POST':
        event = self._getEvent(event_id)
        if event is not None:
            user = self._getUser()
            event.wait_list.append(user)
            self.db.commit()
    return redirect(url_for('event', event_id=event_id))
def leaveEvent(self, event_id):
    """On POST, remove the current user from both the participant and wait lists.

    Membership is matched by user id rather than object identity, so it
    works even if the collections hold separately-loaded User instances.
    """
    if request.method == 'POST':
        event = self._getEvent(event_id)
        if event is not None:
            user = self._getUser()
            users = event.participants.all()
            for u in users:
                if u.id == user.id:
                    event.participants.remove(u)
                    break
            users = event.wait_list.all()
            for u in users:
                if u.id == user.id:
                    event.wait_list.remove(u)
                    break
            self.db.commit()
    return redirect(url_for('event', event_id=event_id))
def publishEvent(self, event_id):
    """On POST, mark the event as publicly visible."""
    if request.method == 'POST':
        event = self._getEvent(event_id)
        if event is not None:
            event.published = True
            self.db.commit()
    return redirect(url_for('event', event_id=event_id))
def unpublishEvent(self, event_id):
    """On POST, hide the event from the public listing again."""
    if request.method == 'POST':
        event = self._getEvent(event_id)
        if event is not None:
            event.published = False
            self.db.commit()
    return redirect(url_for('event', event_id=event_id))
def changeEventState(self):
    """Transition an event through its lifecycle (e.g. FINISHED -> REWARDED).

    REWARDED is only reachable from FINISHED. Entering FINISHED uploads the
    posted result file and collects results; entering REWARDED hands out the
    rewards. Leaving REWARDED-bound states clears previously stored results.
    """
    if 'event_id' not in request.form \
            or 'status' not in request.form:
        return abort(404)
    event_id = request.form["event_id"]
    status = request.form["status"]
    event = self._getEvent(event_id)
    if event is not None:
        if EventStatus.HasName(status):
            # Rewards may only be distributed after the event has finished.
            if status == EventStatus.REWARDED.name and event.status != EventStatus.FINISHED.name:
                return abort(404)
            event.status = status
            if status != EventStatus.REWARDED.name:
                results = event.results.all()
                # NOTE(review): `results` is a list here, but SQLAlchemy's
                # Session.delete expects a single instance — verify this is
                # not meant to be a per-row loop or a bulk query delete.
                self.db.delete(results)
            if status == EventStatus.FINISHED.name:
                # Replace any previously uploaded result file.
                if event.result_file is not None:
                    self.media.removeEventResult(event.result_file)
                result_file = self.media.uploadEventResult(request.files["result"], event.id)
                event.result_file = result_file
                self.rewards.collectResults(event, self.invites)
            elif status == EventStatus.REWARDED.name:
                self.rewards.giveRewards(event)
            self.db.commit()
    return redirect(url_for('event', event_id=event_id))
def addParticipantToEvent(self):
    """Promote a user to the event's participant list (removing any wait-list entry)."""
    if 'event_id' not in request.form \
            or 'user_id' not in request.form:
        return ""
    event_id = request.form['event_id']
    user_id = request.form['user_id']
    event = self._getEvent(event_id)
    if event is not None:
        user = User.query.filter_by(id=user_id).first()
        if user is not None:
            # Add only once, and clear the corresponding wait-list slot.
            users = event.participants.all()
            if user not in users:
                event.participants.append(user)
            users = event.wait_list.all()
            if user in users:
                event.wait_list.remove(user)
            self.db.commit()
    return redirect(url_for('event', event_id=event_id))
def removeParticipantFromEvent(self):
    """Demote a participant back onto the event's wait list (inverse of add)."""
    if 'event_id' not in request.form \
            or 'user_id' not in request.form:
        return ""
    event_id = request.form['event_id']
    user_id = request.form['user_id']
    event = self._getEvent(event_id)
    if event is not None:
        user = User.query.filter_by(id=user_id).first()
        if user is not None:
            # Drop from participants, then re-queue on the wait list once.
            users = event.participants.all()
            if user in users:
                event.participants.remove(user)
            users = event.wait_list.all()
            if user not in users:
                event.wait_list.append(user)
            self.db.commit()
    return redirect(url_for('event', event_id=event_id))
def uploadAvatar(self):
    """On POST, replace the current user's avatar with the uploaded 'image' file."""
    if request.method == 'POST':
        files = request.files
        if 'image' in files:
            user = self._getUser()
            # Remove the old stored image before saving the replacement.
            if user.image_big:
                self.media.removeUserImage(user.image_big, user.id)
            image_big = self.media.uploadUserImage(files['image'], user.id)
            if image_big is not None:
                user.image_big = image_big
                self.db.commit()
    return redirect(url_for('profile', user_name=session["login"]))
def uploadEventAvatar(self, event_id):
    """On POST, replace the event's cover image with the uploaded 'image' file.

    Mirrors uploadAvatar: the previous stored image is removed first, and
    the database is only touched when the upload succeeded.
    """
    if request.method == 'POST':
        files = request.files
        if 'image' in files:
            event = self._getEvent(event_id)
            if event is not None:
                # Remove the old stored image before saving the replacement.
                if event.image_big:
                    self.media.removeEventImage(event.image_big, event.id)
                # Dropped an unused `user = self._getUser()` lookup that the
                # original fetched but never read.
                image_big = self.media.uploadEventImage(files['image'], event.id)
                if image_big is not None:
                    event.image_big = image_big
                    self.db.commit()
    return redirect(url_for('event', event_id=event_id))
def getSettings(self):
    """Render / process the account settings form.

    On POST: rejects a login already taken by someone else, then applies
    either the with-password or the no-password variant of the form.
    """
    user = self._getUser()
    if request.method == "POST":
        # Uniqueness check: the new login may not belong to another account.
        other_user = User.query.filter_by(login=request.form["login"]).first()
        if other_user is not None and user != other_user:
            error = "Такой юзер уже существует"
            return self._renderUserTemplate("settings.html", error=error)
        if request.form["password"]:
            form = EditSettingsForm(request.form)
            if form.validate():
                user.login = str(form.login.data)
                user.password = str(form.password.data)
                # NOTE(review): gender defaults to the literal 'Male' and is
                # only mapped for the MALE case — other values stay as the
                # raw default string; confirm this is intentional.
                user.gender = 'Male'
                gender = str(form.gender.data)
                if gender == 'Male':
                    user.gender = Gender.MALE.name
            else:
                return self._renderUserTemplate("settings.html", error=form.errors_str())
        else:
            # No password supplied: validate with the password-less form.
            form = EditSettingsNoPasswordForm(request.form)
            if form.validate():
                user.login = str(form.login.data)
                user.gender = 'Male'
                gender = str(form.gender.data)
                if gender == 'Male':
                    user.gender = Gender.MALE.name
            else:
                return self._renderUserTemplate("settings.html", error=form.errors_str())
        self.db.commit()
        # Keep the session's login in sync with the possibly-renamed account.
        session["login"] = user.login
    return self._renderUserTemplate('settings.html')
def createPassCard(self):
    """Try to create a pass card for the posted user, reporting service errors.

    The constants (1, 8) are forwarded to the pass-card service — presumably
    count and validity parameters; verify against PassCardService.
    """
    if 'user_id' not in request.form:
        return abort(404)
    user_id = request.form["user_id"]
    user = User.query.filter_by(id=user_id).first()
    if not user:
        return abort(404)
    now = datetime.utcnow()
    res = self.pass_cards.tryCreatePassCard(now, user, 1, 8)
    # Service errors are surfaced on the profile page, not as HTTP errors.
    if isinstance(res, ESError):
        return redirect(url_for('profile', user_name=user.login, error=res))
    if res:
        self.db.add(res)
        self.db.commit()
    return redirect(url_for('profile', user_name=user.login))
def usePassCard(self):
    """Spend one of the posted user's pass cards on the posted event."""
    if 'user_id' not in request.form \
            or "event_id" not in request.form:
        return abort(404)
    user_id = request.form["user_id"]
    user = User.query.filter_by(id=user_id).first()
    if not user:
        return abort(404)
    event_id = request.form["event_id"]
    event = self._getEvent(event_id)
    if not event:
        return abort(404)
    now = datetime.utcnow()
    res = self.pass_cards.tryUsePassCard(now, user, event)
    # Service errors are surfaced on the profile page, not as HTTP errors.
    if isinstance(res, ESError):
        return redirect(url_for('profile', user_name=user.login, error=res))
    self.db.commit()
    return redirect(url_for('profile', user_name=user.login))
@staticmethod
def _errorsToString(form):
error = ""
for k, v in form.errors.items():
error += v[0] + '\n'
return
def _renderUserTemplate(self, path: str, **kwargs: object):
    """Render *path* with the current user injected as ``viewer``.

    Unless the caller already supplied a ``user`` keyword, the viewer is
    also passed as ``user`` so templates can rely on both names.
    """
    viewer = self._getUser()
    if 'user' not in kwargs:
        return render_template(path, viewer=viewer, user=viewer, **kwargs)
    return render_template(path, viewer=viewer, **kwargs)
|
987,084 | 31d6bf0e463098818f6396688700536f3929c2a2 | # generated with class generator.python.order_factory$Factory
from marketsim import registry
from marketsim.gen._out._ifunction._ifunctionside import IFunctionSide
from marketsim.gen._out._iobservable._iobservableiorder import IObservableIOrder
from marketsim.gen._out._ifunction._ifunctionfloat import IFunctionfloat
from marketsim.gen._out._iorder import IOrder
from marketsim.gen._out._observable._observableiorder import ObservableIOrder
@registry.expose(["Order", "FixedBudget"])
class FixedBudget_SideFloat(ObservableIOrder,IObservableIOrder):
""" **Factory creating fixed budget orders**
Fixed budget order acts like a market order
but the volume is implicitly given by a budget available for trades.
Internally first it sends request.EvalVolumesForBudget
to estimate volumes and prices of orders to sent and
then sends a sequence of order.ImmediateOrCancel to be sure that
cumulative price of trades to be done won't exceed the given budget.
Parameters are:
**side**
function defining side of orders to create
**budget**
function defining budget on which it may send orders at one time
"""
def __init__(self, side = None, budget = None):
from marketsim.gen._out.side._sell import Sell_ as _side_Sell_
from marketsim.gen._out._iorder import IOrder
from marketsim.gen._out._constant import constant_Float as _constant_Float
from marketsim.gen._out._observable._observableiorder import ObservableIOrder
from marketsim import deref_opt
ObservableIOrder.__init__(self)
self.side = side if side is not None else deref_opt(_side_Sell_())
self.budget = budget if budget is not None else deref_opt(_constant_Float(1000.0))
@property
def label(self):
return repr(self)
_properties = {
'side' : IFunctionSide,
'budget' : IFunctionfloat
}
def __repr__(self):
return "FixedBudget(%(side)s, %(budget)s)" % dict([ (name, getattr(self, name)) for name in self._properties.iterkeys() ])
def bind_ex(self, ctx):
if self.__dict__.get('_bound_ex', False): return
self.__dict__['_bound_ex'] = True
if self.__dict__.get('_processing_ex', False):
raise Exception('cycle detected')
self.__dict__['_processing_ex'] = True
self.__dict__['_ctx_ex'] = ctx.updatedFrom(self)
if hasattr(self, '_internals'):
for t in self._internals:
v = getattr(self, t)
if type(v) in [list, set]:
for w in v: w.bind_ex(self.__dict__['_ctx_ex'])
else:
v.bind_ex(self.__dict__['_ctx_ex'])
self.side.bind_ex(self._ctx_ex)
self.budget.bind_ex(self._ctx_ex)
self.bind_impl(self.__dict__['_ctx_ex'])
if hasattr(self, '_subscriptions'):
for s in self._subscriptions: s.bind_ex(self.__dict__['_ctx_ex'])
self.__dict__['_processing_ex'] = False
def reset_ex(self, generation):
if self.__dict__.get('_reset_generation_ex', -1) == generation: return
self.__dict__['_reset_generation_ex'] = generation
if self.__dict__.get('_processing_ex', False):
raise Exception('cycle detected')
self.__dict__['_processing_ex'] = True
if hasattr(self, '_internals'):
for t in self._internals:
v = getattr(self, t)
if type(v) in [list, set]:
for w in v: w.reset_ex(generation)
else:
v.reset_ex(generation)
self.side.reset_ex(generation)
self.budget.reset_ex(generation)
self.reset()
if hasattr(self, '_subscriptions'):
for s in self._subscriptions: s.reset_ex(generation)
self.__dict__['_processing_ex'] = False
def typecheck(self):
from marketsim import rtti
from marketsim.gen._out._ifunction._ifunctionside import IFunctionSide
from marketsim.gen._out._ifunction._ifunctionfloat import IFunctionfloat
rtti.typecheck(IFunctionSide, self.side)
rtti.typecheck(IFunctionfloat, self.budget)
def registerIn(self, registry):
if self.__dict__.get('_id', False): return
self.__dict__['_id'] = True
if self.__dict__.get('_processing_ex', False):
raise Exception('cycle detected')
self.__dict__['_processing_ex'] = True
registry.insert(self)
self.side.registerIn(registry)
self.budget.registerIn(registry)
if hasattr(self, '_subscriptions'):
for s in self._subscriptions: s.registerIn(registry)
if hasattr(self, '_internals'):
for t in self._internals:
v = getattr(self, t)
if type(v) in [list, set]:
for w in v: w.registerIn(registry)
else:
v.registerIn(registry)
self.__dict__['_processing_ex'] = False
def __call__(self, *args, **kwargs):
from marketsim.gen._intrinsic.order.meta.fixed_budget import Order_Impl
side = self.side()
if side is None: return None
budget = self.budget()
if budget is None: return None
return Order_Impl(side, budget)
def bind_impl(self, ctx):
pass
def reset(self):
pass
def FixedBudget(side = None,budget = None):
from marketsim.gen._out._ifunction._ifunctionside import IFunctionSide
from marketsim.gen._out._ifunction._ifunctionfloat import IFunctionfloat
from marketsim import rtti
if side is None or rtti.can_be_casted(side, IFunctionSide):
if budget is None or rtti.can_be_casted(budget, IFunctionfloat):
return FixedBudget_SideFloat(side,budget)
raise Exception('Cannot find suitable overload for FixedBudget('+str(side) +':'+ str(type(side))+','+str(budget) +':'+ str(type(budget))+')')
|
987,085 | baef19784218c93db4463b90f75d80c5bf872b51 | from collections import Counter
most_frequent=lambda data:Counter(data).most_common(1)[0]
|
987,086 | 9a49b85212d0174b7d235d0cbfc4b8eba8e859bb | """
The :class:`.Session` class provides TinyAPI's core functionality. It manages the authentication cookies and token for all requests to TinyLetter's undocumented API.
"""
import requests
import re
import json
from .draft import Draft
URL = "https://app.tinyletter.com/__svcbus__/"
DEFAULT_MESSAGE_STATUSES = [
"sent",
"sending",
]
token_pat = re.compile(r'csrf_token="([^"]+)"')
def get_cookies_and_token():
    """Fetch the TinyLetter landing page and scrape the CSRF token.

    Returns a ``(cookies, token)`` pair used to authenticate subsequent
    service-bus requests. Raises AttributeError if the token pattern is
    absent from the page (re.search returns None).
    """
    res = requests.get("https://app.tinyletter.com/")
    token = re.search(token_pat, res.content.decode("utf-8")).group(1)
    return (res.cookies, token)
def create_payload(service, data, token):
    """Serialize one service call into TinyLetter's service-bus JSON envelope."""
    envelope = [[[service, data]], [], token]
    return json.dumps(envelope)
def fmt_paging(offset, count):
    """Format an ``"offset, count"`` paging string, or None when both are unset.

    A falsy offset (None or 0) is rendered as 0; count is rendered verbatim.
    """
    if offset is None and count is None:
        return None
    start = offset if offset else 0
    return f"{start}, {count}"
class Session(object):
    """An authenticated tinyletter.com session."""

    def __init__(self, username, password=False):
        """Returns a logged-in session."""
        self.username = username
        self.cookies, self.token = get_cookies_and_token()
        self._login(password)

    def _request(self, service, data):
        # Low-level POST of one service-bus payload; refreshes the cookie jar
        # from every response so the session stays valid.
        payload = create_payload(service, data, self.token)
        res = requests.post(URL,
            cookies=self.cookies,
            data=payload,
            headers={'Content-Type': 'application/octet-stream'})
        self.cookies = res.cookies
        return res

    def request(self, service, data):
        """
        Makes a call to TinyLetter's __svcbus__ endpoint.
        """
        _res = self._request(service, data)
        res = _res.json()[0][0]
        if res["success"] == True:
            return res["result"]
        else:
            err_msg = res["errmsg"]
            raise Exception("Request not successful: '{0}'".format(err_msg))

    def _login(self, password):
        req_data = [self.username, password, None, None, None, None ]
        try:
            self.request("service:User.loginService", req_data)
        except Exception as err:
            # Bug fix: the original bare `except:` also swallowed
            # SystemExit/KeyboardInterrupt and discarded the cause.
            raise Exception("Login not successful.") from err

    def get_profile(self):
        return self.request("service:User.currentUser", [])

    def count_messages(self, statuses=DEFAULT_MESSAGE_STATUSES):
        """Returns the number of messages your account has sent.
        ``DEFAULT_MESSAGE_STATUSES`` is set to ``[ "sent", "sending" ]``.
        Other possible statuses include "draft", "failed_review", "failed_disabled", and "failed_schedule".
        """
        return self.request("count:Message", [{"status": statuses}])

    def get_messages(self,
            statuses=DEFAULT_MESSAGE_STATUSES,
            order="sent_at desc",
            offset=None,
            count=None,
            content=False):
        """Returns a list of messages your account sent.
        Messages are sorted by ``order``, starting at an optional integer ``offset``, and optionally limited to the first ``count`` items (in sorted order).
        Returned data includes various statistics about each message, e.g., ``total_opens``, ``open_rate``, ``total_clicks``, ``unsubs``, ``soft_bounces``. If ``content=True``, the returned data will also include HTML content of each message.
        """
        req_data = [ { "status": statuses }, order, fmt_paging(offset, count) ]
        service = "query:Message.stats"
        if content: service += ", Message.content"
        return self.request(service, req_data)

    def get_drafts(self, **kwargs):
        """Same as Session.get_messages, but where ``statuses=["draft"]``."""
        default_kwargs = { "order": "updated_at desc" }
        default_kwargs.update(kwargs)
        return self.get_messages(statuses=["draft"], **default_kwargs)

    def get_message(self, message_id):
        """Return stats *and* message content for a given message."""
        req_data = [ str(message_id) ]
        return self.request("find:Message.stats, Message.content", req_data)

    def count_urls(self):
        """Returns the total number of URLs included in your messages"""
        return self.request("count:Message_Url", [ None ])

    def get_urls(self, order="total_clicks desc", offset=None, count=None):
        """Returns a list of URLs you've included in messages.
        List is sorted by ``total_clicks``, starting at an optional integer ``offset``, and optionally limited to the first ``count`` items.
        """
        req_data = [ None, order, fmt_paging(offset, count) ]
        return self.request("query:Message_Url", req_data)

    def get_message_urls(self, message_id, order="total_clicks desc"):
        """Returns a list of URLs you've included in a specific message.
        List is sorted by ``total_clicks``, starting at an optional integer ``offset``, and optionally limited to the first ``count`` items.
        """
        req_data = [ { "message_id": str(message_id) }, order, None ]
        return self.request("query:Message_Url", req_data)

    def count_subscribers(self):
        """Returns your newsletter's number of subscribers."""
        return self.request("count:Contact", [ None ])

    def get_subscribers(self,
            order="created_at desc",
            offset=None,
            count=None):
        """Returns a list of subscribers.
        List is sorted by most-recent-to-subsribe, starting at an optional integer ``offset``, and optionally limited to the first ``count`` items (in sorted order).
        Returned data includes various statistics about each subscriber, e.g., ``total_sent``, ``total_opens``, ``total_clicks``.
        """
        req_data = [ None, order, fmt_paging(offset, count)]
        return self.request("query:Contact.stats", req_data)

    def get_subscriber(self, subscriber_id):
        """Returns data corresponding to a specific subcriber."""
        req_data = [ str(subscriber_id) ]
        return self.request("find:Contact.stats", req_data)

    def create_draft(self):
        """Create a new draft message."""
        return Draft(self)

    def edit_draft(self, message_id):
        """Fetch a specific draft to be edited."""
        return Draft(self, message_id).fetch()
|
987,087 | 2b505005709fe6dd042daa080c72a26a71467da5 | from .customization import *
|
987,088 | b7f3e527ebd7ad313b0908723783b50d0d395a85 | import logging
import threading
from typing import List
from django.core.mail import EmailMessage
from mailer.exceptions import EmailError
logger = logging.getLogger(__name__)
class EmailSender(threading.Thread):
    """Background thread that validates and sends a batch of Django EmailMessages."""

    def __init__(self, messages: List[EmailMessage]):
        super().__init__()
        # Fail fast: every entry must be a truthy EmailMessage before the
        # thread is ever started.
        for message in messages:
            if not (message and isinstance(message, EmailMessage)):
                raise EmailError()
        self.messages = messages

    def run(self):
        # Thread entry point (invoked by Thread.start()).
        self.send_email_messages()

    def send_email_messages(self):
        """Send each message sequentially, logging every successful delivery."""
        for message in self.messages:
            message.send()
            logger.info('Message to {0} has been successfully sent'.format(message.to), )
|
987,089 | 956ecac299b5ff44d28b5a631978a16724531e00 | import numpy as np
from pymoo.model.survival import Survival, split_by_feasibility
from pymoo.util.mathematics import Mathematics
from pymoo.util.non_dominated_sorting import NonDominatedSorting
from pymoo.util.randomized_argsort import randomized_argsort
class RankAndCrowdingSurvival(Survival):
    """NSGA-II style survival: non-dominated rank first, crowding distance second."""

    def _do(self, pop, n_survive, D=None, **kwargs):
        # split by feasibility
        feasible, infeasible = split_by_feasibility(pop)
        # final result that contains indices, rank and crowding of surviving individuals
        survivors = []
        crowding = []
        rank = []
        if len(feasible) > 0:
            # calculate rank only of feasible solutions
            F = pop.F[feasible, :]
            fronts = NonDominatedSorting().do(F, n_stop_if_ranked=n_survive)
            # go through all fronts except the last one
            for k, front in enumerate(fronts):
                # calculate the crowding distance of the front
                crowding_of_front = calc_crowding_distance(F[front, :])
                # current front sorted by crowding distance if splitting
                if len(survivors) + len(front) > n_survive:
                    I = randomized_argsort(crowding_of_front, order='descending', method='numpy')
                    I = I[:(n_survive - len(survivors))]
                # otherwise take the whole front
                else:
                    I = np.arange(len(front))
                # calculate crowding distance for the current front
                crowding.append(crowding_of_front[I])
                rank.append(np.array([k] * len(I)))
                survivors.extend(front[I])
            # create numpy arrays out of the lists
            rank = np.concatenate(rank)
            crowding = np.concatenate(crowding)
            # get absolute index from filtering before
            survivors = feasible[survivors]
        # if infeasible solutions need to be added - individuals sorted by constraint violation are added
        # NOTE(review): assumes `infeasible` is already ordered by constraint
        # violation — verify split_by_feasibility guarantees that ordering.
        n_infeasible = (n_survive - len(survivors))
        if n_infeasible > 0:
            survivors = np.concatenate([survivors, infeasible[:n_infeasible]])
            rank = np.concatenate([rank, Mathematics.INF * np.ones(n_infeasible)])
            crowding = np.concatenate([crowding, -1.0 * np.ones(n_infeasible)])
        # now truncate the population
        pop.filter(survivors)
        # optionally report rank / crowding back to the caller
        if D is not None:
            D['rank'] = rank
            D['crowding'] = crowding
def calc_crowding_distance(F):
    """Compute the NSGA-II crowding distance of each point in objective matrix F.

    F has one row per point and one column per objective. Boundary points get
    a large finite value (``infinity``) instead of numpy's inf so downstream
    arithmetic stays finite. Returns a 1-D array of length F.shape[0].
    """
    # large finite stand-in for "infinite" crowding of boundary points
    infinity = 1e+14
    n_points = F.shape[0]
    n_obj = F.shape[1]
    if n_points <= 2:
        # with one or two points everyone is a boundary point
        return np.full(n_points, infinity)
    else:
        # the final crowding distance result
        crowding = np.zeros(n_points)
        # sort each column and get index (stable sort keeps duplicates ordered)
        I = np.argsort(F, axis=0, kind='mergesort')
        # now really sort the whole array
        F = F[I, np.arange(n_obj)]
        # get the distance to the last element in sorted list and replace zeros with actual values
        dist = np.concatenate([F, np.full((1, n_obj), np.inf)]) \
            - np.concatenate([np.full((1, n_obj), -np.inf), F])
        index_dist_is_zero = np.where(dist == 0)
        # propagate neighbouring distances through runs of duplicate values
        dist_to_last = np.copy(dist)
        for i, j in zip(*index_dist_is_zero):
            dist_to_last[i, j] = dist_to_last[i - 1, j]
        dist_to_next = np.copy(dist)
        for i, j in reversed(list(zip(*index_dist_is_zero))):
            dist_to_next[i, j] = dist_to_next[i + 1, j]
        # normalize all the distances by each objective's range
        norm = np.max(F, axis=0) - np.min(F, axis=0)
        norm[norm == 0] = np.nan
        dist_to_last, dist_to_next = dist_to_last[:-1] / norm, dist_to_next[1:] / norm
        # if we divided by zero because all values in one columns are equal replace by none
        dist_to_last[np.isnan(dist_to_last)] = 0.0
        dist_to_next[np.isnan(dist_to_next)] = 0.0
        # sum up the distance to next and last and norm by objectives - also reorder from sorted list
        J = np.argsort(I, axis=0)
        crowding = np.sum(dist_to_last[J, np.arange(n_obj)] + dist_to_next[J, np.arange(n_obj)], axis=1) / n_obj
    # replace infinity with a large number
    crowding[np.isinf(crowding)] = infinity
    return crowding
if __name__ == '__main__':
    # Ad-hoc smoke check: duplicated points (rows 50/51 and 0/5) should
    # receive identical crowding values, and row 0 (a boundary point) a
    # large one.
    F = np.random.rand(100, 2)
    F[0] = np.max(F, axis=0)
    F[5] = F[0]
    F[50] = F[51]
    crowding = calc_crowding_distance(F)
    print(crowding[50], crowding[51])
    print(crowding[0], crowding[5])
|
987,090 | 200dc0c8bc63942e81598e1d36e2f83c52743a4f | import asyncio
import time
import pytest
from google.protobuf import json_format
from grpc import RpcError
from jina.parsers import set_pea_parser
from jina.peapods.grpc import Grpclet
from jina.proto import jina_pb2
from jina.types.message.common import ControlMessage
@pytest.mark.slow
@pytest.mark.asyncio
@pytest.mark.timeout(5)
async def test_send_receive(mocker):
    """A message sent through Grpclet reaches the registered callback."""
    # AsyncMock does not seem to exist in python 3.7, this is a manual workaround
    receive_cb = mocker.Mock()

    async def mock_wrapper(msg):
        receive_cb()

    args = set_pea_parser().parse_args([])
    grpclet = Grpclet(args=args, message_callback=mock_wrapper)
    asyncio.get_event_loop().create_task(grpclet.start())
    receive_cb.assert_not_called()
    await grpclet.send_message(_create_msg(args))
    # give the server task a moment to dispatch the message
    await asyncio.sleep(0.1)
    receive_cb.assert_called()
    await grpclet.close(None)
@pytest.mark.slow
@pytest.mark.asyncio
@pytest.mark.timeout(5)
async def test_send_non_blocking(mocker):
    """A slow callback must not block delivery of subsequent messages."""
    receive_cb = mocker.Mock()

    async def blocking_cb(msg):
        receive_cb()
        # simulate a long-running handler; the second send below must still
        # be dispatched well before this sleep finishes
        time.sleep(1.0)
        return msg

    args = set_pea_parser().parse_args([])
    grpclet = Grpclet(args=args, message_callback=blocking_cb)
    asyncio.get_event_loop().create_task(grpclet.start())
    receive_cb.assert_not_called()
    await grpclet.send_message(_create_msg(args))
    await asyncio.sleep(0.1)
    assert receive_cb.call_count == 1
    await grpclet.send_message(_create_msg(args))
    await asyncio.sleep(0.1)
    assert receive_cb.call_count == 2
    await grpclet.close(None)
@pytest.mark.slow
@pytest.mark.asyncio
@pytest.mark.timeout(5)
async def test_send_static_ctrl_msg(mocker):
    """The static Grpclet.send_ctrl_msg helper reaches a running Grpclet."""
    # AsyncMock does not seem to exist in python 3.7, this is a manual workaround
    receive_cb = mocker.Mock()

    async def mock_wrapper(msg):
        receive_cb()

    args = set_pea_parser().parse_args([])
    grpclet = Grpclet(args=args, message_callback=mock_wrapper)
    asyncio.get_event_loop().create_task(grpclet.start())
    receive_cb.assert_not_called()
    # retry until the server task has actually bound its port
    while True:
        try:
            def send_status():
                return Grpclet.send_ctrl_msg(
                    pod_address=f'{args.host}:{args.port_in}', command='STATUS'
                )
            # send_ctrl_msg is blocking, so run it off the event loop
            await asyncio.get_event_loop().run_in_executor(None, send_status)
            break
        except RpcError:
            await asyncio.sleep(0.1)
    receive_cb.assert_called()
    await grpclet.close(None)
def _create_msg(args):
    """Build a STATUS control message with a two-pod loopback routing table.

    Both pods point at the test Grpclet's own host/port so any send loops
    straight back into the local server.
    """
    msg = ControlMessage('STATUS')
    routing_pb = jina_pb2.RoutingTableProto()
    routing_table = {
        'active_pod': 'pod1',
        'pods': {
            'pod1': {
                'host': '0.0.0.0',
                'port': args.port_in,
                'expected_parts': 1,
                'out_edges': [{'pod': 'pod2'}],
            },
            'pod2': {
                'host': '0.0.0.0',
                'port': args.port_in,
                'expected_parts': 1,
                'out_edges': [],
            },
        },
    }
    json_format.ParseDict(routing_table, routing_pb)
    msg.envelope.routing_table.CopyFrom(routing_pb)
    return msg
|
987,091 | d7ea2ea0828ca0957e8632ea1159708e77c15ff5 | from django.contrib import admin
# Register your models here.
from .models import User
from .models import Userfile, Ordremission, Message, Envoi
# Expose the app's core models in the Django admin site.
admin.site.register(User)
admin.site.register(Userfile)
admin.site.register(Ordremission)
admin.site.register(Envoi)
admin.site.register(Message)
987,092 | 2edd826f8b0ce18093c8385f3cc8a007f07266a0 | #import sys
#input = sys.stdin.readline
from math import sqrt
def main():
    # Competitive-programming task: read N integer 2-D vectors and report the
    # best achievable magnitude of a vector sum over chosen subsets, using
    # per-quadrant cumulative sums plus axis corrections.
    N = int( input())
    # SX[q]/SY[q]: summed x/y of points falling in quadrant q
    # (0: x>=0,y>=0; 1: x>=0,y<=0; 2: x<=0,y>=0; 3: x<=0,y<=0).
    # Axis points are counted in BOTH adjacent quadrants; the x_/y_ totals
    # below are used to undo that double counting.
    SX = [0 for _ in range(4)]
    SY = [0 for _ in range(4)]
    x_plus = 0
    x_minus = 0
    y_plus = 0
    y_minus = 0
    for _ in range(N):
        x, y = map( int, input().split())
        if x >= 0:
            if y >= 0:
                SX[0] += x
                SY[0] += y
            if y <= 0:
                SX[1] += x
                SY[1] += y
        if x <= 0:
            if y >= 0:
                SX[2] += x
                SY[2] += y
            if y <= 0:
                SX[3] += x
                SY[3] += y
        if x == 0:
            if y > 0:
                y_plus += y
            else:
                y_minus += y
        if y == 0:
            if x > 0:
                x_plus += x
            else:
                x_minus += x
    ans = 0
    # NOTE(review): looks like leftover debug output — confirm it should be
    # removed before judging, as it precedes the real answer line.
    print(SX, SY, y_plus)
    # candidate 1: the best single quadrant
    for i in range(4):
        if ans**2 < SX[i]**2 + SY[i]**2:
            ans = sqrt(SX[i]**2 + SY[i]**2)
    # NOTE(review): intermediate print; the final answer is printed below.
    print(ans)
    # candidates 2-5: pairs of adjacent quadrants, with the shared-axis
    # contribution subtracted once to undo double counting
    S = (SX[0]+SX[3]-x_plus)**2 + (SY[0]+SY[3])**2
    if ans**2 < S:
        ans = sqrt(S)
    S = (SX[1]+SX[2]-x_minus)**2 + (SY[1]+SY[2])**2
    if ans**2 < S:
        ans = sqrt(S)
    S = (SX[0]+SX[1])**2 + (SY[0]+SY[1] - y_plus)**2
    if ans**2 < S:
        ans = sqrt(S)
    S = (SX[2]+SX[3])**2 + (SY[2]+SY[3] - y_minus)**2
    if ans**2 < S:
        ans = sqrt(S)
    print(ans)
if __name__ == '__main__':
main()
|
987,093 | 2a4e57c3b7ff19731af2715bb8e60b13a4537e0b | """" Module used in order to extract Marshmallow validation errors to pyBabel """
from gettext import gettext
def get_translations():
    """Return the Marshmallow validation messages exposed for pyBabel extraction."""
    messages = (
        'Missing data for required field.',
    )
    return [gettext(text) for text in messages]
|
987,094 | 567b0bd3aa207ec17a0504619ce169ab508ba670 | # -*- coding: utf-8 -*-
"""
The sequence of triangle numbers is generated by adding the natural numbers.
So the 7th triangle number would be 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28.
The first ten terms would be:
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
Let us list the factors of the first seven triangle numbers:
1: 1
3: 1,3
6: 1,2,3,6
10: 1,2,5,10
15: 1,3,5,15
21: 1,3,7,21
28: 1,2,4,7,14,28
We can see that 28 is the first triangle number to have over five divisors.
What is the value of the first triangle number to have over five hundred divisors?
"""
import time
import math
start_time = time.time()
#Calculate the triangle number
def tn(n):
    """Return the n-th triangle number (0 + 1 + ... + n).

    Uses the closed form n*(n+1)/2 instead of the original O(n) summation
    loop; identical results, constant time.
    """
    return n * (n + 1) // 2
def factors(x):
    """Return the set of all positive divisors of x.

    Scans trial divisors up to sqrt(x); each hit contributes both the
    divisor and its cofactor, so every divisor is found exactly once.
    """
    divisors = set()
    for d in range(1, int(math.sqrt(x)) + 1):
        if x % d == 0:
            divisors.add(d)
            divisors.add(int(x / d))
    return divisors
# Scan triangle numbers until one has at least the target divisor count.
# NOTE(review): the loop threshold is 100 although the module docstring asks
# for over 500 divisors — confirm which target is intended.
a = 1
n_len = 1
n_array = []
while (n_len < 100) :
    t_number = tn(a)
    n_array = factors(t_number)
    n_len = len(n_array)
    a += 1
# NOTE(review): `a` was incremented after the hit, so the printed index is
# one past the triangle number actually reported in t_number — verify.
print ("Triangle number {:d} is {:d} and number of factors are: {:d} ".format(a,t_number,n_len))
#print (n_array)
elapsed_time = time.time() - start_time
print ("Elapse time: {:.2f} sec".format(elapsed_time))
987,095 | 62b54c37c09eca77dad4cf52664d28a548df9a58 | #
# Example file for working with date information
#
from datetime import date
from datetime import time
from datetime import datetime
from datetime import timedelta
def main():
    """Print tomorrow's date twice, computed two equivalent ways.

    Bug fix: the second computation used
    ``date(year=today.year, month=today.month, day=today.day + 1)``,
    which raises ValueError on the last day of any month (and Dec 31);
    timedelta arithmetic rolls over month and year boundaries correctly.
    (The large commented-out tutorial sections on date components and
    strftime formatting were removed as dead code.)
    """
    today = date.today()

    # First variant: plain timedelta arithmetic.
    tomorrow2 = today + timedelta(days=1)
    print(tomorrow2)

    # Second variant: previously a hand-built date(); now also timedelta,
    # so the output is identical and month-end safe.
    tomorrow = today + timedelta(days=1)
    print(tomorrow)


if __name__ == "__main__":
    main()
|
987,096 | 89eecf9daaff0f4a9d56db2f73d712da7af4a0f1 | def triangle():
print(" /\\\n/ \\\n----")
def rectangle():
    """Print a four-line ASCII-art rectangle."""
    edge = "+--+"
    side = "| |"
    print("\n".join([edge, side, side, edge]))
|
987,097 | d6066e40b31230d089c6b44e641f4a93d379c94c | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-05-06 04:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Brand and watch tables.

    Committed schema artifact — do not hand-edit field definitions; create a
    follow-up migration for schema changes instead.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Brand',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('brand_name', models.CharField(max_length=250)),
                ('brand_des', models.CharField(max_length=500)),
                ('brand_origin', models.CharField(max_length=100)),
                ('brand_logo', models.FileField(upload_to='')),
            ],
        ),
        migrations.CreateModel(
            name='watch',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('watch_name', models.CharField(max_length=50)),
                ('watch_year', models.CharField(max_length=10)),
                ('watch_pic', models.FileField(upload_to='')),
                ('watch_large_pic', models.FileField(upload_to='')),
                # Each watch belongs to exactly one Brand; deleting the brand
                # cascades to its watches.
                ('watch_brand', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='watch.Brand')),
            ],
        ),
    ]
987,098 | b44961ca679df134aae82e4898ff6316bc005426 | from django.conf.urls import url, include
from . import views
# Routes for the item-query app: searches by position and by name, item
# detail pages, and a status-change action on a single item.
urlpatterns = [
    url(r'^query_by_pos/$', views.query_by_pos, name='query_by_pos'),
    url(r'^query_by_name/$', views.query_by_name, name='query_by_name'),
    url(r'^query_by_name/result$', views.query_by_name, name='query_by_name_result'),
    url(r'^detail/(?P<item_id>[0-9]+)$', views.item_detail, name='item_detail'),
    url(r'^detail/(?P<item_id>[0-9]+)/change_status$', views.change_status, name='change_status')
]
987,099 | c746aaade1b8437998ee1c3ff96291237c657981 | from django.conf.urls import patterns, url
from encuestas import views
# Survey ("encuestas") routes: index, per-survey detail, results, and voting.
# NOTE(review): django.conf.urls.patterns() is a legacy API removed in
# Django 1.10 — verify the project's Django version before upgrading.
urlpatterns = patterns('',
    url(r'^$', views.index, name="index"),
    url(r'^(?P<encuesta_id>\d+)/$', views.detalles, name="detalles"),
    url(r'^(?P<encuesta_id>\d+)/resultados/$', views.resultados, name="resultados"),
    url(r'^(?P<encuesta_id>\d+)/votos/$', views.votos, name="votos"),
)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.