index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
8,300 | cf6d3a0fbf2a2daf8432622f780e138784ec505d | import re
# Raw string: "\s" in a plain literal is an invalid escape sequence
# (DeprecationWarning on 3.6+, SyntaxWarning on 3.12+).
IS_WITH_SINGLETON_REGEX = re.compile(r"(!=|==)\s*(True|False|None)")


def check_is_with_singleton(physical_line, line_number):
    """Flag equality comparisons against the singletons True/False/None.

    PEP 8 says such comparisons should use `is` / `is not`, not `==` / `!=`.

    :param physical_line: raw source line to scan
    :param line_number: 1-based line number, echoed back in the result
    :return: (0, 12, (line_number, column), message) on a violation,
             or None when the line is clean (now explicit, was implicit).
    """
    match_obj = IS_WITH_SINGLETON_REGEX.search(physical_line)
    if match_obj is not None:
        offset = match_obj.span()[0]
        return (0, 12, (line_number, offset), "Use equal with singleton")
    return None
plugins = {
"physical_line": [check_is_with_singleton],
"logical_line": [],
"ast": []
} |
8,301 | f317d67b98eab1f0f192fa41f9bcc32b0c1e8eb0 | # Run 'python setup.py build' on cmd
import sys
from cx_Freeze import setup, Executable
import os.path

# cx_Freeze/tkinter builds need the Tcl/Tk data directories exported
# explicitly; derive them from the Python installation that is running.
PYTHON_INSTALL_DIR = os.path.dirname(os.path.dirname(os.__file__))
os.environ['TCL_LIBRARY'] = os.path.join(PYTHON_INSTALL_DIR, 'tcl', 'tcl8.6')
os.environ['TK_LIBRARY'] = os.path.join(PYTHON_INSTALL_DIR, 'tcl', 'tk8.6')

options = {
    'build_exe': {
        # Data files copied next to the frozen executable.
        'include_files': [
            'bg_music.wav',
        ],
        # Also search the local 'modules' directory for imports.
        'path': sys.path + ['modules']
    }
}
executables = [
    Executable('game.py')
]
setup(name='Arkanoid',
      version='1.0',
      description='Python Game',
      options=options,
      executables=executables
      )
|
8,302 | 3b15767988f1d958fc456f7966f425f93deb9017 | """
Given two strings, a and b, that may or may not be of the same length,
determine the minimum number of character deletions required to make
a and b anagrams. Any characters can be deleted from either of the strings.
"""
from collections import Counter
import math
import os
import random
import re
import sys
# Complete the makeAnagram function below.
def makeAnagram(a, b):
    """Minimum number of deletions (from either string) to make a and b anagrams.

    Characters a has in excess of b must be deleted from a, and vice versa;
    the answer is the total size of both surpluses.
    """
    surplus_a = Counter(a) - Counter(b)  # chars a has too many of
    surplus_b = Counter(b) - Counter(a)  # chars b has too many of
    return sum(surplus_a.values()) + sum(surplus_b.values())
if __name__ == '__main__':
    # Sample driver: "cde" vs "abc" share only "c", so 4 deletions are needed.
    a="cde"
    b="abc"
    res = makeAnagram(a, b)
    print(res)
|
8,303 | 97029ac9f05037bf9304dacf86c35f5534d887c4 | class Solution:
    def sumSubarrayMins(self, A: List[int]) -> int:
        """Return the sum of min(subarray) over every contiguous subarray, mod 10**9 + 7.

        Monotonic-stack counting: A[i] is the minimum of exactly
        (i - prev[i]) * (nex[i] - i) subarrays, where prev[i] is the nearest
        index to the left holding a strictly smaller value and nex[i] the
        nearest index to the right holding a smaller-or-equal value.  The
        >= / > asymmetry between the two passes makes runs of equal values
        count each subarray exactly once.
        """
        # Left pass: pop values >= A[i] so prev[i] is strictly smaller.
        stack = []
        prev = [None] * len(A)
        for i in range(len(A)):
            while stack and A[stack[-1]] >= A[i]:
                stack.pop()
            prev[i] = stack[-1] if stack else -1  # -1: no smaller value to the left
            stack.append(i)
        # Right pass: pop strictly greater values, so equal values stop the scan.
        stack = []
        nex = [None] * len(A)
        for i in range(len(A)-1, -1, -1):
            while stack and A[stack[-1]] > A[i]:
                stack.pop()
            nex[i] = stack[-1] if stack else len(A)  # len(A): no smaller value to the right
            stack.append(i)
        return sum((i - prev[i]) * (nex[i] - i) * A[i] for i in range(len(A))) % (10 ** 9 + 7)
8,304 | 56d5915d30e85285da549cc69ef25714bacc6f3a | from .alexnet import *
from .lenet import *
from .net import *
from .vae import * |
8,305 | 09420360ddcf2f74c2e130b4e09ae2a959e42e50 | class Solution:
def uncommonFromSentences(self, A: str, B: str) -> List[str]:
word_count = {}
A = A.split()
B = B.split()
whole = A + B
for word in whole:
if word not in word_count:
word_count[word] = 1
else:
word_count[word] += 1
return [word for word in word_count if word_count[word] == 1]
|
8,306 | 0964121d88fad2906311de7532eac52ff784fff6 | """
Main CLI endpoint for GeoCube
"""
import importlib.metadata
import click
from click import group
import geocube.cli.commands as cmd_modules
from geocube import show_versions
CONTEXT_SETTINGS = {
"help_option_names": ["-h", "--help"],
"token_normalize_func": lambda x: x.replace("-", "_"),
}
def check_version(ctx, _, value):
    """
    Print the installed geocube version and exit.

    Invoked eagerly by Click via 'geocube --version'.

    :param ctx: Application context object (click.Context)
    :param _: the click.Option itself (unused)
    :param value: flag value passed in by Click
    :return: None
    """
    # resilient_parsing is set during shell completion; stay silent then.
    if not value or ctx.resilient_parsing:
        return
    click.echo(f"geocube v{importlib.metadata.version('geocube')}")
    ctx.exit()


def cli_show_version(ctx, _, value):
    """
    Print debugging version information (geocube and its dependencies).

    Invoked eagerly by Click via 'geocube --show-versions'.

    :param ctx: Application context object (click.Context)
    :param _: the click.Option itself (unused)
    :param value: flag value passed in by Click
    :return: None
    """
    if not value or ctx.resilient_parsing:
        return
    show_versions()
    ctx.exit()
# Both options are eager (handled before normal parsing) and expose_value=False
# (they never reach the command function); the callbacks exit the process.
@group(context_settings=CONTEXT_SETTINGS)
@click.option(
    "-v",
    "--version",
    is_flag=True,
    is_eager=True,
    expose_value=False,
    callback=check_version,
    help="Show the current version",
)
@click.option(
    "--show-versions",
    is_flag=True,
    is_eager=True,
    expose_value=False,
    callback=cli_show_version,
    help="Show debugging version information",
)
def geocube():
    """Top-level command and entry point into the GeoCube CLI"""


def _add_subcommands():
    """
    Individual commands (and sub-commands) are encapsulated in separate files
    under /commands. Collect these command groups, and add them underneath the
    top-level command (geocube).
    """
    geocube.add_command(cmd_modules.make_geocube.make_geocube)


# Register subcommands at import time so the CLI is complete when invoked.
_add_subcommands()
|
8,307 | dce7fd0c9ed8e1d433f9131a8d137c8dcca4ac56 | #!/bin/python3
# TODO: implement the stack O(N) version
'''
Naive: O(N^3) or sum_{k=1...N}( O(N^2 (N-K)) )
for each size N
for each window of size N in the array
traverse the window to find the max
Naive with heap: O(N^2 log N)
for each size N O(N)
traverse array and accumulate window of size N O(N log N)
find max O(1)
DP:
Notice that min(W, p), the min size for window of size W and at position p, is
equal to min(min(W - 1, p), min(W - 1, p + 1)). Therefore, DP with these
tables can reduce the size of the problem to O(W^2) ~= O(N^2). Is this good
enough? No.
Domination windows:
Let us say that i dominates a contiguous range of n values if it's lower than
all n of its neighboring values. This means that i will show up as a min window
when considering window sizes of up to size n. We want to find the largest i
such that it domaintes other numbers in a window of size n. Now how to find this
efficiently? If we iterate through each i and compare it to its n neighbors,
that will also be O(N^2) time.
Start with lowest number and 1-dimensional flood fill. This will take O(N^2)
time in the worst case though.
However, you don't actually have to perform the flood fill. Instead, we can just
use the coordinates of lower numbers and perform something like binary search
to find the closest coordinates to a given coordinate in O(log N) time.
Overall this means that we iterate through each number, starting from the
lowest, and perform O(log N) time binary searches to find the boundaries over
which this element i dominates. Total time is O(N log N).
'''
import math
import os
import random
import re
import sys
from collections import defaultdict
from heapq import heappush, heappop
from bisect import insort_left
# Complete the riddle function below.
def riddle(lst):
    '''
    For every window size w in 1..N, return (as a reversed-list iterator) the
    maximum over all windows of size w of the window minimum.

    Define a value `v` to "dominate" a contiguous range if `v` is smaller than
    every other number in that range.  If `v`'s largest dominating window has
    size `n`, then `v` is a window-minimum for every window size up to `n`.
    So for each window size we only need the largest `v` whose dominating
    window is at least that big.

    Processing values smallest-first, the indices already processed are
    exactly those holding smaller values; a binary search over them bounds the
    current value's dominating window in O(log N), O(N log N) overall.
    (An O(N) stack-based solution exists; see the TODO at the top of the file.)
    '''
    max_by_w_size = { w: -float('inf') for w in range(1, len(lst) + 1) }
    # note that bounding_indices are indexes into len(lst), not values themselves
    bounding_indices = [-1, len(lst)]
    sorted_lst = sorted(enumerate(lst), key=lambda x: x[1])
    for i, value in sorted_lst:
        # note that l_index and r_index are indices to the bounding indices
        r_index = bsearch(bounding_indices, i)
        l_index = r_index - 1
        l_point = bounding_indices[l_index]
        r_point = bounding_indices[r_index]
        # (l_point + 1, r_point) defines a "dominating window" for `value`
        w = r_point - (l_point + 1)
        assert w > 0
        max_by_w_size[w] = max(max_by_w_size[w], value)
        insort_left(bounding_indices, i)
    # Sweep window sizes largest-to-smallest: a value dominating a window of
    # size w also dominates (and answers) every smaller window size.
    m = -float('inf')
    maxes = []
    for w in reversed(range(1, len(lst) + 1)):
        m = max(m, max_by_w_size[w])
        maxes.append(m)
    return reversed(maxes)
def bsearch(lst, target):
    """Binary search: index in sorted `lst` at which `target` would be inserted.

    When `target` is already present, the slot just right of the matching
    element is returned (callers do not rely on which side, since indices in
    this program are unique).
    """
    lo, hi = 0, len(lst)
    while lo < hi:
        mid = (lo + hi) // 2
        probe = lst[mid]
        if probe == target:
            return mid + 1
        if probe < target:
            lo = mid + 1
        else:
            hi = mid
    return lo
def riddle_dp(arr):
    '''
    Quadratic DP baseline: min(w, p) = min(min(w-1, p), min(w-1, p+1)).
    Too slow to pass large test cases. See `riddle`.
    '''
    n = len(arr)
    # (window_size, position) -> minimum of that window; seed with size-1 windows.
    window_min = {(1, pos): val for pos, val in enumerate(arr)}
    for size in range(2, n + 1):
        for pos in range(n - size + 1):
            window_min[(size, pos)] = min(window_min[(size - 1, pos)],
                                          window_min[(size - 1, pos + 1)])
    return [max(window_min[(size, pos)] for pos in range(n - size + 1))
            for size in range(1, n + 1)]
if __name__ == '__main__':
    # HackerRank harness: read n and the array from stdin, write the
    # space-separated per-window maxima to the path in OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    n = int(input())
    arr = list(map(int, input().rstrip().split()))
    res = riddle(arr)
    fptr.write(' '.join(map(str, res)))
    fptr.write('\n')
    fptr.close()
|
8,308 | 758e5b9a65132c4bdee4600e79c27f9c0f272312 | import pymysql
import pymssql
import socket
import threading
from time import sleep
address = ('127.0.0.1', 20176)
usermode = {1: 'Wangcz_Students',
2: 'Wangcz_Teachers',
3: 'Wangcz_Admin'
}
def checkuser(username, password, cursor, user_db):
    """Return all rows in table `user_db` matching (username, password).

    The table name comes from the server's fixed `usermode` mapping, never
    from the client, so interpolating it is acceptable (SQL cannot
    parameterize identifiers).  The credential values ARE client-supplied, so
    they are now passed as bound query parameters instead of being formatted
    into the SQL text (defense-in-depth against injection; the original
    relied solely on int() coercion).

    :raises ValueError: if username/password are not integer strings
        (unchanged from the original int() coercion).
    """
    query = "select * from %s WHERE username = %%s AND password = %%s" % user_db
    cursor.execute(query, (int(username), int(password)))
    return cursor.fetchall()
def tcplink(sock, addr):
    """Serve one client: answer login requests until the peer sends an
    empty/falsy payload, then close the socket.

    :param sock: connected TCP socket for this client
    :param addr: client address tuple (unused, kept for the Thread signature)
    """
    conn = pymysql.connect()
    cursor = conn.cursor()
    while True:
        bytedata = sock.recv(1024)
        # SECURITY NOTE(review): eval() on raw network bytes executes
        # arbitrary client-supplied code; switch to json.loads or
        # ast.literal_eval once the client wire format is confirmed.
        data = eval(bytedata.decode())
        sleep(1)
        if data:
            # BUG FIX: the original condition
            #   'username' and 'password' and 'login_mode' in data.keys()
            # only tested 'login_mode' ('username'/'password' were just truthy
            # literals). Require all three keys.
            if all(key in data for key in ('username', 'password', 'login_mode')):
                # BUG FIX: original indexed with the undefined name
                # `login_mode`; the string key 'login_mode' was intended.
                if checkuser(data['username'], data['password'], cursor=cursor,
                             user_db=usermode[data['login_mode']]):
                    sock.send(b'Login success')  # login succeeded
                else:
                    sock.send(b'Error')  # bad credentials
        else:
            break
    sock.close()
if __name__ == '__main__':
    # TCP accept loop: one handler thread per client connection.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(address)
    s.listen(10)
    while True:
        sock,addr = s.accept()
        t = threading.Thread(target=tcplink,args=(sock,addr))
        # NOTE(review): the thread is created but never started - a
        # `t.start()` call appears to be missing (or the file is truncated).
8,309 | dc28c3426f47bef8b691a06d54713bc68696ee44 | #!/usr/bin/env python3
import numpy as np
import os
import random
import pandas as pd
def read_chunk(reader, chunk_size):
    """Pull `chunk_size` records from `reader` and stack them column-wise.

    Each record is a dict; values are accumulated per key, then the four
    known columns are converted to numpy arrays.

    :param reader: object exposing read_next() -> dict
    :param chunk_size: number of records to consume
    :return: dict of column name -> list (numpy array for the known columns)
    """
    columns = {}
    for _ in range(chunk_size):
        record = reader.read_next()
        for key, value in record.items():
            columns.setdefault(key, []).append(value)
    for key in ('input', 'masking', 'timestamp', 'label'):
        columns[key] = np.array(columns[key])
    return columns
|
8,310 | a5eeafef694db04770833a4063358e8f32f467b0 | import os
from typing import List, Optional, Sequence
import boto3
from google.cloud import storage
from ..globals import GLOBALS, LOGGER
def set_gcs_credentials():
    """Materialize the GCS service-account key file from AWS Secrets Manager.

    No-op when the credentials file already exists on disk; otherwise fetches
    the secret string and writes it to GLOBALS.google_application_credentials.
    """
    if os.path.exists(GLOBALS.google_application_credentials):
        return
    secrets_client = boto3.client(
        "secretsmanager",
        region_name=GLOBALS.aws_region,
        endpoint_url=GLOBALS.aws_endpoint_uri,
    )
    response = secrets_client.get_secret_value(SecretId=GLOBALS.gcs_key_secret_arn)
    os.makedirs(
        os.path.dirname(GLOBALS.google_application_credentials),
        exist_ok=True,
    )
    with open(GLOBALS.google_application_credentials, "w") as f:
        f.write(response["SecretString"])
def get_gs_files(
    bucket: str,
    prefix: str,
    limit: Optional[int] = None,
    exit_after_max: Optional[int] = None,
    extensions: Sequence[str] = tuple(),
) -> List[str]:
    """Get all matching files in GCS.

    Adapted from data API.

    :param bucket: GCS bucket name
    :param prefix: object-name prefix to list under
    :param limit: forwarded to list_blobs as max_results (caps the listing itself)
    :param exit_after_max: stop after this many matches (post-filter cap)
    :param extensions: keep only names ending in one of these; empty keeps all
    :return: matching blob names
    """
    set_gcs_credentials()
    storage_client = storage.Client.from_service_account_json(
        GLOBALS.google_application_credentials
    )
    matches: List[str] = list()
    num_matches: int = 0
    blobs = list(storage_client.list_blobs(bucket, prefix=prefix, max_results=limit))
    LOGGER.info(f"Found files under gs://{bucket}/{prefix}: {blobs}")
    for blob in blobs:
        if not extensions or any(blob.name.endswith(ext) for ext in extensions):
            matches.append(blob.name)
            num_matches += 1
            if exit_after_max and num_matches >= exit_after_max:
                break
    return matches
def get_gs_subfolders(
    bucket: str,
    prefix: str,
) -> List[str]:
    """Return the immediate "subfolder" names under gs://bucket/prefix.

    Uses delimiter-based listing; the returned names are relative to `prefix`
    and stripped of slashes.
    """
    set_gcs_credentials()
    storage_client = storage.Client.from_service_account_json(
        GLOBALS.google_application_credentials
    )
    delimiter = "/"
    if not prefix.endswith(delimiter):
        prefix = prefix + delimiter
    blobs = storage_client.list_blobs(bucket, prefix=prefix, delimiter=delimiter)
    # list_blobs is lazy: `prefixes` is only populated once iteration begins.
    try:
        _ = next(blobs)
    except StopIteration:
        pass
    # BUG FIX: the original used found_prefix.lstrip(prefix), but str.lstrip
    # strips any leading characters *contained in* prefix (a character set),
    # not the prefix string itself - e.g. a folder "data/batch" under prefix
    # "data/" would lose its leading "b"/"a"/"t" characters. Slice the literal
    # prefix off instead.
    found_prefixes = [
        (found_prefix[len(prefix):] if found_prefix.startswith(prefix)
         else found_prefix).strip("/")
        for found_prefix in blobs.prefixes
    ]
    return found_prefixes
def get_gs_file_as_text(
    bucket: str,
    key: str,
) -> str:
    """
    Get contents of a file as a string (downloaded and decoded as UTF-8).

    :param bucket: GCS bucket name
    :param key: full object name of the blob
    """
    set_gcs_credentials()
    storage_client = storage.Client.from_service_account_json(
        GLOBALS.google_application_credentials
    )
    blob = storage_client.get_bucket(bucket).get_blob(key)
    return blob.download_as_text(encoding="utf-8")
|
8,311 | 398c28265e61831ba65b4ae2a785e57c0fa5b6d2 |
class Solution:
    def toGoatLatin(self, S: str) -> str:
        """Convert sentence S to "Goat Latin".

        Rules: a word starting with a vowel gets "ma" appended; otherwise its
        first letter moves to the end before "ma"; the i-th word (1-based)
        additionally gets i trailing 'a' characters.

        The original body was broken (no return statement, undefined `res`,
        a dangling `for` loop, and it only recognized lowercase vowels); this
        is the completed implementation, made case-insensitive per the
        problem's definition of a vowel.
        """
        def exchange(word):
            if word[0].lower() in "aeiou":
                return word + "ma"
            return word[1:] + word[0] + "ma"

        words = S.split(" ")
        return " ".join(exchange(word) + "a" * (index + 1)
                        for index, word in enumerate(words))
if __name__ == "__main__":
    # Quick manual check of Solution.toGoatLatin.
    s = Solution()
    str2 = "I speak Goat Latin"
    print(s.toGoatLatin(str2))
|
8,312 | b16ad4bae079159da7ef88b61081d7763d4ae9a0 | #!/usr/bin/env python
##!/work/local/bin/python
##!/work/local/CDAT/bin/python
import sys,getopt
import matplotlib.pyplot as plt
def read():
    """Read whitespace-separated number pairs from stdin into two float lists.

    Only the first two fields of each line are used; extra fields are ignored.
    """
    xs, ys = [], []
    for line in sys.stdin:
        first, second = line.split()[:2]
        xs.append(float(first))
        ys.append(float(second))
    return xs, ys
def plot(x, y, xlabel, ylabel, title, fn):
    """Scatter-plot y vs x with a red y=x reference line and save as PNG.

    :param x, y: data sequences of equal length
    :param xlabel, ylabel: axis labels
    :param title: optional figure title (skipped when falsy)
    :param fn: output filename; falls back to 'TMP_scat.png' when empty
    """
    fig = plt.figure(figsize=(6.0, 6.0))
    ax = fig.add_subplot(111)
    ax.grid(True)
    if title:
        ax.set_title(title)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.scatter(x, y, s=3, marker='o')
    # Reference diagonal over the x-range: points above/below show bias.
    mx = max(x)
    mn = min(x)
    ax.plot([mn, mx], [mn, mx], 'r-')
    if fn:
        fname = fn
    else:
        fname = 'TMP_scat.png'
    fig.savefig(fname, format='png')
    # print() call form works on both Python 2 and 3 (original used the
    # py2-only print statement, which is a SyntaxError on Python 3).
    print('WROTE --> %s' % fname)
######################################
use = '''
Usage: %s
-h help
'''
if __name__ == '__main__':
    def usage():
        # Print the usage string and exit non-zero.
        sys.stderr.write(use % sys.argv[0])
        sys.exit(1)
    try:
        (opts, args) = getopt.getopt(sys.argv[1:], 'hx:y:o:t:')
    except getopt.error:
        usage()
    # Defaults: generic axis labels, no title, auto-named output file.
    fn = ''
    x = 'X'
    y = 'Y'
    title = ''
    for (opt,val) in opts:
        if opt == '-x':
            x = val
        elif opt == '-y':
            y = val
        elif opt == '-t':
            title = val
        elif opt == '-o':
            fn = val
        else:
            # NOTE(review): Python 2 raise syntax, and `OptionError` is never
            # defined or imported - this branch would fail with a NameError
            # if ever hit; usage() below it is unreachable.
            raise OptionError, opt
            usage()
    #if len(args) != 1:
    #    usage()
    #fn = args[0]
    xv,yv = read()
    plot(xv,yv,x,y,title,fn)
|
8,313 | 19f202c32e1cf9f7ab2663827f1f98080f70b83e | from django.conf import settings
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden
from django.views.decorators.csrf import csrf_exempt
from linebot import LineBotApi, WebhookParser
from linebot.exceptions import InvalidSignatureError, LineBotApiError
from linebot.models import MessageEvent, TextMessage
from module import func
from urllib.parse import parse_qsl
from func5api.models import users
from django.shortcuts import render
line_bot_api = LineBotApi(settings.LINE_CHANNEL_ACCESS_TOKEN)
parser = WebhookParser(settings.LINE_CHANNEL_SECRET)
@csrf_exempt
def callback(request):
    """LINE webhook endpoint: verify the signature, register unseen users,
    and dispatch text messages to the matching handler in `module.func`.

    Returns 403 on a bad signature, 400 on LINE API errors or non-POST.
    """
    if request.method == 'POST':
        signature = request.META['HTTP_X_LINE_SIGNATURE']
        body = request.body.decode('utf-8')
        try:
            events = parser.parse(body, signature)
        except InvalidSignatureError:
            return HttpResponseForbidden()
        except LineBotApiError:
            return HttpResponseBadRequest()
        for event in events:
            if isinstance(event, MessageEvent):
                user_id = event.source.user_id  # fetch the sender's user_id
                if not(users.objects.filter(uid = user_id).exists()):  # register unseen user_ids
                    unit = users.objects.create(uid = user_id)
                    unit.save()  # persist the user_id to the database
                if isinstance(event.message, TextMessage):
                    mtext = event.message.text
                    # Dispatch on exact text or on prefix conventions.
                    if mtext == '@修繕申請':
                        func.sendFix(event, user_id)
                    elif mtext =='@修繕查詢':
                        func.fix_inquire(event, user_id)
                    elif mtext == 'admin_mode':
                        func.judge(event, mtext, user_id)
                    elif mtext[:6] == '123456' and len(mtext) > 6:  # all
                        func.judge(event, mtext, user_id)
                    elif mtext[:2] == '++' and len(mtext) > 2:  # specify
                        func.judge(event, mtext, user_id)
                    elif mtext[:2] == '##' and len(mtext) > 2:
                        func.manageForm(event, mtext, user_id)
                    elif mtext[:3] == '!!!' and len(mtext) > 3:
                        func.personData(event, mtext, user_id)
        return HttpResponse()
    else:
        return HttpResponseBadRequest()


def listall(request):
    """Render listall.html with all users ordered by name."""
    user = users.objects.all().order_by('name')
    return render(request, "listall.html", locals())
|
8,314 | fd6cf903490ff4352e4721282354a68437ecb1e0 | from socket import *
from multiprocessing import Process
import sys
ADDR = ("127.0.0.1", 8888)
udp_socket = socket(AF_INET, SOCK_DGRAM)
# udp_socket.bind(("0.0.0.0",6955)) # udp套接字在一段时间不链接后,会自动重新分配端口,所以需要绑定
def login():
    """Prompt for a nickname until the server accepts it; return the name.

    The server answers "0" when the nickname is already taken.
    """
    while True:
        name = input("请输入昵称(不能重复)")
        msg = "LOGIN" + "##" + name
        udp_socket.sendto(msg.encode(), ADDR)
        data, addr = udp_socket.recvfrom(1024)
        if data.decode() == "0":
            # "0" means the nickname is taken - ask again.
            print("昵称已存在,请重新输入")
            continue
        else:
            print("你已进入聊天室")
            return name


def chat(name):
    """Foreground send loop; spawns a daemon process that prints incoming messages."""
    p = Process(target=receive, daemon=True)
    p.start()
    while True:
        try:
            content = input(">>>>")
        except KeyboardInterrupt:
            print("程序退出")
            # Ctrl-C while blocked in input(): fall through to my_exit below.
            content = ""
        if not content:
            my_exit(name)
        msg = "CHAT" + "##" + f"{name}:" + content
        udp_socket.sendto(msg.encode(), ADDR)
        print("你发送了一条消息")


def my_exit(name):
    """Notify the server we are leaving, then terminate the process."""
    msg = "EXIT" + "##" + name
    print("您已退出聊天室")
    udp_socket.sendto(msg.encode(), ADDR)
    sys.exit()


def receive():
    """Child-process loop: print every datagram the server forwards to us."""
    while True:
        data, addr = udp_socket.recvfrom(1024)
        print("\n" + data.decode() + "\n>>>", end="")


def main():
    """Log in, then enter the chat loop."""
    name = login()
    chat(name)


if __name__ == '__main__':
    main()
|
8,315 | 88445d8466d7acbf29d2525c7e322611d66494cd | import sys
# Py2/3 compatibility shim: expose a lazy `izip` on both versions
# (itertools.izip was removed in Python 3, where built-in zip is already lazy).
if sys.version_info.major == 2:
    from itertools import izip
else:
    izip = zip
|
8,316 | 6e07dcc3f3b8c7fbf8ce8d481b9612e7496967bd | Ylist = ['yes', 'Yes', 'Y', 'y']
# Accepted spellings for each quiz answer (crude free-text normalization:
# membership tests against hand-enumerated variants).
Nlist = ['no', 'No', 'N', 'n']
America = ['America', 'america', 'amer', 'rica']
TRW = ['1775', 'The Revolutionary war', 'the Revolutionary war', 'the revolutionary war', 'The Revolutionary War',
'trw', 'Trw', 'TRW']
# NOTE(review): TCW repeats 'The Civil war' and 'The civil War' - harmless
# for membership tests, but redundant.
TCW = ['1861', 'The civil war', 'The civil War', 'The Civil war', 'The Civil war', 'The civil War', 'The Civil War',
'TCW', 'tcw', 'Tcw']
TGW = ['1917', 'The Great War', 'the great war', 'the great War', 'the Great war', 'The great war', 'WW1', 'ww1', 'Ww1',
'wW1', 'World War One', 'World war 1']
WW2 = ['1941', 'WW2', 'ww2', 'Ww2', 'W2', 'World war two', 'World war two', 'World War 2', 'World War Two',
'world war two', 'world war two']
# Russia
Russia = ['Russia', 'russia', 'rusia', 'ra', 'Ra', 'Rusia', 'Ru']
RJW = ['1904', 'TRJW', 'trjw']
|
8,317 | fcd2bd91dff3193c661d71ade8039765f8498fd4 | '''
Created on Dec 18, 2011
@author: ppa
'''
import unittest
from ultrafinance.pyTaLib.indicator import Sma
class testPyTaLib(unittest.TestCase):
    """Unit tests for the pyTaLib indicator helpers."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testSma(self):
        # Feed 1..5 into a 3-period SMA and check each rolling average;
        # expected values imply windows shorter than the period average
        # whatever data is available (1 -> 1, (1+2)/2 -> 1.5, then full windows).
        sma = Sma(period = 3)
        expectedAvgs = [1, 1.5, 2, 3, 4]
        for index, number in enumerate(range(1, 6) ):
            self.assertEqual(expectedAvgs[index], sma(number))
|
8,318 | 0699c9f70f1c16b4cb9837edf7a4ef27f021faec | def modCount(n, m):
if(m <= n):
inBetween = n - m
dividible = []
for x in range(m+1, n):
if(x%m == 0):
dividible.append(x)
return 'There are {} numbers between {} and {} \nand the ones that are dividible by {} are {}'.format(inBetween, m, n, m, dividible)
else:
return 'n must be higher value then m'
print(modCount(10,2))
|
8,319 | 81c9cabaa611f8e884708d535f0b99ff83ec1c0d | from setuptools import setup
from os import path

# Use the README as the long description so PyPI renders the project page.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='SumoSound',
    packages=['SumoSound'],
    version='1.0.2',
    license='MIT',
    description='A python library to add 3D sound to a Sumo traffic simulation.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='Patrick Malcolm',
    author_email='patmalcolm91@gmail.com',
    url='https://github.com/patmalcolm91/SumoSound',
    download_url='https://github.com/patmalcolm91/SumoSound/archive/v_1.0.2.tar.gz',
    keywords=['sumo', 'TraCI', 'sound', 'sound effects', '3D sound', 'OpenAL', 'traffic'],
    install_requires=[
        'pyopenal',
    ],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8'
    ],
    # Ship the bundled stock .wav effects inside the package.
    package_data={'SumoSound': ['stock_sounds/*.wav']}
)
|
8,320 | edf704d720abdb09d176937664c9ba98bcd253a5 | message = input()
# Character classes for the tally; consonants cover both cases via upper().
vowel = 'aeiouAEIOU'
consonant = 'bcdfghjklmnpqrstvwxyz'
consonant += consonant.upper()
vowel_count = 0
consonant_count = 0
# Classify each character of the input line; anything else
# (digits, spaces, punctuation) is ignored.
for c in message:
    if c in vowel:
        vowel_count += 1
    elif c in consonant:
        consonant_count += 1
print(vowel_count, consonant_count)
|
8,321 | ad9bb34fdb05ab885f4871693729449f3618603a | #Script to extract features from chess score data file stockfish.csv
import numpy as np
import pandas as pd
#Load in and format raw chess game scoring data
raw_scores = [line.strip().split(",")[1].split() for line in open("stockfish.csv")][1:]
#Initialize containers for features to extract
game_length = []
average_score = []
score_stdev = []
largest_gain = []
largest_drop = []
max_score = []
min_score = []
ending_score = []
white_avg_improve = []
black_avg_improve = []
white_median_improve = []
black_median_improve = []
white_q1_improve =[]
white_q2_improve =[]
white_q3_improve =[]
white_q4_improve =[]
black_q1_improve =[]
black_q2_improve =[]
black_q3_improve =[]
black_q4_improve =[]
game_score10 = []
game_score20 = []
game_score30 = []
game_score40 = []
game_score50 = []
game_score60 = []
game_score70 = []
game_score80 = []
game_score90 = []
game_score100 = []
white_q1_max =[]
white_q2_max =[]
white_q3_max =[]
white_q4_max =[]
black_q1_max =[]
black_q2_max =[]
black_q3_max =[]
black_q4_max =[]
white_q1_min =[]
white_q2_min =[]
white_q3_min =[]
white_q4_min =[]
black_q1_min =[]
black_q2_min =[]
black_q3_min =[]
black_q4_min =[]
white_q1_stdev =[]
white_q2_stdev =[]
white_q3_stdev =[]
white_q4_stdev =[]
black_q1_stdev =[]
black_q2_stdev =[]
black_q3_stdev =[]
black_q4_stdev =[]
white_5_improve = []
white_10_improve = []
white_15_improve = []
white_20_improve = []
white_25_improve = []
white_30_improve = []
white_35_improve = []
white_40_improve = []
white_45_improve = []
white_50_improve = []
white_55_improve = []
white_60_improve = []
white_65_improve = []
white_70_improve = []
white_75_improve = []
black_5_improve = []
black_10_improve = []
black_15_improve = []
black_20_improve = []
black_25_improve = []
black_30_improve = []
black_35_improve = []
black_40_improve = []
black_45_improve = []
black_50_improve = []
black_55_improve = []
black_60_improve = []
black_65_improve = []
black_70_improve = []
black_75_improve = []
#Loop through game data, calculate and append new features to feature containers
def _quarters(seq):
    """Split seq into its four quarter slices (the last takes the remainder)."""
    q = len(seq) // 4
    return [seq[0:q], seq[q:2 * q], seq[2 * q:3 * q], seq[3 * q:]]


def _quarters_padded(seq):
    """Quarter slices widened by one element so min()/max() never see an empty slice."""
    q = len(seq) // 4
    return [seq[0:1 + q], seq[q:1 + 2 * q], seq[2 * q:1 + 3 * q], seq[3 * q:]]


# The original loop appended to each of these containers through ~240 lines of
# copy-pasted if/else chains; group the existing lists so the work can be
# driven by loops instead. Order inside each group matters and mirrors the
# original append order.
_white_segments = [white_5_improve, white_10_improve, white_15_improve,
                   white_20_improve, white_25_improve, white_30_improve,
                   white_35_improve, white_40_improve, white_45_improve,
                   white_50_improve, white_55_improve, white_60_improve,
                   white_65_improve, white_70_improve, white_75_improve]
_black_segments = [black_5_improve, black_10_improve, black_15_improve,
                   black_20_improve, black_25_improve, black_30_improve,
                   black_35_improve, black_40_improve, black_45_improve,
                   black_50_improve, black_55_improve, black_60_improve,
                   black_65_improve, black_70_improve, black_75_improve]
_score_snapshots = [game_score10, game_score20, game_score30, game_score40,
                    game_score50, game_score60, game_score70, game_score80,
                    game_score90, game_score100]
_white_quarter_improve = [white_q1_improve, white_q2_improve, white_q3_improve, white_q4_improve]
_black_quarter_improve = [black_q1_improve, black_q2_improve, black_q3_improve, black_q4_improve]
_white_quarter_max = [white_q1_max, white_q2_max, white_q3_max, white_q4_max]
_black_quarter_max = [black_q1_max, black_q2_max, black_q3_max, black_q4_max]
_white_quarter_min = [white_q1_min, white_q2_min, white_q3_min, white_q4_min]
_black_quarter_min = [black_q1_min, black_q2_min, black_q3_min, black_q4_min]
_white_quarter_stdev = [white_q1_stdev, white_q2_stdev, white_q3_stdev, white_q4_stdev]
_black_quarter_stdev = [black_q1_stdev, black_q2_stdev, black_q3_stdev, black_q4_stdev]

# Loop through game data, calculate and append new features to the containers.
for game in raw_scores:
    game_len = len(game) + 1  # +1 guards against divide-by-zero on empty games
    total = 0
    prev = None
    player = 1  # toggles between the two colours as changes are recorded
    max_so_far = -100
    min_so_far = 100
    max_drop = 0
    max_gain = 0
    white_improve = [0]  # seeded with 0 so the lists are never empty
    black_improve = [0]
    game_nums = [0]
    for score in game:
        if score != "NA":
            score = int(score)
            game_nums.append(score)
            total += score
            if prev is not None:
                change = score - prev
                max_drop = min(max_drop, change)
                max_gain = max(max_gain, change)
                # First recorded change goes to black (player starts at 1),
                # then alternates. NOTE(review): attribution order preserved
                # from the original; toggling only when a change is recorded.
                if player == 1:
                    black_improve.append(change)
                else:
                    white_improve.append(change)
                player = abs(player - 1)
            prev = score
            max_so_far = max(max_so_far, score)
            min_so_far = min(min_so_far, score)
    # Per-game aggregate features.
    white_avg = sum(white_improve) / (game_len / 2)
    black_avg = sum(black_improve) / (game_len / 2)
    game_length.append(game_len)
    average_score.append(total / game_len)
    score_stdev.append(np.std(np.array(game_nums)))
    largest_gain.append(max_gain)
    largest_drop.append(max_drop)
    max_score.append(max_so_far)
    min_score.append(min_so_far)
    white_avg_improve.append(white_avg)
    black_avg_improve.append(black_avg)
    white_median_improve.append(sorted(white_improve)[len(white_improve) // 2])
    black_median_improve.append(sorted(black_improve)[len(black_improve) // 2])
    # Quarter-by-quarter features.
    # NOTE(review): `sum(part) / len(seq) // 4` floor-divides the mean by 4
    # rather than dividing the slice sum by the quarter length; preserved
    # verbatim from the original (sum(part) / (len(seq) // 4) was probably
    # intended - changing it would alter the emitted feature values).
    for container, part in zip(_white_quarter_improve, _quarters(white_improve)):
        container.append(sum(part) / len(white_improve) // 4)
    for container, part in zip(_black_quarter_improve, _quarters(black_improve)):
        container.append(sum(part) / len(black_improve) // 4)
    for container, part in zip(_white_quarter_max, _quarters_padded(white_improve)):
        container.append(max(part))
    for container, part in zip(_black_quarter_max, _quarters_padded(black_improve)):
        container.append(max(part))
    for container, part in zip(_white_quarter_min, _quarters_padded(white_improve)):
        container.append(min(part))
    for container, part in zip(_black_quarter_min, _quarters_padded(black_improve)):
        container.append(min(part))
    for container, part in zip(_white_quarter_stdev, _quarters(white_improve)):
        container.append(np.std(np.array(part)))
    for container, part in zip(_black_quarter_stdev, _quarters(black_improve)):
        container.append(np.std(np.array(part)))
    # Five-move segment averages; games shorter than the segment fall back to
    # the colour's overall average improvement.
    for seg_index, container in enumerate(_white_segments):
        end = 5 * (seg_index + 1)
        if len(white_improve) >= end:
            container.append(sum(white_improve[end - 5:end]) / 5)
        else:
            container.append(white_avg)
    for seg_index, container in enumerate(_black_segments):
        end = 5 * (seg_index + 1)
        if len(black_improve) >= end:
            container.append(sum(black_improve[end - 5:end]) / 5)
        else:
            container.append(black_avg)
    # Raw score snapshots every 10 plies (0 when the game ended earlier).
    for snap_index, container in enumerate(_score_snapshots):
        ply = 10 * (snap_index + 1)
        container.append(game_nums[ply] if len(game_nums) > ply else 0)
    # Final evaluation (0 when the game produced no usable scores).
    ending_score.append(prev if prev else 0)
chess_dict = {"game_length":game_length,"average_score":average_score,"score_stdev":score_stdev,"largest_gain":largest_gain,
"largest_drop":largest_drop,"max_score":max_score,"min_score":min_score,
"ending_score":ending_score, "white_avg_improve":white_avg_improve,
"black_avg_improve":black_avg_improve,"white_median_improve":white_median_improve,
"black_median_improve":black_median_improve,"white_q1_improve":white_q1_improve,
"white_q2_improve":white_q2_improve,
"white_q3_improve":white_q3_improve,
"white_q4_improve":white_q4_improve,"black_q1_improve":black_q1_improve,
"black_q2_improve":black_q2_improve,
"black_q3_improve":black_q3_improve,
"black_q4_improve":black_q4_improve,
'white_5_improve': white_5_improve,
'white_10_improve': white_10_improve,
'white_15_improve': white_15_improve,
'white_20_improve': white_20_improve,
'white_25_improve': white_25_improve,
'white_30_improve': white_30_improve,
'white_35_improve': white_35_improve,
'white_40_improve': white_40_improve,
'white_45_improve': white_45_improve,
'white_50_improve': white_50_improve,
'white_55_improve': white_55_improve,
'white_60_improve': white_60_improve,
'white_65_improve': white_65_improve,
'white_70_improve': white_70_improve,
'white_75_improve': white_75_improve,
'black_5_improve': black_5_improve,
'black_10_improve': black_10_improve,
'black_15_improve': black_15_improve,
'black_20_improve': black_20_improve,
'black_25_improve': black_25_improve,
'black_30_improve': black_30_improve,
'black_35_improve': black_35_improve,
'black_40_improve': black_40_improve,
'black_45_improve': black_45_improve,
'black_50_improve': black_50_improve,
'black_55_improve': black_55_improve,
'black_60_improve': black_60_improve,
'black_65_improve': black_65_improve,
'black_70_improve': black_70_improve,
'black_75_improve': black_75_improve,
'white_q1_max': white_q1_max,
'white_q2_max': white_q2_max,
'white_q3_max': white_q3_max,
'white_q4_max': white_q4_max,
'black_q1_max': black_q1_max,
'black_q2_max': black_q2_max,
'black_q3_max': black_q3_max,
'black_q4_max': black_q4_max,
'white_q1_min': white_q1_min,
'white_q2_min': white_q2_min,
'white_q3_min': white_q3_min,
'white_q4_min': white_q4_min,
'black_q1_min': black_q1_min,
'black_q2_min': black_q2_min,
'black_q3_min': black_q3_min,
'black_q4_min': black_q4_min,
'white_q1_stdev': white_q1_stdev,
'white_q2_stdev': white_q2_stdev,
'white_q3_stdev': white_q3_stdev,
'white_q4_stdev': white_q4_stdev,
'black_q1_stdev': black_q1_stdev,
'black_q2_stdev': black_q2_stdev,
'black_q3_stdev': black_q3_stdev,
'black_q4_stdev': black_q4_stdev,
'game_score10':game_score10,
'game_score20':game_score20,
'game_score30':game_score30,
'game_score40':game_score40,
'game_score50':game_score50,
'game_score60':game_score60,
'game_score70':game_score70,
'game_score80':game_score80,
'game_score90':game_score90,
'game_score100':game_score100
}
# Assemble the engineered features into one table, one row per event (1..50000).
chess_df = pd.DataFrame(chess_dict, index=list(range(1, 50001)))
chess_df.index.name = "Event"
# Persist the feature table for downstream modelling.
chess_df.to_csv("score_features.csv")
|
8,322 | 45dc9d362a2ddfd408f93452bda0b7338057ca81 | from django.db import models
from django.utils import timezone
from pprint import pprint
class Cast(models.Model):
    """A cast member of the comic: display name, portrait, and short bio."""
    name = models.CharField(max_length=50, blank=True, null=True)
    image = models.ImageField(upload_to='cast', blank=True, null=True)
    description = models.CharField(max_length=400, blank=True, null=True)
    def __str__(self):
        # Display label; NOTE(review): name is null=True, so str() of a row
        # with NULL name would raise TypeError — confirm names are always set.
        return self.name
class Issue(models.Model):
    """A comic issue: title, cover image, and its ordinal number."""
    title = models.CharField(max_length=200, blank=True, null=True)
    image = models.ImageField(upload_to='issues', blank=True, null=True)
    issue_number = models.IntegerField(blank=True, null=True)
    def __str__(self):
        # Display label; title is null=True, so this can fail on NULL titles.
        return self.title
class Comic(models.Model):
    """A single comic page, globally ordered by sort_number
    (= issue_number * MAX_PAGES_PER_ISSUE + page_number, see sortOrder)."""
    MAX_PAGES_PER_ISSUE = 1000
    sort_number = models.IntegerField(blank=True, null=True)
    page_number = models.IntegerField(blank=True, null=True )
    last_page = models.IntegerField(default=1)
    title = models.CharField(max_length=200, blank=True, null=True)
    issue = models.ForeignKey(Issue, blank=True, null=True, on_delete=models.DO_NOTHING)
    image = models.ImageField(upload_to='comics', blank=True, null=True)
    date_added = models.DateTimeField(
        help_text="Posted on: ",
        default = timezone.now, null=True, blank=True
    )
    cast_members = models.ManyToManyField(Cast, related_name="comics", blank=True)
    class Meta:
        # Newest/highest page first.
        ordering = ['-sort_number', '-date_added']
    def __str__(self):
        return self.title
    @staticmethod
    def sortOrder(page_number):
        """Map a page number to a global sort key across issues."""
        # TODO: ADD ISSUE 3 LOGIC WHEN WE GET THERE
        # Pages below 33 belong to issue 1; everything else to issue 2.
        if int(page_number) < 33:
            issue_num = 1
        else:
            issue_num = 2
        # print('ISSUE NUM: ', issue_num)
        order = issue_num * Comic.MAX_PAGES_PER_ISSUE + int(page_number)
        # print ('SORT ORDER: ', order)
        return order
    def save(self, *args, **kwargs):
        # Recompute the global sort key on every save so ordering stays consistent.
        self.sort_number = Comic.sortOrder(self.page_number)
        super(Comic, self).save(*args, **kwargs) # Call the "real" save() method.
class ComicManager(models.Model):
    """Holds the most recently published page number; saving it pushes the
    value onto every Comic row that is behind."""
    last_page = models.IntegerField(default=1)
    class Meta:
        verbose_name_plural = ("Comic Manager")
    def __str__(self):
        return str(self.last_page)
    def save(self, *args, **kwargs):
        super(ComicManager, self).save(*args, **kwargs)
        # TODO - automate this so that anytime a comic is saved it checks last page status and runs here
        # update all Comic instances to have this last page
        # NOTE(review): saves each Comic individually (re-running Comic.save()
        # per row) — O(n) queries; fine for small tables.
        comics = Comic.objects.all()
        for comic in comics:
            if comic.last_page < self.last_page:
                comic.last_page = self.last_page
                comic.save()
class HeaderImage(models.Model):
    """A titled image used in the site header."""
    title = models.CharField(max_length=100, blank=True, null=True)
    image = models.ImageField(upload_to='images', blank=True, null=True)
    class Meta:
        verbose_name_plural = ('Header Images')
    def __str__(self):
        return self.title
|
8,323 | 19221823f14cf06a55d445fc241fc04e64e5873c | # This is the template file for Lab #5, Task #1
import numpy
import lab5
def digitize(samples,threshold):
    """Hard-decision slicer: 1 where a sample exceeds threshold, else 0."""
    above = samples > threshold
    return above * 1
class ViterbiDecoder:
    """Hard-decision Viterbi decoder for a rate-1/r convolutional code.

    Python 2 code (xrange, print statements); depends on the course-supplied
    `lab5` module for expected_parity() and hamming().
    """
    # given the constraint length and a list of parity generator
    # functions, do the initial set up for the decoder. The
    # following useful instance variables are created:
    # self.k
    # self.nstates
    # self.r
    # self.predecessor_states
    # self.expected_parity
    def __init__(self,k,glist):
        self.k = k # constraint length
        self.nstates = 2**(k-1) # number of states in state machine
        # number of parity bits transmitted for each message bit
        self.r = len(glist)
        # States are named using (k-1)-bit integers in the range 0 to
        # nstates-1. The bit representation of the integer corresponds
        # to state label in the transition diagram. So state 10 is
        # named with the integer 2, state 00 is named with the
        # integer 0.
        # for each state s, figure out the two states in the diagram
        # that have transitions ending at state s. Record these two
        # states as a two-element tuple.
        self.predecessor_states = \
          [((2*s+0) % self.nstates,(2*s+1) % self.nstates)
           for s in xrange(self.nstates)]
        # this is a 2D table implemented as a list of lists.
        # self.expected_parity[s1][s2] returns the r-bit sequence
        # of parity bits the encoder transmitted when make the
        # state transition from s1 to s2.
        self.expected_parity = \
          [[lab5.expected_parity(s1,s2,k,glist) \
            if s1 in self.predecessor_states[s2] else None
            for s2 in xrange(self.nstates)]
           for s1 in xrange(self.nstates)]
    # expected is an r-element list of the expected parity bits
    # (or you can also think of them as voltages given how we send
    # bits down the channel). received is an r-element list of
    # actual sampled voltages for the incoming parity bits.
    # This is a hard-decision branch metric, so, as described in
    # lab write up, digitize the received voltages to get bits and
    # then compute the Hamming distance between the expected sequence
    # and the received sequences, return that as the branch metric.
    # Consider using lab5.hamming(seq1,seq2) which computes the
    # Hamming distance between two binary sequences.
    def branch_metric(self,expected,received):
        """Hamming distance between expected parity and sliced received voltages."""
        assert len(expected) == len(received) # they must be the same length
        vTh = 0.5
        dSamples = digitize(received,vTh)
        return lab5.hamming(expected,dSamples)
    # compute self.PM[...,n] from the batch of r parity bits and
    # the path metrics for self.PM[...,n-1] computed on the previous
    # iteration. Follow the algorithm described in the lab
    # write up. In addition to making an entry for self.PM[n,s] for
    # each state s, keep track of the most-likely predecessor
    # for each state in the self.Predecessor array. You'll probably
    # find the following instance variables and methods useful:
    # self.predecessor_states
    # self.expected_parity
    # self.branch_metric()
    def viterbi_step(self,n,received_voltages):
        """Advance the trellis one column: add-compare-select for each state."""
        for state in xrange(self.nstates):
            (alpha,beta) = self.predecessor_states[state]
            (pAlpha,pBeta) = (self.expected_parity[alpha][state],self.expected_parity[beta][state])
            bmAlpha = self.branch_metric(pAlpha,received_voltages)
            bmBeta = self.branch_metric(pBeta,received_voltages)
            pmAlpha = self.PM[alpha][n-1]+bmAlpha
            pmBeta = self.PM[beta][n-1]+bmBeta
            # Ties break toward alpha (<=).
            if pmAlpha <= pmBeta:
                self.PM[state][n] = pmAlpha
                self.Predecessor[state][n] = alpha
            else:
                self.PM[state][n] = pmBeta
                self.Predecessor[state][n] = beta
    # Identify the most-likely ending state of the encoder by
    # finding the state s which has the mimimum value of PM[s,n]
    # where n points to the last column of the trellis. If there
    # are several states with the same minimum value, the end of
    # the message has been corrupted by errors, so decrement n
    # and repeat the search. Keep doing this until a unique s is
    # found. Return the tuple (s,n).
    def most_likely_state(self,n):
        """Return (state, column) of the unique minimum-metric ending state."""
        minState = [0]
        minValue = self.PM[0,n]
        for state in range(1,self.nstates):
            if self.PM[state][n] < minValue:
                minState = [state]
                minValue = self.PM[state,n]
            elif self.PM[state][n] == minValue:
                minState.append(state)
        if len(minState) > 1: # message is corrupted by errors
            return self.most_likely_state(n-1)
        else:
            return (minState[0],n)
    # starting at state s at time n, use the Predecessor
    # array to find all the states on the most-likely
    # path. Each state contributes a message bit...
    def traceback(self,s,n):
        """Walk the Predecessor chain backwards and rebuild the message bits."""
        message = []
        while n > 0:
            # message bit that caused transition to
            # state s is also the high-order bit of
            # the state name
            message.append(s >> (self.k-2))
            # back to the next earlier state along the path
            s = self.Predecessor[s,n]
            n -= 1
        message.reverse()
        return message
    # figure out what the transmitter sent from info in the
    # received voltages
    def decode(self,received_voltages,debug=False):
        """Run the full Viterbi algorithm and return the decoded message bits."""
        # figure out how many columns they'll be in the trellis
        nreceived = len(received_voltages)
        # NOTE(review): hard-codes 2 parity bits per message bit here, but the
        # loop below strides by self.r — confirm this formula for r != 2.
        max_n = (nreceived/2) + 1
        # this is the path metric trellis itself, organized as a
        # 2D array: rows are the states, columns are the time points.
        # PM[s,n] is the metric for the most-likely path through the
        # trellis arriving at state s at time n.
        self.PM = numpy.zeros((self.nstates,max_n),dtype=numpy.float)
        # at time 0, the starting state is the most likely, the other
        # states are "infinitely" worse.
        self.PM[1:self.nstates,0] = 1000000
        # a 2D array: rows are the states, columns are the time
        # points, contents indicate the predecessor state for each
        # current state.
        self.Predecessor = numpy.zeros((self.nstates,max_n),
                                       dtype=numpy.int)
        # use the Viterbi algorithm to compute PM
        # incrementally from the received parity bits.
        n = 0
        for i in xrange(0,nreceived,self.r):
            n += 1
            # Fill in the next columns of PM, Predecessor based
            # on info in the next r incoming parity bits
            self.viterbi_step(n,received_voltages[i:i+self.r])
            # print out what was just added to the trellis state
            if debug:
                print self.PM[:,n],self.Predecessor[:,n]
        # find the most-likely ending state from the last row
        # of the trellis
        s,n = self.most_likely_state(n)
        # reconstruct message by tracing the most likely path
        # back through the matrix using self.Predecessor.
        return self.traceback(s,n)
    # print out final path metrics
    def dump_state(self):
        print self.PM[:,-1]
if __name__=='__main__':
    # Smoke test: K=3 code with generators (7, 6) decoding a short
    # hand-constructed parity stream (Python 2 print statement below).
    d = ViterbiDecoder(3,(7,6))
    received = numpy.array([1,1,1,0,1,1,0,0,0,1,1,0,0,0])
    message = d.decode(received,debug=True)
    print "decoded message =",message
8,324 | 32227029cb4e852536611f7ae5dec5118bd5e195 | # SPDX-License-Identifier: Apache-2.0
"""
.. _example-lightgbm-pipe:
Convert a pipeline with a LightGbm model
========================================
.. index:: LightGbm
*sklearn-onnx* only converts *scikit-learn* models into *ONNX*
but many libraries implement *scikit-learn* API so that their models
can be included in a *scikit-learn* pipeline. This example considers
a pipeline including a *LightGbm* model. *sklearn-onnx* can convert
the whole pipeline as long as it knows the converter associated to
a *LGBMClassifier*. Let's see how to do it.
Train a LightGBM classifier
+++++++++++++++++++++++++++
"""
import lightgbm
import onnxmltools
import skl2onnx
import onnx
import sklearn
import matplotlib.pyplot as plt
import os
from onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer
import onnxruntime as rt
from onnxruntime.capi.onnxruntime_pybind11_state import Fail as OrtFail
from skl2onnx import convert_sklearn, update_registered_converter
from skl2onnx.common.shape_calculator import (
calculate_linear_classifier_output_shapes,
) # noqa
from onnxmltools.convert.lightgbm.operator_converters.LightGbm import (
convert_lightgbm,
) # noqa
import onnxmltools.convert.common.data_types
from skl2onnx.common.data_types import FloatTensorType
import numpy
from sklearn.datasets import load_iris
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from lightgbm import LGBMClassifier
data = load_iris()
X = data.data[:, :2]  # keep only the first two iris features
y = data.target
# Shuffle samples with one shared permutation so X and y stay aligned.
ind = numpy.arange(X.shape[0])
numpy.random.shuffle(ind)
X = X[ind, :].copy()
y = y[ind].copy()
# Scaler + small LightGBM classifier in a single sklearn pipeline.
pipe = Pipeline(
    [("scaler", StandardScaler()), ("lgbm", LGBMClassifier(n_estimators=3))]
)
pipe.fit(X, y)
######################################
# Register the converter for LGBMClassifier
# +++++++++++++++++++++++++++++++++++++++++
#
# The converter is implemented in *onnxmltools*:
# `onnxmltools...LightGbm.py
# <https://github.com/onnx/onnxmltools/blob/master/onnxmltools/convert/
# lightgbm/operator_converters/LightGbm.py>`_.
# and the shape calculator:
# `onnxmltools...Classifier.py
# <https://github.com/onnx/onnxmltools/blob/master/onnxmltools/convert/
# lightgbm/shape_calculators/Classifier.py>`_.
##############################################
# Then we import the converter and shape calculator.
###########################
# Let's register the new converter.
# Teach skl2onnx about LGBMClassifier: shape calculator from skl2onnx,
# converter from onnxmltools.
update_registered_converter(
    LGBMClassifier,
    "LightGbmLGBMClassifier",
    calculate_linear_classifier_output_shapes,
    convert_lightgbm,
    options={"nocl": [True, False], "zipmap": [True, False, "columns"]},
)
##################################
# Convert again
# +++++++++++++
# Convert the whole pipeline; input is a float tensor with 2 columns.
model_onnx = convert_sklearn(
    pipe,
    "pipeline_lightgbm",
    [("input", FloatTensorType([None, 2]))],
    target_opset={"": 12, "ai.onnx.ml": 2},
)
# And save.
with open("pipeline_lightgbm.onnx", "wb") as f:
    f.write(model_onnx.SerializeToString())
###########################
# Compare the predictions
# +++++++++++++++++++++++
#
# Predictions with LightGbm.
print("predict", pipe.predict(X[:5]))
print("predict_proba", pipe.predict_proba(X[:1]))
##########################
# Predictions with onnxruntime.
try:
sess = rt.InferenceSession("pipeline_lightgbm.onnx")
except OrtFail as e:
print(e)
print("The converter requires onnxmltools>=1.7.0")
sess = None
if sess is not None:
pred_onx = sess.run(None, {"input": X[:5].astype(numpy.float32)})
print("predict", pred_onx[0])
print("predict_proba", pred_onx[1][:1])
##################################
# Display the ONNX graph
# ++++++++++++++++++++++
# Render the ONNX graph to PNG via graphviz (requires `dot` on PATH).
pydot_graph = GetPydotGraph(
    model_onnx.graph,
    name=model_onnx.graph.name,
    rankdir="TB",
    node_producer=GetOpNodeProducer(
        "docstring", color="yellow", fillcolor="yellow", style="filled"
    ),
)
pydot_graph.write_dot("pipeline.dot")
os.system("dot -O -Gdpi=300 -Tpng pipeline.dot")
image = plt.imread("pipeline.dot.png")
fig, ax = plt.subplots(figsize=(40, 20))
ax.imshow(image)
ax.axis("off")
#################################
# **Versions used for this example**
print("numpy:", numpy.__version__)
print("scikit-learn:", sklearn.__version__)
print("onnx: ", onnx.__version__)
print("onnxruntime: ", rt.__version__)
print("skl2onnx: ", skl2onnx.__version__)
print("onnxmltools: ", onnxmltools.__version__)
print("lightgbm: ", lightgbm.__version__)
|
8,325 | 1e292872c0c3c7f4ec0115f0769f9145ef595ead | # -*- coding: utf-8 -*-
# __author__ = 'XingHuan'
# 3/27/2018
import os
import imageio
import time
# Python 2 script: time random-access frame reads from a .mov via imageio.
# Point imageio at a local ffmpeg binary (Windows path).
os.environ['IMAGEIO_FFMPEG_EXE'] = 'D:/Program Files/ffmpeg-3.4/bin/ffmpeg.exe'
reader = imageio.get_reader('test1080.mov')
print reader
fps = reader.get_meta_data()['fps']
print fps
# for i, im in enumerate(reader):
#     print i
# Fetch an early and a late frame and print the seek+decode time of each.
nums = [10, 200]
for num in nums:
    a = time.time()
    image = reader.get_data(num)
    b = time.time()
    print b - a
# print image
8,326 | e265b2b2ccc0841ccb8b766de4ae2a869f2d280d | import tensorflow as tf
from keras import layers, Model, Input
from keras.utils import Progbar, to_categorical
from keras.datasets.mnist import load_data
import numpy as np
import matplotlib.pyplot as plt
import config
import datetime
img_height, img_width, _ = config.IMAGE_SHAPE
# Load MNIST; reshape to NHWC with a single channel and one-hot the labels.
(X, Y), (_, _) = load_data()
X = X.reshape((-1, img_height, img_width, 1))
X = X.astype("float32")
Y = to_categorical(Y, num_classes=10, dtype="float32")
def preprocess(img, lbl):
    """Scale pixels from [0, 255] to [-1, 1] and convert to a float32 tensor."""
    scaled = (img - 127.5) / 127.5
    tensor = tf.convert_to_tensor(scaled, dtype=tf.float32)
    return tensor, lbl
class Generator(Model):
    """Conditional GAN generator: (noise, one-hot label) -> 28x28x1 image.

    Output is tanh-activated, i.e. values lie in [-1, 1].
    """
    def __init__(self, name):
        super(Generator, self).__init__(name=name)
        # Project the concatenated noise+label up to a 7x7x128 feature map.
        self.dense = layers.Dense(7*7*128)
        # Two strided transposed convs upsample 7x7 -> 14x14 -> 28x28.
        self.conv1 = layers.Conv2DTranspose(128, kernel_size=5, padding="same")
        self.conv2 = layers.Conv2DTranspose(64, kernel_size=5, strides=2, padding="same")
        self.conv3 = layers.Conv2DTranspose(32, kernel_size=5, strides=2, padding="same")
        self.conv4 = layers.Conv2DTranspose(1, kernel_size=5, activation="tanh", padding="same")
        self.relu = layers.ReLU()
        self.bn1 = layers.BatchNormalization()
        self.bn2 = layers.BatchNormalization()
        self.bn3 = layers.BatchNormalization()
        self.bn4 = layers.BatchNormalization()
    def call(self, inputs, training=None, mask=None):
        """inputs is a (noise, label) pair; returns the generated image batch."""
        noise, label = inputs
        x = layers.Concatenate()([noise, label])
        x = self.dense(x)
        x = layers.Reshape(target_shape=(7, 7, 128))(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv1(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn3(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.bn4(x)
        x = self.relu(x)
        x = self.conv4(x)
        return x
    def get_config(self):
        # Keras serialization hook.
        return {'name': self.name}
class Discriminator(Model):
    """Conditional GAN discriminator: (image, one-hot label) -> P(real) in [0, 1]."""
    def __init__(self, name, img_shape=(28, 28, 1)):
        super(Discriminator, self).__init__(name=name)
        self.img_shape = img_shape
        self.conv1 = layers.Conv2D(32, kernel_size=5, strides=2)
        self.conv2 = layers.Conv2D(64, kernel_size=5, strides=2)
        self.conv3 = layers.Conv2D(128, kernel_size=5, strides=2, padding="same")
        self.conv4 = layers.Conv2D(256, kernel_size=5, padding="same")
        self.leaky_relu = layers.LeakyReLU(alpha=0.2)
        self.flatten = layers.Flatten()
        self.dense_final = layers.Dense(1, activation='sigmoid')
        # Projects the label onto an image-sized plane to concatenate as a channel.
        self.dense = layers.Dense(7*7*16)
    def call(self, inputs, training=None, mask=None):
        """inputs is an (image, label) pair; returns per-sample realness scores."""
        image, label = inputs
        # 7*7*16 == 28*28, so the label projection reshapes to one 28x28 channel.
        lb = self.dense(label)
        lb = layers.Reshape(target_shape=(28, 28, 1))(lb)
        x = layers.Concatenate()([image, lb])
        x = self.leaky_relu(x)
        x = self.conv1(x)
        x = self.leaky_relu(x)
        x = self.conv2(x)
        x = self.leaky_relu(x)
        x = self.conv3(x)
        x = self.leaky_relu(x)
        x = self.conv4(x)
        x = self.flatten(x)
        x = self.dense_final(x)
        return x
    def get_config(self):
        # Keras serialization hook.
        return {"img_shape": self.img_shape, "name": self.name}
    @classmethod
    def from_config(cls, config, custom_objects=None):
        return cls(**config)
gen = Generator(name="generator")
disc = Discriminator(name="discriminator", img_shape=config.IMAGE_SHAPE)
gen_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002)
disc_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0002)
# 80/20 train/validation split of the in-memory MNIST arrays.
dataset = tf.data.Dataset.from_tensor_slices((X, Y))
train_dataset = dataset.take(int(0.8 * len(X))).map(preprocess).shuffle(10000).batch(config.BATCH_SIZE)
val_dataset = dataset.skip(int(0.8 * len(X))).map(preprocess).shuffle(10000).batch(config.BATCH_SIZE)
# Checkpoint both networks and both optimizer states together.
checkpoint = tf.train.Checkpoint(generator=gen,
                                 gen_optimizer=gen_optimizer,
                                 discriminator=disc,
                                 disc_optimizer=disc_optimizer)
ckpt_manager = tf.train.CheckpointManager(checkpoint, directory=config.CKPT_DIR, max_to_keep=3)
# creates a summary writer, writes a summary in a file to access on tensorboard later
summary_writer = tf.summary.create_file_writer(
    logdir=config.LOG_DIR + "fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
'''LOSSES'''
def disc_loss(real_logits, fake_logits):
    """Discriminator loss: average BCE on real (target 1) and fake (target 0)."""
    bce = tf.losses.BinaryCrossentropy()
    loss_on_real = bce(tf.ones_like(real_logits), real_logits)
    loss_on_fake = bce(tf.zeros_like(fake_logits), fake_logits)
    return 0.5 * (loss_on_real + loss_on_fake)
def gen_loss(fake_logits):
    """Generator loss: BCE pushing fake samples toward the 'real' target (1)."""
    targets = tf.ones_like(fake_logits)
    return tf.losses.BinaryCrossentropy()(targets, fake_logits)
# give signature to avoid retracing
# give signature to avoid retracing
# Fixed spec: (image batch NHWC, one-hot label batch, scalar epoch index).
signature = [
    tf.TensorSpec(shape=(None, 28, 28, 1), dtype=tf.float32),
    tf.TensorSpec(shape=(None, 10), dtype=tf.float32),
    tf.TensorSpec(shape=(), dtype=tf.int64)
]
@tf.function(input_signature=signature)
def train_step(image_batch, label_batch, epoch):
    """One GAN update: a simultaneous generator and discriminator step.

    The persistent tape allows two gradient() calls (one per network).
    """
    noise = tf.random.normal((config.BATCH_SIZE, config.NOISE_DIM))
    with tf.GradientTape(persistent=True) as tape:
        fake_img_batch = gen([noise, label_batch], training=True)
        fake_logits = disc([fake_img_batch, label_batch], training=True)
        real_logits = disc([image_batch, label_batch], training=True)
        d_loss = disc_loss(real_logits, fake_logits)
        g_loss = gen_loss(fake_logits)
    gen_grads = tape.gradient(g_loss, gen.trainable_variables)
    disc_grads = tape.gradient(d_loss, disc.trainable_variables)
    gen_optimizer.apply_gradients(zip(gen_grads, gen.trainable_variables))
    disc_optimizer.apply_gradients(zip(disc_grads, disc.trainable_variables))
    # writes a tensorboard summary (creates graph if scalar)
    with summary_writer.as_default():
        tf.summary.scalar("generator_loss", g_loss, step=epoch)
        tf.summary.scalar("discriminator_loss", d_loss, step=epoch)
g_loss = tf.metrics.Mean()
d_loss = tf.metrics.Mean()
# NOTE(review): Progbar's stateful_metrics parameter expects metric *names*
# (strings); Mean objects are passed here, and neither Mean is ever updated
# during training — confirm intended.
prog_bar = Progbar(1500, stateful_metrics=[g_loss, d_loss])
# Resume from the most recent checkpoint when one exists.
if ckpt_manager.latest_checkpoint:
    checkpoint.restore(ckpt_manager.latest_checkpoint).expect_partial()
    print(f"Restored the training checkpoint...{ckpt_manager.latest_checkpoint}")
def train():
    """Run the training loop for config.EPOCHS, checkpointing every 5 epochs."""
    for epoch in range(config.EPOCHS):
        print(f"\nEpoch {epoch+1}/{config.EPOCHS} :")
        for n, (image, label) in enumerate(train_dataset):
            train_step(image, label, epoch+1)
            prog_bar.update(n)
        if (epoch+1) % 5 == 0:
            ckpt_manager.save()
def generate():
    """Sample one image per digit (0-9), de-process to [0, 255], and plot them."""
    z = tf.random.normal((10, config.NOISE_DIM))
    indices = np.arange(0, 10)
    labels = tf.one_hot(indices, depth=10)
    print(labels)
    out = gen([z, labels])
    out = (out.numpy() * 127.5) + 127.5 # de-process
    for i in range(10):
        plt.subplot(1, 10, i + 1)
        plt.axis("off")
        plt.imshow(out[i].reshape((img_height, img_width)), cmap='gray')
    plt.show()
if __name__ == "__main__":
train() # train loop
'''Test Code'''
# gen_out = gen([tf.random.normal((config.BATCH_SIZE, config.NOISE_DIM)),
# tf.ones((config.BATCH_SIZE, 10))])
# disc_out = disc([tf.random.normal((config.BATCH_SIZE,) + config.IMAGE_SHAPE),
# tf.ones((config.BATCH_SIZE, 10))])
#
# assert gen_out.shape == (32, 28, 28, 1)
|
8,327 | 950b2906853c37cdeaa8ed1076fff79dbe99b6f8 | import typing
import torch.nn as nn
from .torch_utils import get_activation, BatchNorm1d
from dna.models.torch_modules.torch_utils import PyTorchRandomStateContext
class Submodule(nn.Module):
    """A feed-forward block: stacked Linear layers with optional activation,
    dropout, and batch-norm between them, plus an optional input->output skip.

    NOTE: all modules are constructed inside PyTorchRandomStateContext(seed),
    so the ORDER of instantiation determines the initial weights — do not
    reorder the construction statements.
    """
    def __init__(
        self, layer_sizes: typing.List[int], activation_name: str, use_batch_norm: bool, use_skip: bool = False,
        dropout: float = 0.0, *, device: str = 'cuda:0', seed: int = 0
    ):
        super().__init__()
        with PyTorchRandomStateContext(seed):
            n_layers = len(layer_sizes) - 1
            activation = get_activation(activation_name)
            layers = []
            for i in range(n_layers):
                # Activation / dropout / batch-norm sit between Linear layers,
                # i.e. before every Linear except the first.
                if i > 0:
                    layers.append(activation())
                    if dropout > 0.0:
                        layers.append(nn.Dropout(p=dropout))
                    if use_batch_norm:
                        layers.append(BatchNorm1d(layer_sizes[i]))
                layers.append(nn.Linear(layer_sizes[i], layer_sizes[i+1]))
            self.net = nn.Sequential(*layers)
            self.net.to(device=device)
            if use_skip:
                if layer_sizes[0] == layer_sizes[-1]:
                    # Identity skip when input and output widths match.
                    self.skip = nn.Sequential()
                else:
                    # Linear projection to match the output width.
                    self.skip = nn.Linear(layer_sizes[0], layer_sizes[-1])
                self.skip.to(device=device)
            else:
                self.skip = None
    def forward(self, x):
        # net(x), plus the skip path when one was built.
        if self.skip is None:
            return self.net(x)
        else:
            return self.net(x) + self.skip(x)
|
8,328 | 4e94e9e2b45d3786aa86be800be882cc3d5a80b5 | """
table.py [-m] base1 base2 ... baseN
Combines output from base1.txt, base2.txt, etc., which are created by
the TestDriver (such as timcv.py) output, and displays tabulated
comparison statistics to stdout. Each input file is represented by
one column in the table.
Optional argument -m shows a final column with the mean value of each
statistic.
"""
def suck(f):
    """Parse one TestDriver result stream and collect its summary statistics.

    Reads lines from the open file object *f* until the "all runs cost" line
    (or EOF) and returns the tuple:
        (htest, stest, fp, fn, un, fpp, fnp, unp, cost, bestcost,
         hamdevall, spamdevall)
    where hamdevall/spamdevall are (mean, sdev) pairs and unmatched fields
    keep their zero defaults.
    """
    hamdevall = spamdevall = (0.0, 0.0)
    cost = 0.0
    bestcost = 0.0
    fp = 0
    fn = 0
    un = 0
    fpp = 0.0
    fnp = 0.0
    unp = 0.0
    htest = 0
    stest = 0
    get = f.readline
    while 1:
        line = get()
        if not line:
            # EOF guard: a truncated input with no "all runs cost" line used
            # to make this loop spin forever on empty strings.
            break
        if line.startswith('-> <stat> tested'):
            print(line, end=' ')
        elif line.find(' items; mean ') > 0 and line.find('for all runs') > 0:
            # e.g. "-> <stat> Ham distribution for all runs: 100 items; mean 1.2; sdev 3.4"
            vals = line.split(';')
            mean = float(vals[1].split()[-1])
            sdev = float(vals[2].split()[-1])
            val = (mean, sdev)
            ntested = int(vals[0].split()[-2])
            typ = vals[0].split()[2]
            # The outer elif already guarantees this is a "for all runs" line.
            if typ == 'Ham':
                hamdevall = val
                htest = ntested
            else:
                spamdevall = val
                stest = ntested
        elif line.startswith('-> best cost for all runs: $'):
            bestcost = float(line.split('$')[-1])
        elif line.startswith('-> <stat> all runs false positives: '):
            fp = int(line.split()[-1])
        elif line.startswith('-> <stat> all runs false negatives: '):
            fn = int(line.split()[-1])
        elif line.startswith('-> <stat> all runs unsure: '):
            un = int(line.split()[-1])
        elif line.startswith('-> <stat> all runs false positive %: '):
            fpp = float(line.split()[-1])
        elif line.startswith('-> <stat> all runs false negative %: '):
            fnp = float(line.split()[-1])
        elif line.startswith('-> <stat> all runs unsure %: '):
            unp = float(line.split()[-1])
        elif line.startswith('-> <stat> all runs cost: '):
            # Terminal line for one result block.
            cost = float(line.split('$')[-1])
            break
    return (htest, stest, fp, fn, un, fpp, fnp, unp, cost, bestcost,
            hamdevall, spamdevall)
def windowsfy(fn):
    """Return fn + '.txt' when that file exists on disk, otherwise fn unchanged."""
    import os
    candidate = fn + '.txt'
    return candidate if os.path.exists(candidate) else fn
def table():
    """Read each result file named on the command line and print a
    column-per-file comparison table of error/cost statistics.

    Option -m appends a final column with the mean of each statistic.
    """
    import getopt, sys
    showMean = 0
    fname = "filename: "
    fnam2 = "          "
    ratio = "ham:spam: "
    rat2 = "          "
    fptot = "fp total: "
    fpper = "fp %:     "
    fntot = "fn total: "
    fnper = "fn %:     "
    untot = "unsure t: "
    unper = "unsure %: "
    rcost = "real cost:"
    bcost = "best cost:"
    hmean = "h mean:   "
    hsdev = "h sdev:   "
    smean = "s mean:   "
    ssdev = "s sdev:   "
    meand = "mean diff:"
    kval = "k:        "
    tfptot = tfpper = tfntot = tfnper = tuntot = tunper = trcost = tbcost = \
             thmean = thsdev = tsmean = tssdev = tmeand = tkval = 0
    args, fileargs = getopt.getopt(sys.argv[1:], 'm')
    for arg, val in args:
        if arg == "-m":
            showMean = 1
    for filename in fileargs:
        filename = windowsfy(filename)
        # BUGFIX: the py2 builtin `file()` no longer exists in Python 3;
        # open the result file explicitly and close it deterministically.
        with open(filename) as results:
            (htest, stest, fp, fn, un, fpp, fnp, unp, cost, bestcost,
             hamdevall, spamdevall) = suck(results)
        # Strip extension and any leading directory components for the header.
        if filename.endswith('.txt'):
            filename = filename[:-4]
        filename = filename[filename.rfind('/')+1:]
        filename = filename[filename.rfind("\\")+1:]
        # Long file names are staggered across two header rows.
        if len(fname) > len(fnam2):
            fname += "        "
            fname = fname[0:(len(fnam2) + 12)]
            fnam2 += " %11s" % filename
        else:
            fnam2 += "        "
            fnam2 = fnam2[0:(len(fname) + 12)]
            fname += " %11s" % filename
        if len(ratio) > len(rat2):
            ratio += "        "
            ratio = ratio[0:(len(rat2) + 12)]
            rat2 += " %11s" % ("%d:%d" % (htest, stest))
        else:
            rat2 += "        "
            rat2 = rat2[0:(len(ratio) + 12)]
            ratio += " %11s" % ("%d:%d" % (htest, stest))
        fptot += "%12d" % fp
        tfptot += fp
        fpper += "%12.2f" % fpp
        tfpper += fpp
        fntot += "%12d" % fn
        tfntot += fn
        fnper += "%12.2f" % fnp
        tfnper += fnp
        untot += "%12d" % un
        tuntot += un
        unper += "%12.2f" % unp
        tunper += unp
        rcost += "%12s" % ("$%.2f" % cost)
        trcost += cost
        bcost += "%12s" % ("$%.2f" % bestcost)
        tbcost += bestcost
        hmean += "%12.2f" % hamdevall[0]
        thmean += hamdevall[0]
        hsdev += "%12.2f" % hamdevall[1]
        thsdev += hamdevall[1]
        smean += "%12.2f" % spamdevall[0]
        tsmean += spamdevall[0]
        ssdev += "%12.2f" % spamdevall[1]
        tssdev += spamdevall[1]
        meand += "%12.2f" % (spamdevall[0] - hamdevall[0])
        tmeand += (spamdevall[0] - hamdevall[0])
        # Separation coefficient between the ham and spam score distributions.
        k = (spamdevall[0] - hamdevall[0]) / (spamdevall[1] + hamdevall[1])
        kval += "%12.2f" % k
        tkval += k
    nfiles = len(fileargs)
    if nfiles and showMean:
        # Append the mean column requested by -m.
        fptot += "%12d" % (tfptot/nfiles)
        fpper += "%12.2f" % (tfpper/nfiles)
        fntot += "%12d" % (tfntot/nfiles)
        fnper += "%12.2f" % (tfnper/nfiles)
        untot += "%12d" % (tuntot/nfiles)
        unper += "%12.2f" % (tunper/nfiles)
        rcost += "%12s" % ("$%.2f" % (trcost/nfiles))
        bcost += "%12s" % ("$%.2f" % (tbcost/nfiles))
        hmean += "%12.2f" % (thmean/nfiles)
        hsdev += "%12.2f" % (thsdev/nfiles)
        smean += "%12.2f" % (tsmean/nfiles)
        ssdev += "%12.2f" % (tssdev/nfiles)
        meand += "%12.2f" % (tmeand/nfiles)
        kval += "%12.2f" % (tkval/nfiles)
    print(fname)
    if len(fnam2.strip()) > 0:
        print(fnam2)
    print(ratio)
    if len(rat2.strip()) > 0:
        print(rat2)
    print(fptot)
    print(fpper)
    print(fntot)
    print(fnper)
    print(untot)
    print(unper)
    print(rcost)
    print(bcost)
    print(hmean)
    print(hsdev)
    print(smean)
    print(ssdev)
    print(meand)
    print(kval)
if __name__ == "__main__":
table()
|
8,329 | 0ac471d2cb30a21c1246106ded14cdc4c06d2d40 | #!/usr/bin/env python3
from collections import OrderedDict
import torch.nn as nn
from fairseq.models import FairseqMultiModel, register_model
from pytorch_translate import common_layers, utils
@register_model("multilingual")
class MultilingualModel(FairseqMultiModel):
    """
    To use, you must extend this class and define single_model_cls as a class
    variable. Example:
    @register_model("multilingual_transformer")
    class MultilingualTransformerModel(MultilingualModel):
        single_model_cls = TransformerModel
        @staticmethod
        def add_args(parser):
            TransformerModel.add_args(parser)
            MultilingualModel.add_args(parser)
    """
    def __init__(self, task, encoders, decoders):
        super().__init__(encoders, decoders)
        self.task = task
        # One single-language-pair model per key, assembled from the per-pair
        # encoder/decoder dicts passed in by build_model().
        self.models = nn.ModuleDict(
            {
                key: self.__class__.single_model_cls(task, encoders[key], decoders[key])
                for key in self.keys
            }
        )
    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        parser.add_argument(
            "--share-encoder-embeddings",
            action="store_true",
            help="share encoder embeddings across languages",
        )
        parser.add_argument(
            "--share-decoder-embeddings",
            action="store_true",
            help="share decoder embeddings across languages",
        )
        parser.add_argument(
            "--share-encoders",
            action="store_true",
            help="share encoders across languages",
        )
        parser.add_argument(
            "--share-decoders",
            action="store_true",
            help="share decoders across languages",
        )
    @staticmethod
    def set_multilingual_arch_args(args):
        # Backfill defaults for the sharing flags when absent from args.
        args.share_encoder_embeddings = getattr(args, "share_encoder_embeddings", False)
        args.share_decoder_embeddings = getattr(args, "share_decoder_embeddings", False)
        args.share_encoders = getattr(args, "share_encoders", False)
        args.share_decoders = getattr(args, "share_decoders", False)
    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        if not hasattr(args, "max_source_positions"):
            args.max_source_positions = 1024
        if not hasattr(args, "max_target_positions"):
            args.max_target_positions = 1024
        src_langs = [lang_pair.split("-")[0] for lang_pair in task.lang_pairs]
        tgt_langs = [lang_pair.split("-")[1] for lang_pair in task.lang_pairs]
        # Sharing a whole encoder/decoder implies sharing its embeddings.
        if args.share_encoders:
            args.share_encoder_embeddings = True
        if args.share_decoders:
            args.share_decoder_embeddings = True
        # encoders/decoders for each language
        lang_encoders, lang_decoders = {}, {}
        def get_encoder(lang, shared_encoder_embed_tokens=None):
            # Build (or reuse a cached) encoder for the source language `lang`.
            if lang not in lang_encoders:
                src_dict = task.dicts[lang]
                if shared_encoder_embed_tokens is None:
                    encoder_embed_tokens = common_layers.Embedding(
                        num_embeddings=len(src_dict),
                        embedding_dim=args.encoder_embed_dim,
                        padding_idx=src_dict.pad(),
                        freeze_embed=args.encoder_freeze_embed,
                        normalize_embed=getattr(args, "encoder_normalize_embed", False),
                    )
                    utils.load_embedding(
                        embedding=encoder_embed_tokens,
                        dictionary=src_dict,
                        pretrained_embed=args.encoder_pretrained_embed,
                    )
                else:
                    encoder_embed_tokens = shared_encoder_embed_tokens
                lang_encoders[lang] = cls.single_model_cls.build_encoder(
                    args, src_dict, embed_tokens=encoder_embed_tokens
                )
            return lang_encoders[lang]
        def get_decoder(lang, shared_decoder_embed_tokens=None):
            """
            Fetch decoder for the input `lang`, which denotes the target
            language of the model
            """
            if lang not in lang_decoders:
                tgt_dict = task.dicts[lang]
                if shared_decoder_embed_tokens is None:
                    decoder_embed_tokens = common_layers.Embedding(
                        num_embeddings=len(tgt_dict),
                        embedding_dim=args.decoder_embed_dim,
                        padding_idx=tgt_dict.pad(),
                        freeze_embed=args.decoder_freeze_embed,
                    )
                    utils.load_embedding(
                        embedding=decoder_embed_tokens,
                        dictionary=tgt_dict,
                        pretrained_embed=args.decoder_pretrained_embed,
                    )
                else:
                    decoder_embed_tokens = shared_decoder_embed_tokens
                # NOTE(review): task.dicts[lang] (the target-language dict) is
                # passed in the position ahead of tgt_dict — confirm whether
                # build_decoder expects the *source* dictionary there.
                lang_decoders[lang] = cls.single_model_cls.build_decoder(
                    args, task.dicts[lang], tgt_dict, embed_tokens=decoder_embed_tokens
                )
            return lang_decoders[lang]
        # shared encoders/decoders (if applicable)
        shared_encoder, shared_decoder = None, None
        if args.share_encoders:
            shared_encoder = get_encoder(src_langs[0])
        if args.share_decoders:
            shared_decoder = get_decoder(tgt_langs[0])
        shared_encoder_embed_tokens, shared_decoder_embed_tokens = None, None
        if args.share_encoder_embeddings:
            shared_encoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(
                dicts=task.dicts,
                langs=src_langs,
                embed_dim=args.encoder_embed_dim,
                build_embedding=common_layers.build_embedding,
                pretrained_embed_path=None,
            )
        if args.share_decoder_embeddings:
            shared_decoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(
                dicts=task.dicts,
                langs=tgt_langs,
                embed_dim=args.decoder_embed_dim,
                build_embedding=common_layers.build_embedding,
                pretrained_embed_path=None,
            )
        # Assemble the per-language-pair encoder/decoder maps the constructor
        # turns into single models.
        encoders, decoders = OrderedDict(), OrderedDict()
        for lang_pair, src_lang, tgt_lang in zip(task.lang_pairs, src_langs, tgt_langs):
            encoders[lang_pair] = (
                shared_encoder
                if shared_encoder is not None
                else get_encoder(
                    src_lang, shared_encoder_embed_tokens=shared_encoder_embed_tokens
                )
            )
            decoders[lang_pair] = (
                shared_decoder
                if shared_decoder is not None
                else get_decoder(
                    tgt_lang, shared_decoder_embed_tokens=shared_decoder_embed_tokens
                )
            )
        return cls(task, encoders, decoders)
|
8,330 | 4dda122a8c3a2aab62bb202945f6fb9cb73cf772 | from numpy import sqrt
def Schout2ConTank(a, b, d):
    """Convert parameters from Schoutens notation (a, b, d) to Cont-Tankov
    notation.

    Returns the tuple (th, k, s).
    """
    # sqrt(a^2 - b^2) appears in all three formulas; compute it once.
    gamma = sqrt(a ** 2 - b ** 2)
    th = d * b / gamma
    k = 1 / (d * gamma)
    s = sqrt(d / gamma)
    return th, k, s
|
8,331 | c9f1768e2f2dd47d637c2e577067eb6cd163e972 | from functools import partial
def power_func(x, y, a=1, b=0):
    """Return a * x**y + b (a scaled power with an additive offset)."""
    scaled = a * x ** y
    return scaled + b
# Pre-bind x=2 and a=4; the remaining parameters (y, and optionally b)
# are supplied at call time.
new_func = partial(power_func, 2, a=4)
print(new_func(4, b=1))  # 4 * 2**4 + 1 = 65
print(new_func(1))  # 4 * 2**1 + 0 = 8
|
8,332 | 7eefcfdb9682cb09ce2d85d11aafc04977016ba4 | from urllib import request, parse
import pandas as pd
import json
import os
class BusInfo:
    """Static access to the ODPT public-transport API for the
    Yokohama Municipal bus operator.

    Call init() once (reads the BUS_TOKEN environment variable and caches
    the bus-stop and route master data), then update() for each snapshot
    of current vehicle positions.
    """
    # API endpoints (vehicle positions, bus-stop poles, route patterns)
    url = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:Bus'
    url_busstop = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusstopPole.json'
    url_routes = 'https://api-tokyochallenge.odpt.org/api/v4/odpt:BusroutePattern.json'
    # Base query parameters; init() adds the consumer key.
    params = {
        'odpt:operator': 'odpt.Operator:YokohamaMunicipal',
    }
    # Master-data caches (pandas DataFrames), filled by init().
    bus_stops = None
    bus_routes = None

    @staticmethod
    def init():
        """Read the API key from the environment and load the master data."""
        apiKey = os.getenv('BUS_TOKEN')
        BusInfo.params['acl:consumerKey'] = apiKey
        BusInfo.getBusStops()
        BusInfo.getBusRoutes()

    @staticmethod
    def getBusRoutes():
        """Fetch all route patterns into BusInfo.bus_routes (indexed by route_id)."""
        #BusInfo.bus_routes = pd.DataFrame()
        #return
        busroute_list=[]
        req = request.Request('{}?{}'.format(BusInfo.url_routes, parse.urlencode(BusInfo.params)))
        with request.urlopen(req) as res:
            json_load = json.load(res)
            for v in json_load:
                try:
                    busstop = { 'route_id': v['owl:sameAs'],
                                'route_name': v['dc:title'],
                    }
                    busroute_list.append(busstop)
                except Exception:
                    # Best effort: records missing expected keys are skipped.
                    pass
        BusInfo.bus_routes = pd.DataFrame(busroute_list).set_index('route_id')

    @staticmethod
    def getBusStops():
        """Fetch all bus-stop poles into BusInfo.bus_stops (indexed by busstop_id)."""
        #BusInfo.bus_stops = pd.DataFrame()
        #return
        busstop_list=[]
        req = request.Request('{}?{}'.format(BusInfo.url_busstop, parse.urlencode(BusInfo.params)))
        with request.urlopen(req) as res:
            json_load = json.load(res)
            for v in json_load:
                try:
                    busstop = { 'busstop_id': v['owl:sameAs'],
                                'pole_name': v['dc:title'],
                    }
                    busstop_list.append(busstop)
                except Exception:
                    # Best effort: records missing expected keys are skipped.
                    pass
        BusInfo.bus_stops = pd.DataFrame(busstop_list).set_index('busstop_id')

    @staticmethod
    def update():
        """Fetch current vehicle positions and return them as a DataFrame.

        The occupancy status is mapped to a Japanese label, a marker color
        and an arrow-icon URL; stop and route names are joined in from the
        cached master data (missing values become "-").
        NOTE(review): assumes init() has been called first so that
        bus_stops/bus_routes are populated -- otherwise the merges fail.
        """
        bus_list=[]
        req = request.Request('{}?{}'.format(BusInfo.url, parse.urlencode(BusInfo.params)))
        with request.urlopen(req) as res:
            json_load = json.load(res)
            for v in json_load:
                try:
                    # Map occupancy status to a display label and color.
                    if v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:Empty':
                        occupancy = '空いている'
                        color='blue'
                    elif v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:ManySeatsAvailable':
                        occupancy = '空き座席多数'
                        color='blue'
                    elif v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:FewSeatsAvailable':
                        occupancy = '座席わすか'
                        color='yellow'
                    elif v['odpt:occupancyStatus'] == 'odpt.OccupancyStatus:StandingRoomOnly':
                        occupancy = '混雑'
                        color='red'
                    else:
                        # Unknown status: gray marker, 'occupancy' keeps its
                        # previous loop value (pre-existing quirk, unchanged).
                        color='gray'
                    bus = { 'bus_id': v['odpt:busNumber'],
                            'lat': v['geo:lat'],
                            'lng': v['geo:long'],
                            'route_num': v['odpt:busroute'][-3:],
                            'route_id': v['odpt:busroutePattern'],
                            'prevStop': v['odpt:fromBusstopPole'],
                            'nextStop': v['odpt:toBusstopPole'],
                            'occupancy' : occupancy,
                            'color' : color,
                            'azimuth' : v['odpt:azimuth'],
                            'img_url' : 'https://mxl00474.github.io/test_static/arrow_' + color + '.png'
                    }
                    bus_list.append(bus)
                except Exception:
                    # Best effort: vehicles missing expected keys are skipped.
                    pass
        df = pd.DataFrame(bus_list).set_index('bus_id')
        # Join human-readable names for previous/next stop and the route.
        df = pd.merge(df, BusInfo.bus_stops, left_on='prevStop', right_index=True, how='left')
        df = pd.merge(df, BusInfo.bus_stops, left_on='nextStop', right_index=True, how='left')
        df = pd.merge(df, BusInfo.bus_routes, left_on='route_id', right_index=True, how='left')
        return df.fillna("-")
if __name__ == '__main__':
    # Manual smoke test: fetch the master data, then one vehicle snapshot.
    BusInfo.init()
    print('=== Get stop info ===')
    BusInfo.getBusStops()
    print(BusInfo.bus_stops)
    print('=== Get route info ===')
    BusInfo.getBusRoutes()
    #print(BusInfo.bus_routes)
    print(len(BusInfo.bus_routes))
    print('=== Get bus info ===')
    bus_list = BusInfo.update()
    print(bus_list)
    print(bus_list.columns)
|
8,333 | 7e23f5598ccfe9aff74d43eb662f860b0404b7ec | #!/usr/bin/env python
"""
A package that determines the current day of the week.
"""
from datetime import date
import calendar
# Set the first day of the week as Sunday.
calendar.firstday(calendar.SUNDAY)
def day_of_the_week(arg):
    """Print today's day of the week.

    Only arg == "day" is accepted; anything else raises Exception.
    """
    # Guard clause: reject any argument other than the literal "day".
    if arg != "day":
        raise Exception ("Invalid argument for day of the week")
    today_name = calendar.day_name[date.today().weekday()]
    print("Today is " + today_name + ".")
def info():
    """Print a one-line description of the package."""
    print("This package determines the day of the week.")
if __name__ == "__main__"
day("today")
info()
|
8,334 | 61c2a6499dd8de25045733f9061d660341501314 | #!/usr/bin/python2
import gmpy2
p = 24659183668299994531
q = 28278904334302413829
e = 11
c = 589000442361955862116096782383253550042
t = (p-1)*(q-1)
n = p*q
# returns d such that e * d == 1 modulo t, or 0 if no such y exists.
d = gmpy2.invert(e,t)
# Decryption
m = pow(c,d,n)
print "Solved ! m = %d" % m
|
8,335 | 4a8fa195a573f8001e55b099a8882fe71bcca233 | """storeproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.db import router
from django.urls import path, include
from rest_framework import routers
from spencersapp import views
router = routers.DefaultRouter()
router.register(r'users', views.CategoryView)
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.index_view, name='index_view'),
path('about_view/', views.about_view, name='about_view'),
path('shop_view/', views.shop_view, name='shop_view'),
path('checkout_view/', views.checkout_view, name='checkout_view'),
path('contactus_view/', views.contactus_view, name='contactus_view'),
path('gallery_view/', views.gallery_view, name='gallery_view'),
path('shopdetail_view/', views.shopdetail_view, name='shopdetail_view'),
path('cart_view/', views.cart_view, name='cart_view'),
path('myaccount_view/', views.myaccount_view, name='myaccount_view'),
path('wishlist_view/', views.wishlist_view, name='wishlist_view'),
path('category/', views.category, name='category'),
path('include_search_view/', views.include_search_view, name='include_search_view'),
path('catsearch_view/<int:id>', views.catsearch_view, name='catsearch_view'),
# path('registration_view/', views.registration_view, name='registration_view'),
# path('login_view/', views.login_view, name='login_view'),
path('home_view/', views.home_view, name='home_view'),
path('directcat_view/<int:id>', views.directcat_view, name='directcat_view'),
path('showlist/', views.showlist, name='showlist'),
path('product_search/', views.product_search, name='product_search'),
path('emailresponse/', views.emailresponse, name='emailresponse'),
# path('user_login/', views.user_login, name='user_login'),
path('accounts/', include('django.contrib.auth.urls')),
path('signup/', views.signup_view, name='signup'),
path('accounts/logout/', views.logout_view, name='logout_view'),
path('myaccount_details/', views.myaccount_details, name='myaccount_details'),
path('sort/<str:key>', views.sort_view, name='sort'),
path('profile/', views.profile, name='profile'),
path('change_password/', views.change_password, name='change_password'),
path('addcart/',views.add_to_cart, name='addcart'),
path('cart_bag/', views.cart_bag, name='cart_bag'),
path('update_cart/', views.update_cart, name='update_cart'),
path('cart_del/<int:id>', views.cart_del, name='cart_del'),
path('', include(router.urls)),
path('users/', include('rest_framework.urls', namespace='rest_framework'))
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
8,336 | 8c71bc5d53bf5c4cb20784659eddf8a97efb86ef | # #----------------------------------------#
# 3.4
#
# Question:
# Write a program which can map() to make a list whose elements are square of elements in [1,2,3,4,5,6,7,8,9,10].
#
|
8,337 | 98fb70e1911522365292c86603481656e7b86d73 | from django.contrib import admin
from .models import CarouselImage, Budget
# Expose the app's models in the Django admin with the default ModelAdmin.
admin.site.register(CarouselImage)
admin.site.register(Budget)
|
8,338 | 30986eb0a6cd82f837dd14fb383529a6a41def9a | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
('c4c_app', '0006_c4cjob_complete'),
]
operations = [
migrations.AlterModelOptions(
name='c4cbranch',
options={'verbose_name': 'Branch', 'verbose_name_plural': 'Branches'},
),
migrations.AlterModelOptions(
name='c4cdonation',
options={'verbose_name': 'Donation', 'verbose_name_plural': 'Donations'},
),
migrations.AlterModelOptions(
name='c4cevent',
options={'verbose_name': 'Event', 'verbose_name_plural': 'Events'},
),
migrations.AlterModelOptions(
name='c4cjob',
options={'verbose_name': 'Job', 'verbose_name_plural': 'Jobs'},
),
migrations.AlterModelOptions(
name='c4cuser',
options={'verbose_name': 'C4C User', 'verbose_name_plural': 'C4C Users'},
),
migrations.RemoveField(
model_name='c4cbranch',
name='officers',
),
migrations.AddField(
model_name='c4cbranch',
name='group',
field=models.OneToOneField(related_name='in_branches', default=None, to='auth.Group'),
preserve_default=False,
),
migrations.AddField(
model_name='c4cbranch',
name='officers_group',
field=models.OneToOneField(related_name='is_branch_officer_of', default=None, to='auth.Group'),
preserve_default=False,
),
migrations.AddField(
model_name='c4cjob',
name='offer',
field=models.BooleanField(default=False),
preserve_default=True,
),
migrations.AlterField(
model_name='c4cjob',
name='duration',
field=models.IntegerField(null=True),
preserve_default=True,
),
]
|
8,339 | e560f2f202e477822729d1361b8d7ef7831a00e6 | # ------------------------------------------
#
# Project: VEXcode VR Maze Solver
# Author: Hyunwoo Choi
# Created: January 12 2021
# Description: Solves a VEXcode VR maze using the right hand rule
#
# ------------------------------------------
# Library imports
from vexcode import *
#main
def main():
    """Entry point: configure the pen and drivetrain, then start the solver."""
    #putting down the pen to show the path of the robot
    pen.set_pen_color(BLUE)
    pen.move(DOWN)
    # Moderate speeds keep the turns and cell-length drives accurate.
    drivetrain.set_drive_velocity(50, PERCENT)
    drivetrain.set_turn_velocity(50, PERCENT)
    #start with 90 deg turned right since we are using a right hand rule to solve this maze
    drivetrain.turn_for(RIGHT, 90, DEGREES)
    #run the wall-following loop until the exit is detected
    run()
#this method checks all three sides and returns a boolean for each side if it is blocked or not
def checkSides():
    """Probe right, front and left for walls.

    Returns (right_clear, front_clear, left_clear); a side counts as
    blocked when the front eye sees an object closer than 3000 mm.
    The robot finishes facing its original heading.
    """
    def _side_clear():
        # True when nothing is detected within 3 m straight ahead.
        return not (front_eye.near_object() and distance.get_distance(MM) < 3000)

    drivetrain.turn_for(RIGHT, 90, DEGREES)
    right_clear = _side_clear()
    drivetrain.turn_for(LEFT, 90, DEGREES)
    front_clear = _side_clear()
    drivetrain.turn_for(LEFT, 90, DEGREES)
    left_clear = _side_clear()
    drivetrain.turn_for(RIGHT, 90, DEGREES)
    return right_clear, front_clear, left_clear
#main run function
def run():
    """Wall-following loop: drive one cell, then turn by the right-hand rule."""
    #program loop
    while True:
        #drive one maze cell forward
        drivetrain.drive_for(FORWARD, 250, MM)
        #checks if the robot's surroundings are clear by using the method above
        rightClear, frontClear, leftClear = checkSides()
        #uses the 3 boolean values above to determine the which direction to turn
        if frontClear and not rightClear:
            # right is walled and front is open: keep going straight
            # (print("") is a no-op placeholder for this branch)
            print("")
        elif rightClear:
            # right-hand rule: always prefer turning right when possible
            drivetrain.turn_for(RIGHT, 90, DEGREES)
        elif (not (rightClear and frontClear)) and leftClear:
            drivetrain.turn_for(LEFT, 90, DEGREES)
        elif not (rightClear and leftClear and frontClear):
            # dead end: turn around
            drivetrain.turn_for(RIGHT, 180, DEGREES)
        #if found an exit, stop
        if(down_eye.detect(RED)):
            break
        wait(1,MSEC)
# VR threads — Do not delete
vr_thread(main())
|
8,340 | 800573786913ff2fc37845193b5584a0a815533f | # use local image
import io
import os
from google.cloud import vision
from google.oauth2 import service_account
creds = service_account.Credentials.from_service_account_file('./key.json')
client = vision.ImageAnnotatorClient(
credentials=creds,
)
# The name of the image file to annotate
file_name = os.path.join(
os.path.dirname(__file__),
"./dog.jpg")
# Loads the image into memory
with io.open(file_name, 'rb') as image_file:
content = image_file.read()
request = {
"image": {
"content": content
},
"features": [
{
"max_results": 2,
"type": "LABEL_DETECTION"
},
{
"type": "SAFE_SEARCH_DETECTION"
}
]
}
response = client.annotate_image(request)
print(response)
print(response.safe_search_annotation.adult)
for label in response.label_annotations:
print(label.description) |
8,341 | 75837ab778e94693151de1c17b59e12f8b2336d3 | def divide(file):
index = 0
head = ''
while True:
if file[index].isnumeric():
head_index = index
break
if file[index].isalpha():
head += file[index].lower()
else:
head += file[index]
index += 1
while True:
if index >= len(file):
number = int(file[head_index:])
tail = ''
break
if not file[index].isnumeric():
number = int(file[head_index:index])
tail = file[index:]
break
index += 1
return head, number, tail
def solution(files):
    """Sort file names by (head, number), keeping input order for ties."""
    keyed = [divide(name)[:2] + (idx,) for idx, name in enumerate(files)]
    keyed.sort(key=lambda item: (item[0], item[1], item[2]))
    return [files[idx] for _, _, idx in keyed]
8,342 | b7632cc7d8fc2f9096f7a6bb61c471dc61689f70 | import pandas as pd
import numpy as np
import urllib.request
import urllib.parse
import json
def predict(input_text):
    """POST input_text to the local sentiment API and return its 'neg_pos' value.

    NOTE(review): assumes the service at 127.0.0.1:8000 is running and
    responds with JSON containing a 'neg_pos' key -- confirm against the
    server implementation.
    """
    URL = "http://127.0.0.1:8000/api/v1/predict/"
    # 'values' is built but never sent; only 'data' below is used.
    values = {
        "format": "json",
        "input_text": input_text,
    }
    data = urllib.parse.urlencode({'input_text': input_text}).encode('utf-8')
    request = urllib.request.Request(URL, data)
    response = urllib.request.urlopen(request)
    result= json.loads(response.read())
    return result['neg_pos']
if __name__ == '__main__':
print("Start if __name__ == '__main__'")
print('load csv file ....')
df = pd.read_csv("test.csv", engine="python", encoding="utf-8-sig")
df["PREDICT"] = np.nan #予測列を追加
print('Getting prediction results ....')
for index, row in df.iterrows():
df.at[index, "PREDICT"] = predict(row['INPUT'])
print('save results to csv file')
df.to_csv("predicted_test .csv", encoding="utf-8-sig", index=False)
print('Processing terminated normally.')
|
8,343 | 1c8145007edb09d77a3b15de5c34d0bc86c0ba97 | import argparse # for handling command line arguments
import collections # for container types like OrderedDict
import configparser
import hashlib # for SHA-1
import os
import re
import sys
import zlib # git compresses everything using zlib
argparser = argparse.ArgumentParser(description="The stupid content tracker")
# we don't just call git, we always call git command (init, add, clone)
# hence we need to add subparsers to our arg parser
# dest=command means the command we pass will be stored as a string
# in an attribute called command
argsubparsers = argparser.add_subparsers(title="Commands", dest="command")
argsubparsers.required = True
def main(args = sys.argv[1:]):
    """Parse the command line and dispatch to the matching cmd_* handler.

    Bug fixes: parse_args was called with the undefined name 'argv'
    (the parameter is 'args'), and 'cmd_ls-tree(args)' parsed as a
    subtraction of two names, raising NameError at dispatch time.
    Note: the default is captured from sys.argv once at import time,
    which is fine for a CLI entry point.
    """
    args = argparser.parse_args(args)

    if args.command == "add" : cmd_add(args)
    elif args.command == "cat-file" : cmd_cat_file(args)
    elif args.command == "checkout" : cmd_checkout(args)
    elif args.command == "commit" : cmd_commit(args)
    elif args.command == "hash-object" : cmd_hash_object(args)
    elif args.command == "init" : cmd_init(args)
    elif args.command == "log" : cmd_log(args)
    elif args.command == "ls-tree" : cmd_ls_tree(args)
    elif args.command == "merge" : cmd_merge(args)
    elif args.command == "rebase" : cmd_rebase(args)
    elif args.command == "rev-parse" : cmd_rev_parse(args)
    elif args.command == "rm" : cmd_rm(args)
    elif args.command == "show-ref" : cmd_show_ref(args)
    elif args.command == "tag" : cmd_tag(args)
# abstraction for a git repository
class GitRepository(object):
    """A git repository.

    A repo consists of a worktree (the folder under version control) and a
    gitdir (the .git directory where git keeps its own data; its config
    file lives at .git/config).
    """
    worktree = None
    gitdir = None
    conf = None

    # 'force' disables all sanity checks so repo_create can build a
    # repository object for a directory that is not yet a git repo.
    def __init__(self, path, force=False):
        self.worktree = path
        self.gitdir = os.path.join(path, ".git")
        if not (force or os.path.isdir(self.gitdir)):
            raise Exception("Not a git repository %s" % path)
        # Read configuration file in .git/config
        self.conf = configparser.ConfigParser()
        cf = repo_file(self, "config")
        if cf and os.path.exists(cf):
            self.conf.read([cf])
        elif not force:
            raise Exception("Configuration file missing")
        if not force:
            # Only repositoryformatversion 0 (the classic layout) is supported.
            vers = int(self.conf.get("core", "repositoryformatversion"))
            if vers != 0:
                raise Exception("Unsupported repositoryformatversion %s " %vers)
# we will be doing a lot of path manipulations hence we will write some utility functions
def repo_path(repo, *path):
    """Compute a path under the repository's gitdir."""
    parts = (repo.gitdir,) + path
    return os.path.join(*parts)
def repo_file(repo, *path, mkdir=False):
    """Same as repo_path, but create dirname(*path) if absent.

    For example repo_file(r, "refs", "remotes", "origin") will create
    .git/refs/remotes. Returns None when the parent directory is missing
    and mkdir is False.
    """
    parent = repo_dir(repo, *path[:-1], mkdir=mkdir)
    if not parent:
        return None
    return repo_path(repo, *path)
def repo_dir(repo, *path, mkdir=False):
    """Same as repo_path, but mkdir *path if absent if mkdir.

    Returns the directory path when it exists (or was created), None when
    it is absent and mkdir is False; raises when the path exists but is
    not a directory.
    """
    target = repo_path(repo, *path)
    if os.path.exists(target):
        if not os.path.isdir(target):
            raise Exception("Not a directory %s" % target)
        return target
    if not mkdir:
        return None
    os.makedirs(target)
    return target
# to create a new git repo, we create the following paths
# .git is the git repository
# .git/objects: the object store
# .git/refs: the reference store, it contains 2 subdirs heads and tags
# .git/HEAD: a reference to the current head
# .git/config: repository's configuration file
# .git/description: repository's description file
def repo_create(path):
    """Create a new repository at path.

    Lays out .git with branches/, objects/, refs/tags/, refs/heads/, plus
    the description, HEAD and config files, and returns the GitRepository.
    Bug fix: the four assert lines were each missing a closing parenthesis
    (a SyntaxError); they are now plain assert statements.
    """
    repo = GitRepository(path, True)

    # The target must either not exist yet, or be an empty directory.
    if os.path.exists(repo.worktree):
        if not os.path.isdir(repo.worktree):
            raise Exception("%s is not a directory!" % path)
        if os.listdir(repo.worktree):
            raise Exception("%s is not empty!" % path)
    else:
        os.makedirs(repo.worktree)

    assert repo_dir(repo, "branches", mkdir=True)
    assert repo_dir(repo, "objects", mkdir=True)
    assert repo_dir(repo, "refs", "tags", mkdir=True)
    assert repo_dir(repo, "refs", "heads", mkdir=True)

    # .git/description
    with open(repo_file(repo, "description"), "w") as f:
        f.write("Unnamed repository: edit this file 'description' to name the repository.\n")

    # .git/HEAD
    with open(repo_file(repo, "HEAD"), "w") as f:
        f.write("ref: refs/heads/master\n")

    # .git/config
    with open(repo_file(repo, "config"), "w") as f:
        config = repo_default_config()
        config.write(f)

    return repo
|
8,344 | 257a4d0b0c713624ea8452dbfd6c5a96c9a426ad | import pymysql
import logging
import socket
from models.platformconfig import Pconfig
class ncbDB(Pconfig):
    """Access layer for the conference configuration MySQL database.

    Connection parameters are inherited from Pconfig, which reads them
    from the ApplSrv system config file, e.g. /etc/ncb_applsrv/ncb_applsrv.conf.
    """
    hostname = None
    conferenceMediaStoragePath = '/media/conference/'  # NFS mount point
    conferenceDBcurs = None
    connect_db = None

    def __init__(self, srvrole):
        """Open the configuration-DB connection; raises Exception on failure.

        srvrole -- server role, forwarded to the Pconfig constructor.
        """
        super(ncbDB, self).__init__(srvrole)  # run constructor of parent object
        self.hostname = socket.gethostname()  # get local hostname TODO: validate hostname with that one in config file
        self.conferenceMediaStoragePath = self.lconfig.get('media', 'media_path')  # TODO: if valid to remove it above
        try:
            self.connect_db = pymysql.connect(self.db_server,
                                              self.conferenceConfigDBname_user,
                                              self.conferenceConfigDBname_passwd,
                                              self.conferenceConfigDBname,
                                              charset='utf8mb4',
                                              cursorclass=pymysql.cursors.DictCursor)
        except pymysql.Error as er:
            logging.critical("Can not establish connection to configuration DB: %s", self.conferenceConfigDBname)
            # Bug fix: exceptions are not indexable in Python 3 -- use .args.
            raise Exception(er.args[0], er.args[1])

    # the method executes SQL query and returns all fetched rows. Otherwise it returns None
    def ncb_getQuery(self, querySQL):
        """Run a read query; return (True, rows) or (False, error message)."""
        result = []
        try:
            with self.connect_db.cursor() as self.conferenceDBcurs:
                self.conferenceDBcurs.execute(querySQL)
                result = self.conferenceDBcurs.fetchall()
                return True, result
        # Specific pymysql errors first; pymysql.Error is their base class.
        except pymysql.ProgrammingError as er:
            logging.critical('ERROR: ProgrammingError in Conference DB. Call to support immediately - %s, %s' % (er.args[0], er.args[1]))
            return False, er.args[1]
        except pymysql.InternalError as er:
            logging.critical('ERROR: InternallError in Conference DB. Call to support immediately - %s, %s' % (er.args[0], er.args[1]))
            return False, er.args[1]
        except pymysql.Error as er:
            logging.critical('ERROR: Can not get a data from Conference DB. Call to support immediately - %s, %s' % (er.args[0], er.args[1]))
            return False, er.args[1]

    # the method executes SQL query to push data into DB.
    def ncb_pushQuery(self, querySQL):
        """Run a write query and commit; return (True, []) or (False, error message)."""
        try:
            with self.connect_db.cursor() as self.conferenceDBcurs:
                self.conferenceDBcurs.execute(querySQL)
                self.connect_db.commit()
            return (True, [])
        # Bug fix: the generic pymysql.Error handler was listed first, which
        # made the IntegrityError/OperationalError/InternalError branches
        # unreachable; specific handlers now come before the base class.
        except pymysql.IntegrityError as er:
            logging.critical('ERROR: IntegrityError in Conference DB. Call to support immediately %s, %s' % (er.args[0], er.args[1]))
            return False, er.args[1]
        except pymysql.OperationalError as er:
            logging.critical('ERROR: OperationalError in Conference DB. Call to support immediately - %s, %s' % (er.args[0], er.args[1]))
            return False, er.args[1]
        except pymysql.InternalError as er:
            logging.critical('ERROR: InternallError in Conference DB. Call to support immediately - %s, %s' % (er.args[0], er.args[1]))
            return False, er.args[1]
        except pymysql.Error as er:
            logging.critical('ERROR: Can not push a data into Conference DB. Call to support immediately - %s, %s' % (er.args[0], er.args[1]))
            return False, er.args[1]

    # if more than one rows are retrieved - it gets first row from the list as a dictionary
    def listdicttodict(self, listdict):
        """Extract the first row (a dict) from a (True, rows) query result."""
        return listdict[1][0]

    def getGlobalMediaPath(self):
        """Return the conference media mount point, or None when it is absent."""
        import os  # bug fix: 'os' was used here without ever being imported
        if not os.path.exists(self.conferenceMediaStoragePath):  # check it out whether it exist
            return None  # if it doesn't - return None
        else:
            return self.conferenceMediaStoragePath  # otherwise return the path

    def __del__(self):
        self.connect_db.close()
|
8,345 | 15ca54aff4c688733c9c514ba5856e6bf29a3292 | """
Compare 1-D analytical sphere solution to 1-D numerical and 3-D Comsol solutions
for transient heat conduction in solid sphere with constant k and Cp.
Assumptions:
Convection boundary condition at surface.
Symmetry about the center of the solid.
Heat transfer via radiation assumed to be negligable.
Particle does not shrink or expand in size during pyrolysis.
Reference: Wood Handbook 2010
Requirements: Python 3, NumPy, SciPy, Matplotlib, funcHeatCond, funcTheta, funcOther
"""
import numpy as np
import matplotlib.pyplot as py
from funcHeatCond import hc3
from funcTheta import theta
from funcOther import vol, Tvol
# Parameters
# -----------------------------------------------------------------------------
d = 0.001 # diameter of sphere, m
Gb = 0.54 # basic specific gravity, Wood Handbook Table 4-7, (-)
cp = 1800 # heat capacity, J/kg*K
k = 0.12 # thermal conductivity, W/mK
x = 0 # moisture content, %
h = 350 # heat transfer coefficient, W/m^2*K
Ti = 293 # initial particle temp, K
Tinf = 773 # ambient temp, K
# 1D Numerical Solution for Transient Heat Conduction in Solid Sphere
# -----------------------------------------------------------------------------
# number of nodes from center of particle (m=0) to surface (m)
m = 1000
# time vector from 0 to max time
tmax = 4.0 # max time, s
dt = 0.01 # time step, s
nt = tmax/dt # number of time steps
t = np.arange(0, tmax+dt, dt) # time vector, s
# intraparticle temperature array [T] in Kelvin
# row = time step, column = node point from 0 (center) to m (surface)
T = hc3(d, cp, k, Gb, h, Ti, Tinf, 2, m, t)
Tavg = [np.mean(row) for row in T]
# volume average temperature at each time step
v = vol(d, m)
Tv = Tvol(T, v)
# 1D Analytical Solution for Transient Heat Conduction in Solid Sphere
# -----------------------------------------------------------------------------
ro = d/2 # radius of sphere (a.k.a outer radius), m
rs = ro/ro # dimensionless surface radius, (-)
rc = 1e-12/ro # dimensionless center radius, (-)
z = np.arange(0, 1250, 0.1) # range to evaluate the zeta, Bi equation
z[0] = 1e-12 # prevent divide by zero warning
rho = Gb*1000 # density, kg/m^3
alpha = k/(rho*cp) # thermal diffusivity biomass, m^2/s
Bi = (h*ro)/k # Biot number, (-)
Fo = (alpha * t) / (ro**2) # Fourier number, (-)
# surface temperature where ro for outer surface, b=2 for sphere
thetaRo = theta(rs, 2, z, Bi, Fo) # dimensionless temperature profile
T_o = Tinf + thetaRo*(Ti-Tinf) # convert theta to temperature in Kelvin, K
# center temperature where r for center, b=2 for sphere
thetaR = theta(rc, 2, z, Bi, Fo) # dimensionless temperature profile
T_r = Tinf + thetaR*(Ti-Tinf) # convert theta to temperature in Kelvin, K
# 3D Solid Sphere Temperature Data from Comsol
# -----------------------------------------------------------------------------
sphere = 'comsol/3d-sphere-temps.txt'
t_sphere, Tv_sphere, Tc_sphere, Ts_sphere = np.loadtxt(sphere, skiprows=5, unpack=True)
# Plot Results
# -----------------------------------------------------------------------------
py.ion()
py.close('all')
def despine():
    """Hide the top/right spines and all tick marks on the current axes."""
    ax = py.gca()
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    # NOTE(review): string 'off' values are rejected by matplotlib >= 3;
    # newer code should pass False here -- confirm the pinned version.
    py.tick_params(axis='both', bottom='off', top='off', left='off', right='off')
py.figure(1)
py.plot(t, T_o, 'o', mec='b', mew=2, mfc='none', markevery=5, label='Ts_ana')
py.plot(t, T_r, 's', mec='g', mew=2, mfc='none', markevery=5, label='Tc_ana')
py.plot(t, T[:, -1], 'r-', lw=2, label='Ts_num')
py.plot(t, T[:, 0], 'b-', lw=2, label='Tc_num')
py.axhline(Tinf, c='k', ls='--')
py.ylim(250, 800)
py.xlim(0, tmax)
py.title('1-D Analytical and 1-D Numerical')
py.ylabel('Temperature (K)')
py.xlabel('Time (s)')
py.legend(loc='best', numpoints=1, frameon=False)
py.grid()
despine()
py.figure(2)
py.plot(t, T_o, 'o', mec='b', mew=2, mfc='none', markevery=5, label='Ts_ana')
py.plot(t, T_r, 's', mec='g', mew=2, mfc='none', markevery=5, label='Tc_ana')
py.plot(t_sphere, Ts_sphere, 'r-', lw=2, label='Ts_3d')
py.plot(t_sphere, Tc_sphere, 'b-', lw=2, label='Tc_3d')
py.axhline(Tinf, c='k', ls='--')
py.ylim(250, 800)
py.xlim(0, tmax)
py.title('1-D Analytical and 3-D Comsol')
py.ylabel('Temperature (K)')
py.xlabel('Time (s)')
py.legend(loc='best', numpoints=1, frameon=False)
py.grid()
despine()
py.figure(3)
py.plot(t_sphere, Ts_sphere, 'o', mec='r', mew=2, mfc='none', label='Ts_3d' )
py.plot(t_sphere, Tc_sphere, 's', mec='b', mew=2, mfc='none', label='Tc_3d')
py.plot(t_sphere, Tv_sphere, '^', mec='g', mew=2, mfc='none', label='Tv_3d')
py.plot(t, T[:, -1], 'r-', lw=2, label='Ts_1d')
py.plot(t, T[:, 0], 'b-', lw=2, label='Tc_1d')
py.plot(t, Tv, 'g-', lw=2, label='Tv_1d')
#py.plot(t, Tavg, 'y-', lw=2, label='Tavg_1d')
py.axhline(Tinf, c='k', ls='--')
py.ylim(250, 800)
py.xlim(0, tmax)
py.title('1-D Numerical and 3-D Comsol')
py.ylabel('Temperature (K)')
py.xlabel('Time (s)')
py.legend(loc='best', numpoints=1, frameon=False)
py.grid()
despine()
|
8,346 | 3b41bd59c133bb04dae3aa48dc0699388d5bf3d4 | import os
import json
import random
chapter_mode = True
setname = 'test_other'
use_chapter = '_chapter'
minlen = 1000
maxlen = 1000
context = '_1000'
info_json = 'bookinfo{}_{}{}.json'.format(use_chapter, setname, context)
book_ID_mapping = {}
with open('speaker_book.txt') as fin:
for line in fin:
elems = line.split('|')
ID = elems[0].lstrip().strip()
speaker = elems[1].lstrip().strip()
subset = elems[3].lstrip().strip()
book = elems[5].lstrip().strip()
if (speaker, book) not in book_ID_mapping:
book_ID_mapping[(speaker, book)] = [ID]
else:
book_ID_mapping[(speaker, book)].append(ID)
with open(info_json) as fin:
spk_bookwords = json.load(fin)
worddict = set()
with open('../all_rare_words.txt') as fin:
for line in fin:
word = line.strip()
worddict.add(word)
worddict_full = {}
with open('word_freq.txt') as fin:
for line in fin:
word, freq = line.split()
worddict_full[word] = int(freq)
spk_book_KB = {}
KBfulllist = set()
for speaker, books in spk_bookwords.items():
# spk_book_KB[speaker] = {}
for book, content in books.items():
speaker_book_IDs = book_ID_mapping[(speaker, book)] if 'chapter' not in info_json else [speaker]
for speaker_book_ID in speaker_book_IDs:
spk_book_KB[speaker_book_ID] = []
bookwords = content['bookwords']
oovwords = content['oovwords']
for word in bookwords:
if word in worddict:
spk_book_KB[speaker_book_ID].append((word, worddict_full[word] if word in worddict_full else 0))
if word not in KBfulllist:
KBfulllist.add(word)
for word in oovwords:
if word in worddict:
spk_book_KB[speaker_book_ID].append((word, worddict_full[word] if word in worddict_full else 0))
if word not in KBfulllist:
KBfulllist.add(word)
full_wordlist = list(KBfulllist)
output_path = 'LibriKB{}{}all_{}'.format(use_chapter[1:], context, maxlen)
os.system('mkdir -p {}'.format(output_path))
worddict = list(worddict)
for ID, KB in spk_book_KB.items():
random.shuffle(worddict)
count = 0
while len(KB) < minlen and count < len(worddict):
word = worddict[count]
freq = worddict_full[word] if word in worddict_full else 0
if (word, freq) not in KB:
KB.append((word, freq))
count += 1
KB.sort(key=lambda tup: tup[1])
with open(os.path.join(output_path, ID), 'w') as fout:
for word, freq in KB[:maxlen]:
fout.write(word+'\n')
|
8,347 | 27fc11ae68531c7dbafdcf134f0eef019210e2de | from django import forms
from django.forms import widgets
from tsuru_dashboard import settings
import requests
class ChangePasswordForm(forms.Form):
old = forms.CharField(widget=forms.PasswordInput())
new = forms.CharField(widget=forms.PasswordInput())
confirm = forms.CharField(widget=forms.PasswordInput())
class PasswordRecoveryForm(forms.Form):
email = forms.EmailField()
token = forms.CharField()
def send(self):
url = "{0}/users/{1}/password?token={2}".format(
settings.TSURU_HOST,
self.cleaned_data['email'],
self.cleaned_data['token']
)
requests.post(url)
class TokenRequestForm(forms.Form):
email = forms.EmailField()
def send(self):
url = "{0}/users/{1}/password".format(settings.TSURU_HOST,
self.cleaned_data['email'])
requests.post(url)
class LoginForm(forms.Form):
username = forms.EmailField(max_length=60, widget=forms.TextInput(attrs={'placeholder': 'Username'}))
password = forms.CharField(widget=widgets.PasswordInput(attrs={'placeholder': 'Password'}), min_length=6)
class AddUserToTeamForm(forms.Form):
    """Add a user (by email) to one of the caller-supplied teams."""
    def __init__(self, teams=None, *args, **kwargs):
        super(AddUserToTeamForm, self).__init__(*args, **kwargs)
        if teams:
            # The team select options are only known at runtime, so they are
            # injected into the ChoiceField here rather than declared below.
            choices = []
            for team in teams:
                choices.append((team, team))
            self.fields["team"].choices = choices
    email = forms.EmailField(max_length=60)
    team = forms.ChoiceField(choices=[])
class SignupForm(forms.Form):
    """Registration form: an email plus the password typed twice."""
    email = forms.EmailField(max_length=60)
    password = forms.CharField(widget=widgets.PasswordInput, min_length=6)
    same_password_again = forms.CharField(widget=widgets.PasswordInput,
                                          min_length=6)
    def clean(self):
        """Validate that both password fields match; raises ValidationError otherwise."""
        cleaned_data = super(SignupForm, self).clean()
        password = cleaned_data.get("password")
        same_password_again = cleaned_data.get("same_password_again")
        if not password == same_password_again:
            msg = "You must type the same password twice!"
            # Attach the error to the repeated-password field before aborting.
            self._errors["same_password_again"] = self.error_class([msg])
            raise forms.ValidationError(msg)
        return cleaned_data
class KeyForm(forms.Form):
name = forms.CharField()
key = forms.CharField(widget=forms.Textarea)
|
8,348 | 73e6930c6866d3ccdbccec925bfc5e7e4702feb9 | """
eulerian_path.py
An Eulerian path, also called an Euler chain, Euler trail, Euler walk, or "Eulerian" version of any of these
variants, is a walk on the graph edges of a graph which uses each graph edge in the original graph exactly once.
A connected graph has an Eulerian path iff it has at most two graph vertices of odd degree.
The EulerianPath class represents a data type
* for finding an Eulerian path in a graph.
* An Eulerian path is a path (not necessarily simple) that
* uses every edge in the graph exactly once.
* This implementation uses a non-recursive depth-first search.
* The constructor takes Theta(E + V) time in the worst
* case, where E is the number of edges and V is
* the number of vertices.
* Each instance method takes Theta(1) time.
* It uses Theta(E + V) extra space in the worst case
* (not including the digraph).
"""
from graphs.edge import Edge
from graphs.graph import Graph
from collections import deque, defaultdict
class EulerianPath:
    """Find an Eulerian path (each edge used exactly once) in an undirected graph.

    Uses a non-recursive DFS; construction takes Theta(E + V) time.
    """

    class EEdge(Edge):
        """An edge that remembers whether the walk has already used it."""

        def __init__(self, v=0, w=0, is_used=False):
            super().__init__(v, w)
            self._is_used = is_used

        def get_is_used(self):
            return self._is_used

        def __repr__(self):
            return f'<{self.__class__.__name__}(' \
                   f'v={super().get_v()}, ' \
                   f'w={super().get_w()}, ' \
                   f'weight={super().weight()}, ' \
                   f'_is_used={self.get_is_used()})>'

    def __init__(self, g):
        """Compute an Eulerian path of ``g``; query via has_eulerian_path()/path()."""
        self.g = g
        # _path is per instance now; the original declared it as a class
        # attribute, so every EulerianPath instance shared one deque.
        self._path = deque()  # used as a stack of path vertices
        # one deque of (shared) EEdge objects per vertex; the original seeded
        # this with bare EEdge placeholders and then called .append() on them,
        # which Edge does not support
        self.adj = [deque() for _ in range(g.get_V())]

        # find vertex from which to start potential Eulerian path:
        # a vertex v with odd degree(v) if it exists;
        # otherwise a vertex with degree(v) > 0
        odd_degree_vertices = 0
        s = EulerianPath.__non_isolated_vertex(g)
        for v in range(g.get_V()):
            if g.degree(v) % 2 != 0:
                odd_degree_vertices += 1
                s = v

        # Eulerian path exists iff at most two vertices have odd degree
        if odd_degree_vertices > 2:
            self._path = None
            return

        # special case for graph with 0 edges (degenerate Eulerian path)
        if s == -1:
            s = 0

        for v in range(g.get_V()):
            self_loops = 0
            for w in g.adj_vertices(v):
                # careful with self loops: add each one only once
                if v == w.item:
                    if self_loops % 2 == 0:
                        e = EulerianPath.EEdge(v, w.item)
                        self.adj[v].append(e)
                        self.adj[w.item].append(e)
                    self_loops += 1
                elif v < w.item:
                    e = EulerianPath.EEdge(v, w.item)
                    self.adj[v].append(e)
                    self.adj[w.item].append(e)

        # greedily search through edges in iterative DFS style.
        # NB: the original looped `while stack is not None` / `while adj[v] is
        # not None`, which never terminates normally because a deque is never
        # None (empty pops raise IndexError); test emptiness instead.
        stack = deque()
        stack.append(s)
        while stack:
            v = stack.pop()
            while self.adj[v]:
                edge = self.adj[v].popleft()
                if edge.get_is_used():
                    continue
                edge._is_used = True
                stack.append(v)
                v = edge.other(v)
            # push vertex with no more leaving edges to path
            # (debug print of each edge removed)
            self._path.append(v)

        # the walk is Eulerian only if every edge was used
        if len(self._path) != g.get_E() + 1:
            self._path = None

    def get_path(self):
        """Return the internal path deque, or None when no Eulerian path exists."""
        return self._path

    def path(self):
        """Yield the vertices of the Eulerian path in order."""
        yield from list(self.get_path())

    def has_eulerian_path(self):
        """Return True iff the graph has an Eulerian path."""
        return self.get_path() is not None

    @staticmethod
    def __non_isolated_vertex(g):
        # first vertex with degree > 0, or -1 if the graph has no edges
        for v in range(g.get_V()):
            if g.degree(v) > 0:
                return v
        return -1

    def __repr__(self):
        return f'adj = {self.adj}, \n' \
               f'path={self.get_path()}'
def main():
    """Build a graph from tinyG.txt and print its Eulerian path, if any."""
    g = Graph(13)
    with open("../resources/tinyG.txt", ) as f:
        for line in f.readlines():
            # each edge line is "v w"; shorter lines (counts, blanks) are skipped
            vertices = " ".join(line.splitlines()).split(' ')
            if len(vertices) < 2:
                continue
            else:
                v1, v2 = int(vertices[0]), int(vertices[1])
                g.add_edge(v1, v2)
    print(g)
    euler = EulerianPath(g)
    print(euler)
    print('Eulerian path: ')
    if euler.has_eulerian_path():
        for v in euler.path():
            print(f'{v} ')
        print()
    else:
        print('None')
    print()
|
8,349 | c00a8bfec46ed829e413257bf97c44add564080d | #!/usr/bin/env python
import rospy
import rosnode
import csv
import datetime
import rosbag
import sys
import os
import matplotlib.pyplot as plt
import argparse
import math
from math import hypot
import numpy as np
from sensor_msgs.msg import LaserScan
from std_msgs.msg import String
import yaml as yaml
start_time = None
value_dict = {}
combine = False
#bag_dir = '/home/michael/youbot_local_dev/youbot_rosbag_20180828_szenario1/fahrt3.bag'
#bag_dir = '/home/michael/youbot_local_dev/youbot_rosbag_20180828_szenario2/fahrt1.bag'
#bag_dir = '/home/alex/wentz_catkin_ws/src/automatic_simulation/tests/reference_bag.bag'
'''
"rosservice call /change_material \"{name: \"Gazebo/Grey\", reflectivity: 0.2, transmission:\
\ 0.0, absorption: 0.1, angular_factor: 0.3}\""
'''
def compute_std(mean, liste):
    """Population standard deviation of `liste` around the supplied `mean`."""
    squared_errors = [(mean - value) ** 2 for value in liste]
    variance = sum(squared_errors) / float(len(squared_errors))
    return math.sqrt(variance)
def load_file(filePath,file_name):
    """Load a YAML file and return its contents as a dict.

    Returns an empty dict when the file cannot be opened or parsed.
    NOTE: the path is joined without a separator, matching the call sites
    (filePath is expected to end with '/').
    """
    dict_ = {}
    rospy.loginfo("Loading: %s",filePath+"/"+file_name)
    try:
        rospy.loginfo("Loading: %s",file_name)
        file = open(filePath+file_name,'r')
        try:
            # safe_load avoids executing arbitrary python tags from the file
            dict_ = yaml.safe_load(file)
        finally:
            # always release the handle; previously it leaked on parse errors
            # and close() crashed with NameError when open() itself failed
            file.close()
    except (IOError, OSError, yaml.YAMLError) as exc:
        print(exc)
        rospy.logerr('Failed to load: %s From: %s',file_name,filePath)
    return dict_
def get_params(temp):
    """Parse the four material parameters out of the serialized service call
    string (everything after the opening '{')."""
    parts = temp.split(",")

    def _to_float(field, extra_junk=()):
        # the value sits after the ':'; strip spaces/escapes (and the
        # trailing '}"' for the final field) before converting
        raw = field.split(":")[1].replace(" ", "").replace("\\", "")
        for junk in extra_junk:
            raw = raw.replace(junk, "")
        return float(raw)

    return {
        'reflectivity': _to_float(parts[1]),
        'transmission': _to_float(parts[2]),
        'absorption': _to_float(parts[3]),
        'angular_factor': _to_float(parts[4], ('}', '"')),
    }
def init():
    """Initialise the ROS node for this bag-extraction script."""
    rospy.init_node("monitoring_bag_topic_extract")
def get_bag_data():
    """Scan every bag*.bag in `path`, compare its /material_laser_scan ranges
    against the reference yaml, and write per-bag statistics (mean range,
    std deviation, loss rate per scan index) to a yaml file per bag."""
    path = "/home/alex/wentz_catkin_ws/src/automatic_simulation/tests/"
    ref = "reference_angels.yaml"
    ref_dict = load_file(path,ref)
    angels = ref_dict['angels2']
    indexes = ref_dict['index']
    ranges = ref_dict['ranges']
    for f in os.listdir(path):
        if rospy.is_shutdown():
            break
        if f.startswith("bag") and f.endswith(".bag"):
            print "Loading Bag: "+path+f
            bag = rosbag.Bag(path+f)
            params = {}
            scans = []
            # collect all laser scans, plus the first material parameter set
            for topic, msg, t in bag.read_messages():
                if topic == "/material_laser_scan":
                    scans.append(msg.ranges)
                if topic == "/info_vals" and not params:
                    params = get_params(msg.data.split("{")[1])
            # compute mean_err, std_dev, data_loss per value
            # scan_info[idx] = [miss_count, hit_count, range_sum, hit_values,
            #                   ref_range, angle]
            scan_info = {}
            for scan in scans:
                for idx, val in enumerate(scan):
                    if idx in indexes:
                        #all val should be on the plate
                        i = indexes.index(idx)
                        if idx not in scan_info.keys():
                            scan_info[idx] = [0,0,0.0,[],0.0,0.0]
                            scan_info[idx][4] = round(ranges[i], 5)
                            scan_info[idx][5] = angels[i]
                        # readings <= 0.8 m count as hits; the rest as losses
                        if val <= 0.8:
                            scan_info[idx][1] +=1
                            scan_info[idx][2] +=val
                            scan_info[idx][3].append(val)
                        else:
                            scan_info[idx][0] +=1
            final_d = {}
            final_d["params"] = params
            for key in scan_info.keys():
                final_d[key] = {}
                final_d[key]['ref_range'] = scan_info[key][4]
                final_d[key]['angle'] = scan_info[key][5]
                if scan_info[key][3]:
                    #if there is at least one element
                    mean = scan_info[key][2] / scan_info[key][1]
                    final_d[key]['mean_range'] = mean
                    std = compute_std(mean, scan_info[key][3])
                    final_d[key]['stdev'] = std
                    final_d[key]['loss'] = float(scan_info[key][0])/float((scan_info[key][1]+scan_info[key][0]))
                else:
                    # no hits at all: mark as total loss
                    final_d[key]['mean_range'] = 0.0
                    final_d[key]['stdev'] = 0.0
                    final_d[key]['loss'] = 1.0
            f1 = yaml.dump(final_d, default_flow_style=False)
            try:
                # NOTE(review): 'f' is rebound here from the bag filename to
                # the output file handle; works, but confusing shadowing
                f = open('/home/alex/wentz_catkin_ws/src/automatic_simulation/tests/yaml/'+f+'.yaml','w')
                f.write(f1)
                f.close()
            except Exception as inst:
                rospy.loginfo('%s',str(inst))
|
8,350 | a52762fb13c04ced07a41a752578c4173d1eac42 | from queue import Queue
class Node():
    """A binary-tree node holding a value and optional left/right children."""
    def __init__(self, value, left=None, right=None):
        self.value = value
        self.left = left
        self.right = right
def array_to_tree_dfs(array):
    """Build a binary tree from a heap-style (level-order) array.

    Index i's children live at 2*(i+1)-1 and 2*(i+1); ``None`` entries mark
    missing nodes.  Returns the root Node (implicitly None for an empty array).
    """
    n = len(array)
    if n>0:
        root = Node(array[0])
        def dfs(node, index):
            # attach left child if the slot exists and is populated
            if 2*(index+1) -1 < n and array[2*(index+1) -1] is not None:
                node.left = Node(array[2*(index+1) -1])
                dfs(node.left, 2*(index+1) -1)
            # same for the right child
            if 2*(index+1) < n and array[2*(index+1)] is not None:
                node.right = Node(array[2*(index+1)])
                dfs(node.right, 2*(index+1))
            return
        dfs(root, 0)
        return root
def tree_to_array_bfs(root):
    """Serialize a binary tree to a list in level (breadth-first) order,
    skipping absent children entirely."""
    pending = Queue(maxsize=0)  # unbounded FIFO
    pending.put(root)
    values = []
    while not pending.empty():
        node = pending.get()
        values.append(node.value)
        if node.left is not None:
            pending.put(node.left)
        if node.right is not None:
            pending.put(node.right)
    return values
def findClosestValueInBst(tree, target):
    """Return the value in the BST closest to `target` (an exact match wins)."""
    best = tree.value
    node = tree
    while node is not None:
        if node.value == target:
            return target
        if abs(node.value - target) < abs(best - target):
            best = node.value
        # BST property: smaller values to the left, larger to the right
        node = node.left if node.value > target else node.right
    return best
if __name__ == '__main__':
    # level-order array with None placeholders for absent nodes
    array = [5,3,10,2,4,8] + [None]*6 + [9] + [None]*2
    root = array_to_tree_dfs(array)
    new_array = tree_to_array_bfs(root)
    print(new_array)
    print(findClosestValueInBst(root, 6))
|
8,351 | 12396130dc52866cc54d6dc701cf0f9a41a168b6 | from PyInstaller.utils.hooks import collect_data_files
# PyInstaller hook: declare sklearn's dynamically-imported extension modules
# so the bundler includes them, and collect sklearn's packaged data files.
hiddenimports = ['sklearn.utils.sparsetools._graph_validation',
                 'sklearn.utils.sparsetools._graph_tools',
                 'sklearn.utils.lgamma',
                 'sklearn.utils.weight_vector']
datas = collect_data_files('sklearn')
8,352 | 8ca16947054b681a5f43d8b8029191d031d3a218 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: Swking
@File : ZDT.py
@Date : 2018/12/28
@Desc :
"""
import numpy as np
class ZDT1:
    """ZDT1 bi-objective benchmark: 30 variables in [0, 1], convex Pareto front."""

    def __init__(self):
        self.dimension = 30
        self.objFuncNum = 2
        self.isMin = True
        self.min = np.zeros(self.dimension)
        self.max = np.zeros(self.dimension) + 1
        self.span = (self.min, self.max)

    def Func(self, X):
        """Evaluate both objectives at decision vector X; returns array [f1, f2]."""
        Y = np.zeros(2)
        Y[0] = X[0]
        # g sums x_2..x_n; the original slice X[1:-1] dropped the last variable
        g = 1 + (9 / (self.dimension - 1)) * np.sum(X[1:])
        Y[1] = g * (1 - ((Y[0] / g)**0.5))
        return Y
class ZDT2:
    """ZDT2 bi-objective benchmark: 30 variables in [0, 1], concave Pareto front."""

    def __init__(self):
        self.dimension = 30
        self.objFuncNum = 2
        self.isMin = True
        self.min = np.zeros(self.dimension)
        self.max = np.zeros(self.dimension) + 1
        self.span = (self.min, self.max)

    def Func(self, X):
        """Evaluate both objectives at decision vector X; returns array [f1, f2]."""
        Y = np.zeros(2)
        Y[0] = X[0]
        # g sums x_2..x_n; the original slice X[1:-1] dropped the last variable
        g = 1 + (9 / (self.dimension - 1)) * np.sum(X[1:])
        Y[1] = g * (1 - (Y[0] / g) ** 2)
        return Y
class ZDT3:
    """ZDT3 bi-objective benchmark: disconnected Pareto front (here 10 variables)."""

    def __init__(self):
        self.dimension = 10
        self.objFuncNum = 2
        self.isMin = True
        self.min = np.zeros(self.dimension)
        self.max = np.zeros(self.dimension) + 1
        self.span = (self.min, self.max)

    def Func(self, X):
        """Evaluate both objectives at decision vector X; returns array [f1, f2]."""
        Y = np.zeros(2)
        Y[0] = X[0]
        # g sums x_2..x_n; the original slice X[1:-1] dropped the last variable
        g = 1 + (9 / (self.dimension - 1)) * np.sum(X[1:])
        Y[1] = g * (1 - (np.sqrt(Y[0] / g)) - (Y[0] / g) * np.sin(10 * np.pi * Y[0]))
        return Y
class ZDT4:
    """ZDT4 bi-objective benchmark: x1 in [0, 1], x2..xn in [-5, 5], multimodal g."""

    def __init__(self):
        self.dimension = 10
        self.objFuncNum = 2
        self.isMin = True
        self.min = np.zeros(self.dimension) - 5
        self.min[0] = 0   # first variable is bounded to [0, 1]
        self.max = np.zeros(self.dimension) + 5
        self.max[0] = 1
        self.span = (self.min, self.max)

    def Func(self, X):
        """Evaluate both objectives at decision vector X; returns array [f1, f2]."""
        Y = np.zeros(2)
        Y[0] = X[0]
        # Rastrigin-style g over x_2..x_n; the original X[1:-1] dropped x_n
        g = 1 + 10 * (self.dimension - 1) + np.sum(np.power(X[1:], 2) - 10 * np.cos(4 * np.pi * X[1:]))
        Y[1] = g * (1 - (np.sqrt(Y[0] / g)))
        return Y
class ZDT6:
    """ZDT6 bi-objective benchmark: nonuniform density along the Pareto front."""

    def __init__(self):
        self.dimension = 10
        self.objFuncNum = 2
        self.isMin = True
        self.min = np.zeros(self.dimension)
        self.max = np.zeros(self.dimension) + 1
        self.span = (self.min, self.max)

    def Func(self, X):
        """Evaluate both objectives at decision vector X; returns array [f1, f2]."""
        Y = np.zeros(2)
        Y[0] = 1 - np.exp(-4 * X[0]) * (np.sin(6 * np.pi * X[0]) ** 6)
        # g aggregates x_2..x_n; the original slice X[1:-1] dropped x_n
        g = 1 + 9 * (np.sum(X[1:] / (self.dimension - 1)) ** 0.25)
        Y[1] = g * (1 - (Y[0] / g) ** 2)
        return Y
if __name__ == '__main__':
    # smoke test: evaluate ZDT1 at the all-ones vector
    zdt = ZDT1()
    print(zdt.Func(np.ones(zdt.dimension)))
8,353 | 55ffcf5e6120cc07da461e30979dd8a36a599bee | #-------------------------------------------------------------------------------
# rtlconverter.py
#
# PyCoRAM RTL Converter
#
# Copyright (C) 2013, Shinya Takamaeda-Yamazaki
# License: Apache 2.0
#-------------------------------------------------------------------------------
import sys
import os
import subprocess
import copy
import collections
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))) )
import utils.version
if sys.version_info[0] >= 3:
from rtlconverter.convertvisitor import InstanceConvertVisitor
from rtlconverter.convertvisitor import InstanceReplaceVisitor
else:
from convertvisitor import InstanceConvertVisitor
from convertvisitor import InstanceReplaceVisitor
import pyverilog.utils.signaltype as signaltype
from pyverilog.utils.scope import ScopeLabel, ScopeChain
import pyverilog.vparser.ast as vast
from pyverilog.vparser.parser import VerilogCodeParser
from pyverilog.dataflow.modulevisitor import ModuleVisitor
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
class RtlConverter(object):
    """Parses user RTL, replaces CoRAM object instances, and collects the
    top module's parameters/IO ports for external connection."""

    def __init__(self, filelist, topmodule='userlogic', include=None,
                 define=None, single_clock=False):
        self.filelist = filelist
        self.topmodule = topmodule
        self.include = include
        self.define = define
        self.single_clock = single_clock
        # filled in by generate()
        self.top_parameters = collections.OrderedDict()
        self.top_ioports = collections.OrderedDict()
        self.coram_object = collections.OrderedDict()

    def getTopParameters(self):
        """Top-module parameters discovered by generate()."""
        return self.top_parameters

    def getTopIOPorts(self):
        """Top-module user IO ports (name -> (port AST node, bit width))."""
        return self.top_ioports

    def getCoramObject(self):
        """CoRAM objects found in the user RTL (mode -> items)."""
        return self.coram_object

    def dumpCoramObject(self):
        """Pretty-print all discovered CoRAM objects, sorted by ID."""
        coram_object = self.getCoramObject()
        print("----------------------------------------")
        print("CoRAM Objects in User-defined RTL")
        for mode, coram_items in coram_object.items():
            print("  CoRAM %s" % mode)
            for threadname, idx, subid, addrwidth, datawidth in sorted(coram_items, key=lambda x:x[1]):
                print("    %s(ID:%d%s Thread:%s AddrWidth:%s DataWidth:%s)" %
                      (mode, idx, ( '' if subid is None else ''.join( ('[', str(subid), ']') ) ),
                       threadname, str(addrwidth), str(datawidth)))

    def generate(self):
        """Parse the file list, rewrite CoRAM instances, and return the new AST.

        Also populates top_parameters, top_ioports and coram_object as a
        side effect.
        """
        preprocess_define = []
        if self.single_clock:
            preprocess_define.append('CORAM_SINGLE_CLOCK')
        if self.define:
            preprocess_define.extend(self.define)

        code_parser = VerilogCodeParser(self.filelist,
                                        preprocess_include=self.include,
                                        preprocess_define=preprocess_define)
        ast = code_parser.parse()

        module_visitor = ModuleVisitor()
        module_visitor.visit(ast)
        modulenames = module_visitor.get_modulenames()
        moduleinfotable = module_visitor.get_moduleinfotable()

        # first pass: convert CoRAM instances; second pass: splice the
        # converted instances back into a fresh AST
        instanceconvert_visitor = InstanceConvertVisitor(moduleinfotable, self.topmodule)
        instanceconvert_visitor.start_visit()

        replaced_instance = instanceconvert_visitor.getMergedReplacedInstance()
        replaced_instports = instanceconvert_visitor.getReplacedInstPorts()
        replaced_items = instanceconvert_visitor.getReplacedItems()

        new_moduleinfotable = instanceconvert_visitor.get_new_moduleinfotable()
        instancereplace_visitor = InstanceReplaceVisitor(replaced_instance,
                                                         replaced_instports,
                                                         replaced_items,
                                                         new_moduleinfotable)
        ret = instancereplace_visitor.getAST()

        # gather user-defined io-ports on top-module and parameters to connect external
        frametable = instanceconvert_visitor.getFrameTable()
        top_ioports = []
        for i in moduleinfotable.getIOPorts(self.topmodule):
            if signaltype.isClock(i) or signaltype.isReset(i): continue
            top_ioports.append(i)

        top_scope = ScopeChain( [ScopeLabel(self.topmodule, 'module')] )
        top_sigs = frametable.getSignals(top_scope)
        top_params = frametable.getConsts(top_scope)

        for sk, sv in top_sigs.items():
            # skip hierarchical signals; only direct children of the top scope
            if len(sk) > 2: continue
            signame = sk[1].scopename
            for svv in sv:
                if (signame in top_ioports and
                    not (signaltype.isClock(signame) or signaltype.isReset(signame)) and
                    isinstance(svv, vast.Input) or isinstance(svv, vast.Output) or isinstance(svv, vast.Inout)):
                    port = svv
                    # resolve msb/lsb expressions to a concrete bit width
                    msb_val = instanceconvert_visitor.optimize(instanceconvert_visitor.getTree(port.width.msb, top_scope))
                    lsb_val = instanceconvert_visitor.optimize(instanceconvert_visitor.getTree(port.width.lsb, top_scope))
                    width = int(msb_val.value) - int(lsb_val.value) + 1
                    self.top_ioports[signame] = (port, width)
                    break

        for ck, cv in top_params.items():
            if len(ck) > 2: continue
            signame = ck[1].scopename
            param = cv[0]
            if isinstance(param, vast.Genvar): continue
            self.top_parameters[signame] = param

        self.coram_object = instanceconvert_visitor.getCoramObject()

        return ret
def main():
    """Command-line entry point: convert the given RTL files and write out.v."""
    from optparse import OptionParser
    INFO = "PyCoRAM RTL Converter"
    VERSION = utils.version.VERSION
    USAGE = "Usage: python rtlconverter.py -t TOPMODULE file ..."

    def showVersion():
        print(INFO)
        print(VERSION)
        print(USAGE)
        sys.exit()

    optparser = OptionParser()
    optparser.add_option("-v","--version",action="store_true",dest="showversion",
                         default=False,help="Show the version")
    optparser.add_option("-t","--top",dest="topmodule",
                         default="userlogic",help="Top module, Default=userlogic")
    optparser.add_option("-o","--output",dest="outputfile",
                         default="out.v",help="Output file name, Default=out.v")
    optparser.add_option("-I","--include",dest="include",action="append",
                         default=[],help="Include path")
    optparser.add_option("-D",dest="define",action="append",
                         default=[],help="Macro Definition")
    optparser.add_option("--singleclock",action="store_true",dest="single_clock",
                         default=False,help="Use single clock mode")
    (options, args) = optparser.parse_args()

    filelist = args

    if options.showversion:
        showVersion()

    for f in filelist:
        if not os.path.exists(f): raise IOError("file not found: " + f)

    if len(filelist) == 0:
        showVersion()

    converter = RtlConverter(filelist, options.topmodule,
                             include=options.include,
                             define=options.define,
                             single_clock=options.single_clock)
    ast = converter.generate()
    converter.dumpCoramObject()

    # serialize the converted AST back to Verilog source
    asttocode = ASTCodeGenerator()
    code = asttocode.visit(ast)

    f = open(options.outputfile, 'w')
    f.write(code)
    f.close()
|
8,354 | 2e6f04c3ff3e47a2c3e9f6a7d93e7ce2955a2756 | from __future__ import print_function
from __future__ import absolute_import
from builtins import str
from builtins import range
from builtins import object
import hashlib
from xml.sax.saxutils import escape
from struct import unpack, pack
import textwrap
import json
from .anconf import warning, error, CONF, enable_colors, remove_colors, save_colors, color_range
def disable_print_colors():
    """Turn off colored output; returns the saved palette for later restore."""
    colors = save_colors()
    remove_colors()
    return colors
def enable_print_colors(colors):
    """Re-enable colored output from a palette saved by disable_print_colors()."""
    enable_colors(colors)
# Handle exit message
def Exit(msg):
    """Log an error message and abort by raising an exception.

    The original `raise ("oops")` raised a plain string, which is a
    TypeError on Python 3 and also discarded the message; raise a real
    exception carrying `msg` instead.
    """
    warning("Error : " + msg)
    raise Exception(msg)
def Warning(msg):
    """Forward `msg` to the configured warning logger.

    NOTE(review): shadows the builtin `Warning` exception class in this module.
    """
    warning(msg)
def _PrintBanner():
    """Print a 75-character '*' separator using the configured print function."""
    print_fct = CONF["PRINT_FCT"]
    print_fct("*" * 75 + "\n")
def _PrintSubBanner(title=None):
    """Print a '#' sub-banner line, optionally followed by a title."""
    print_fct = CONF["PRINT_FCT"]
    if title is None:  # identity check; `== None` is unidiomatic
        print_fct("#" * 20 + "\n")
    else:
        print_fct("#" * 10 + " " + title + "\n")
def _PrintNote(note, tab=0):
    """Print an indented, colorized '# note' line."""
    print_fct = CONF["PRINT_FCT"]
    note_color = CONF["COLORS"]["NOTE"]
    normal_color = CONF["COLORS"]["NORMAL"]
    print_fct("\t" * tab + "%s# %s%s" % (note_color, note, normal_color) + "\n")
# Print arg into a correct format
def _Print(name, arg):
    """Print `name` followed by `arg`, formatted according to arg's type."""
    buff = name + " "

    if type(arg).__name__ == 'int':
        buff += "0x%x" % arg
    elif type(arg).__name__ == 'long':
        # 'long' only exists on Python 2; this branch is dead on Python 3
        buff += "0x%x" % arg
    elif type(arg).__name__ == 'str':
        buff += "%s" % arg
    elif isinstance(arg, SV):
        buff += "0x%x" % arg.get_value()
    elif isinstance(arg, SVs):
        buff += arg.get_value().__str__()

    print(buff)
def PrettyShowEx(exceptions):
    """Print each exception analysis object, colorized, when any are present."""
    if len(exceptions) > 0:
        CONF["PRINT_FCT"]("Exceptions:\n")
        for i in exceptions:
            CONF["PRINT_FCT"]("\t%s%s%s\n" %
                              (CONF["COLORS"]["EXCEPTION"], i.show_buff(),
                               CONF["COLORS"]["NORMAL"]))
def _PrintXRef(tag, items):
    """Print cross-references; each item is (method-like object, xref objects)."""
    print_fct = CONF["PRINT_FCT"]
    for i in items:
        print_fct("%s: %s %s %s %s\n" %
                  (tag, i[0].get_class_name(), i[0].get_name(),
                   i[0].get_descriptor(), ' '.join("%x" % j.get_idx()
                                                   for j in i[1])))
def _PrintDRef(tag, items):
    """Like _PrintXRef, but the second element holds raw indices, not objects."""
    print_fct = CONF["PRINT_FCT"]
    for i in items:
        print_fct("%s: %s %s %s %s\n" %
                  (tag, i[0].get_class_name(), i[0].get_name(),
                   i[0].get_descriptor(), ' '.join("%x" % j for j in i[1])))
def _PrintDefault(msg):
    """Print `msg` via the configured print function."""
    print_fct = CONF["PRINT_FCT"]
    print_fct(msg)
def PrettyShow(m_a, basic_blocks, notes={}):
    """Print a colorized disassembly of `basic_blocks`: per-instruction offset,
    mnemonic, operands, branch/switch targets, and exception info per block.

    NOTE(review): `notes` is a mutable default argument, but it is only read
    here, so the usual aliasing pitfall does not apply.
    """
    idx = 0
    nb = 0

    offset_color = CONF["COLORS"]["OFFSET"]
    offset_addr_color = CONF["COLORS"]["OFFSET_ADDR"]
    instruction_name_color = CONF["COLORS"]["INSTRUCTION_NAME"]
    branch_false_color = CONF["COLORS"]["BRANCH_FALSE"]
    branch_true_color = CONF["COLORS"]["BRANCH_TRUE"]
    branch_color = CONF["COLORS"]["BRANCH"]
    exception_color = CONF["COLORS"]["EXCEPTION"]
    bb_color = CONF["COLORS"]["BB"]
    normal_color = CONF["COLORS"]["NORMAL"]
    print_fct = CONF["PRINT_FCT"]

    colors = CONF["COLORS"]["OUTPUT"]

    for i in basic_blocks:
        print_fct("%s%s%s : \n" % (bb_color, i.get_name(), normal_color))
        instructions = i.get_instructions()
        for ins in instructions:
            if nb in notes:
                for note in notes[nb]:
                    _PrintNote(note, 1)

            print_fct("\t%s%-3d%s(%s%08x%s) " %
                      (offset_color, nb, normal_color, offset_addr_color, idx,
                       normal_color))
            print_fct("%s%-20s%s" %
                      (instruction_name_color, ins.get_name(), normal_color))

            operands = ins.get_operands()
            print_fct(
                "%s" %
                ", ".join(m_a.get_vm().colorize_operands(operands, colors)))

            op_value = ins.get_op_value()

            # branch/switch targets are only shown on a block's last instruction
            if ins == instructions[-1] and i.childs:
                print_fct(" ")

                # packed/sparse-switch
                if (op_value == 0x2b or op_value == 0x2c) and len(i.childs) > 1:
                    values = i.get_special_ins(idx).get_values()
                    print_fct("%s[ D:%s%s " %
                              (branch_false_color, i.childs[0][2].get_name(),
                               branch_color))
                    print_fct(' '.join("%d:%s" % (
                        values[j], i.childs[j + 1][2].get_name()) for j in
                                       range(0, len(i.childs) - 1)) + " ]%s" %
                              normal_color)
                else:
                    if len(i.childs) == 2:
                        print_fct("%s[ %s%s " % (branch_false_color,
                                                 i.childs[0][2].get_name(),
                                                 branch_true_color))
                        print_fct(' '.join("%s" % c[2].get_name(
                        ) for c in i.childs[1:]) + " ]%s" % normal_color)
                    else:
                        print_fct("%s[ " % branch_color + ' '.join(
                            "%s" % c[2].get_name() for c in i.childs) + " ]%s" %
                                  normal_color)

            idx += ins.get_length()
            nb += 1

            print_fct("\n")

        if i.get_exception_analysis():
            print_fct("\t%s%s%s\n" %
                      (exception_color, i.exception_analysis.show_buff(),
                       normal_color))

        print_fct("\n")
class TmpBlock(object):
    """Lightweight stand-in for a basic block; only carries a name."""
    def __init__(self, name):
        self.name = name

    def get_name(self):
        return self.name
def method2json(mx, directed_graph=False):
    """Serialize a method analysis to JSON, directed or undirected flavour."""
    builder = method2json_direct if directed_graph else method2json_undirect
    return builder(mx)
def method2json_undirect(mx):
    """Serialize a method's basic blocks to JSON: one report per block with
    its id, register count, instruction list and child edges."""
    d = {}
    reports = []
    d["reports"] = reports

    for DVMBasicMethodBlock in mx.basic_blocks.gets():
        cblock = {}

        cblock["BasicBlockId"] = DVMBasicMethodBlock.get_name()
        cblock["registers"] = mx.get_method().get_code().get_registers_size()
        cblock["instructions"] = []

        ins_idx = DVMBasicMethodBlock.start
        for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions(
        ):
            c_ins = {}
            c_ins["idx"] = ins_idx
            c_ins["name"] = DVMBasicMethodBlockInstruction.get_name()
            c_ins["operands"] = DVMBasicMethodBlockInstruction.get_operands(
                ins_idx)

            cblock["instructions"].append(c_ins)
            ins_idx += DVMBasicMethodBlockInstruction.get_length()

        cblock["Edge"] = []
        for DVMBasicMethodBlockChild in DVMBasicMethodBlock.childs:
            cblock["Edge"].append(DVMBasicMethodBlockChild[-1].get_name())

        reports.append(cblock)

    return json.dumps(d)
def method2json_direct(mx):
    """Serialize a method's CFG to JSON with directed edges.

    Blocks with a self-loop get a synthetic '<name>-pre' predecessor block
    inserted so the rendered graph stays well-formed; parents of such blocks
    are re-routed through the pre-block via the `hooks` map.
    """
    d = {}
    reports = []
    d["reports"] = reports

    # hooks: parent block name -> [pre-block, original child] for self-loops
    hooks = {}

    l = []
    for DVMBasicMethodBlock in mx.basic_blocks.gets():
        for index, DVMBasicMethodBlockChild in enumerate(
                DVMBasicMethodBlock.childs):
            if DVMBasicMethodBlock.get_name(
            ) == DVMBasicMethodBlockChild[-1].get_name():
                # self-loop: fabricate a '-pre' block pointing at this block
                preblock = TmpBlock(DVMBasicMethodBlock.get_name() + "-pre")

                cnblock = {}
                cnblock["BasicBlockId"] = DVMBasicMethodBlock.get_name(
                ) + "-pre"
                cnblock["start"] = DVMBasicMethodBlock.start
                cnblock["notes"] = []

                cnblock["Edge"] = [DVMBasicMethodBlock.get_name()]
                cnblock["registers"] = 0
                cnblock["instructions"] = []
                cnblock["info_bb"] = 0

                l.append(cnblock)

                for parent in DVMBasicMethodBlock.fathers:
                    hooks[parent[-1].get_name()] = []
                    hooks[parent[-1].get_name()].append(preblock)

                    for idx, child in enumerate(parent[-1].childs):
                        if child[-1].get_name() == DVMBasicMethodBlock.get_name(
                        ):
                            hooks[parent[-1].get_name()].append(child[-1])

    for DVMBasicMethodBlock in mx.basic_blocks.gets():
        cblock = {}

        cblock["BasicBlockId"] = DVMBasicMethodBlock.get_name()
        cblock["start"] = DVMBasicMethodBlock.start
        cblock["notes"] = DVMBasicMethodBlock.get_notes()

        cblock["registers"] = mx.get_method().get_code().get_registers_size()
        cblock["instructions"] = []

        ins_idx = DVMBasicMethodBlock.start
        last_instru = None
        for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions(
        ):
            c_ins = {}
            c_ins["idx"] = ins_idx
            c_ins["name"] = DVMBasicMethodBlockInstruction.get_name()
            c_ins["operands"] = DVMBasicMethodBlockInstruction.get_operands(
                ins_idx)
            c_ins["formatted_operands"
                  ] = DVMBasicMethodBlockInstruction.get_formatted_operands()

            cblock["instructions"].append(c_ins)

            # 0x2b / 0x2c are the packed/sparse-switch opcodes
            if (DVMBasicMethodBlockInstruction.get_op_value() == 0x2b or
                    DVMBasicMethodBlockInstruction.get_op_value() == 0x2c):
                values = DVMBasicMethodBlock.get_special_ins(ins_idx)
                cblock["info_next"] = values.get_values()

            ins_idx += DVMBasicMethodBlockInstruction.get_length()
            last_instru = DVMBasicMethodBlockInstruction

        # info_bb: 0 = single exit, 1 = conditional branch, 2 = switch
        cblock["info_bb"] = 0
        if DVMBasicMethodBlock.childs:
            if len(DVMBasicMethodBlock.childs) > 1:
                cblock["info_bb"] = 1

            if (last_instru.get_op_value() == 0x2b or
                    last_instru.get_op_value() == 0x2c):
                cblock["info_bb"] = 2

        cblock["Edge"] = []
        for DVMBasicMethodBlockChild in DVMBasicMethodBlock.childs:
            ok = False
            if DVMBasicMethodBlock.get_name() in hooks:
                if DVMBasicMethodBlockChild[-1] in hooks[
                        DVMBasicMethodBlock.get_name()
                ]:
                    ok = True
                    cblock["Edge"].append(hooks[DVMBasicMethodBlock.get_name(
                    )][0].get_name())

            if not ok:
                cblock["Edge"].append(DVMBasicMethodBlockChild[-1].get_name())

        exception_analysis = DVMBasicMethodBlock.get_exception_analysis()
        if exception_analysis:
            cblock["Exceptions"] = exception_analysis.get()

        reports.append(cblock)

    reports.extend(l)

    return json.dumps(d)
class SV(object):
    """One scalar unpacked from a struct format; editable and re-packable."""

    def __init__(self, size, buff):
        # `size` is a struct format string; keep only the first unpacked field
        self.__format = size
        self.__number = unpack(size, buff)[0]

    def _get(self):
        return pack(self.__format, self.__number)

    def __str__(self):
        return "0x%x" % self.__number

    def __int__(self):
        return self.__number

    def get_value_buff(self):
        """Raw bytes of the current value, re-packed with the original format."""
        return self._get()

    def get_value(self):
        return self.__number

    def set_value(self, attr):
        self.__number = attr
class SVs(object):
    """A namedtuple of values unpacked from a struct format; re-packable."""

    def __init__(self, size, ntuple, buff):
        self.__format = size
        self.__fields = ntuple._make(unpack(size, buff))

    def _get(self):
        ordered = [getattr(self.__fields, name) for name in self.__fields._fields]
        return pack(self.__format, *ordered)

    def _export(self):
        return [name for name in self.__fields._fields]

    def get_value_buff(self):
        """Raw bytes of the current values, re-packed with the original format."""
        return self._get()

    def get_value(self):
        return self.__fields

    def set_value(self, attr):
        """Replace named fields; `attr` maps field name -> new value."""
        self.__fields = self.__fields._replace(**attr)

    def __str__(self):
        return self.__fields.__str__()
def object_to_bytes(obj):
    """
    Convert a object to a bytearray or call get_raw() of the object
    if no useful type was found.
    """
    if isinstance(obj, str):
        return bytearray(obj, "UTF-8")
    elif isinstance(obj, bool):
        # booleans carry no payload; must be tested before int
        # (bool is an int subclass)
        return bytearray()
    elif isinstance(obj, int):
        return pack("<L", obj)
    elif obj is None:  # identity check instead of `== None`
        return bytearray()
    elif isinstance(obj, bytearray):
        return obj
    else:
        return obj.get_raw()
class MethodBC(object):
    """Base class dispatching show(value) to the matching show_<value>() method."""
    def show(self, value):
        getattr(self, "show_" + value)()
class BuffHandle(object):
    """Cursor-style reader over an in-memory byte buffer."""

    def __init__(self, buff):
        self.__buff = bytearray(buff)
        self.__idx = 0

    def size(self):
        """Total length of the underlying buffer."""
        return len(self.__buff)

    def set_idx(self, idx):
        self.__idx = idx

    def get_idx(self):
        return self.__idx

    def readNullString(self, size):
        # NOTE(review): despite the name this is a plain fixed-size read; it
        # does not stop at a NUL byte -- confirm intended semantics at callers
        data = self.read(size)
        return data

    def read_b(self, size):
        """Peek `size` bytes without advancing the cursor."""
        return self.__buff[self.__idx:self.__idx + size]

    def read_at(self, offset, size):
        """Read `size` bytes at an absolute offset; the cursor is unchanged."""
        return self.__buff[offset:offset + size]

    def read(self, size):
        """Read `size` bytes (int or SV) and advance the cursor."""
        if isinstance(size, SV):
            # SV keeps its value private (name-mangled), so the original
            # `size.value` raised AttributeError; use the accessor instead
            size = size.get_value()

        buff = self.__buff[self.__idx:self.__idx + size]
        self.__idx += size

        return buff

    def end(self):
        """True once the cursor has consumed the whole buffer."""
        return self.__idx == len(self.__buff)
class Buff(object):
    """A chunk of bytes together with the offset it was taken from."""
    def __init__(self, offset, buff):
        self.offset = offset  # position of this chunk in the source buffer
        self.buff = buff
        self.size = len(buff)  # cached chunk length
class _Bytecode(object):
def __init__(self, buff):
self.__buff = bytearray(buff)
self.__idx = 0
def read(self, size):
if isinstance(size, SV):
size = size.value
buff = self.__buff[self.__idx:self.__idx + size]
self.__idx += size
return buff
def readat(self, off):
if isinstance(off, SV):
off = off.value
return self.__buff[off:]
def read_b(self, size):
return self.__buff[self.__idx:self.__idx + size]
def set_idx(self, idx):
self.__idx = idx
def get_idx(self):
return self.__idx
def add_idx(self, idx):
self.__idx += idx
def register(self, type_register, fct):
self.__registers[type_register].append(fct)
def get_buff(self):
return self.__buff
def length_buff(self):
return len(self.__buff)
def set_buff(self, buff):
self.__buff = buff
def save(self, filename):
buff = self._save()
with open(filename, "wb") as fd:
fd.write(buff)
def FormatClassToJava(input):
    """Convert a dotted class name into JVM internal form: ``La/b/C;``.

    :param input: the input class name
    :rtype: string
    """
    return "".join(("L", input.replace(".", "/"), ";"))
def FormatClassToPython(input):
    """Drop the trailing character and turn '/'/'$' separators into underscores."""
    stripped = input[:-1]
    for sep in ("/", "$"):
        stripped = stripped.replace(sep, "_")
    return stripped
def FormatNameToPython(input):
    """Make a member name usable in Python ('<init>' -> 'init', '$' -> '_')."""
    cleaned = input
    for old, new in (("<", ""), (">", ""), ("$", "_")):
        cleaned = cleaned.replace(old, new)
    return cleaned
def FormatDescriptorToPython(input):
    """Strip descriptor punctuation so it can be embedded in an identifier."""
    cleaned = input.replace("/", "_")
    for ch in (";", "[", "(", ")", " ", "$"):
        cleaned = cleaned.replace(ch, "")
    return cleaned
class Node(object):
    """A simple graph/tree node: numeric id, display title, and child list."""
    def __init__(self, n, s):
        self.id = n
        self.title = s
        self.children = []
8,355 | 1deab16d6c574bf532c561b8d6d88aac6e5d996c | # Importing datasets wrangling libraries
import numpy as np
import pandas as pd
# Load only the needed columns from the state cancer-incidence CSV.
incd_data = pd.read_csv('data/Cancer/incd.csv', usecols=['State', 'FIPS', 'Age-Adjusted Incidence Rate([rate note]) - cases per 100,000', 'Average Annual Count', 'Recent Trend'])
print(incd_data.columns)
|
8,356 | c0f4f9eef12d99d286f5ad56f6554c5910b7cc71 | users = {
'Students': [
{'first_name': 'Michael', 'last_name' : 'Jordan'},
{'first_name' : 'John', 'last_name' : 'Rosales'},
{'first_name' : 'Mark', 'last_name' : 'Guillen'},
{'first_name' : 'KB', 'last_name' : 'Tonel'}
],
'Instructors': [
{'first_name' : 'Michael', 'last_name' : 'Choi'},
{'first_name' : 'Martin', 'last_name' : 'Puryear'}
]
}
def findUsers(data=None):
    """Print a numbered roster line for every user in every group.

    Each line shows "- First Last" followed by the combined name length.
    `data` defaults to the module-level `users` mapping (new optional
    parameter; calling findUsers() behaves as before).
    """
    if data is None:
        data = users
    num = 1
    for members in data.values():
        for member in members:
            full = "- " + member['first_name'] + " " + member['last_name']
            name_length = len(member['first_name'] + member['last_name'])
            # the original never incremented `num`, so every line printed "1";
            # it also indexed dict.values() positionally, which fails on py3
            print(num, full, " - ", name_length)
            num += 1
findUsers() |
8,357 | 4d0b08f8ca77d188aa218442ac0689fd2c057a89 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os, shutil, time, pickle, warnings, logging
import yaml
from sklearn import preprocessing
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn import metrics
from scipy.special import erfinv
from scipy.stats import mode
warnings.filterwarnings('ignore')
def data_split_StratifiedKFold(df, col_index, col_stratified, n_splits=5, random_state=42):
    """Return a frame of fold-membership indicator columns via stratified K-fold.

    The result keeps `col_index` and adds, for each fold k, binary columns
    'foldk_train' / 'foldk_valid' marking row membership (positional).
    """
    folds = list(StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=random_state).split(
        np.arange(len(df)), y=df[col_stratified]))
    # copy() so we never write into a view of the caller's frame
    df_new = df[[col_index]].copy()
    for fold in range(n_splits):
        # build each indicator positionally; the original chained assignment
        # `df_new[col][rows] = 1` triggers SettingWithCopyWarning and can
        # silently fail to write through
        train_flags = np.zeros(len(df), dtype=int)
        train_flags[folds[fold][0]] = 1
        df_new['fold{}_train'.format(fold + 1)] = train_flags
        valid_flags = np.zeros(len(df), dtype=int)
        valid_flags[folds[fold][1]] = 1
        df_new['fold{}_valid'.format(fold + 1)] = valid_flags
    return df_new
def data_split_KFold(df, col_index, n_splits=5, random_state=42):
    """Return a frame of fold-membership indicator columns via plain K-fold.

    The result keeps `col_index` and adds, for each fold k, binary columns
    'foldk_train' / 'foldk_valid' marking row membership (positional).
    """
    folds = list(KFold(n_splits=n_splits, shuffle=True, random_state=random_state).split(
        np.arange(len(df))))
    # copy() so we never write into a view of the caller's frame
    df_new = df[[col_index]].copy()
    for fold in range(n_splits):
        # positional indicator columns; avoids the original's chained
        # assignment (SettingWithCopyWarning, may not write through)
        train_flags = np.zeros(len(df), dtype=int)
        train_flags[folds[fold][0]] = 1
        df_new['fold{}_train'.format(fold + 1)] = train_flags
        valid_flags = np.zeros(len(df), dtype=int)
        valid_flags[folds[fold][1]] = 1
        df_new['fold{}_valid'.format(fold + 1)] = valid_flags
    return df_new
def data_split_GroupKFold(df, col_index, col_group, n_splits=5, random_state=42):
    """
    Greedy group-aware K-fold split: whole groups are assigned to folds so
    that each fold covers roughly len(df)/n_splits rows.

    :param df: input DataFrame
    :param col_index: identifier column carried into the result
    :param col_group: column whose distinct values must not be split across folds
    :param n_splits: number of folds
    :param random_state: seed for shuffling the group order
    :return: frame with `col_index` plus binary foldk_train/foldk_valid columns
    """
    group = np.sort(df[col_group].unique())
    print("num group: {}".format(len(group)))
    # shuffle groups deterministically before the greedy assignment
    np.random.seed(random_state)
    group = group[np.random.permutation(len(group))]
    fold_list = []
    fold = 0
    count = 0
    fold_list.append([])
    for i, item in enumerate(group):
        count += (df[col_group] == item).sum()
        fold_list[fold].append(item)
        # start a new fold once the cumulative row count crosses the quota
        if count > len(df) / n_splits * (fold + 1):
            fold_list.append([])
            fold += 1

    df_new = df[[col_index]]
    for fold in range(n_splits):
        df_new['fold{}_train'.format(fold + 1)] = df[col_group].apply(
            lambda x: x not in fold_list[fold]).astype(np.int)
        df_new['fold{}_valid'.format(fold + 1)] = 1 - df_new['fold{}_train'.format(fold + 1)]

    for i in range(n_splits):
        print("fold: {}, valid: {}. group: {}".format(
            i + 1,
            (df_new['fold{}_valid'.format(i + 1)] == 1).sum(),
            len(fold_list[i]))
        )
    return df_new
def main():
    """Entry point: loads the melanoma training csv (no further processing yet)."""
    df = pd.read_csv("../input/melanoma/train.csv")

if __name__ == '__main__':
    main()
8,358 | 84d096a51fa052ee210e975ab61c0cbbf05bc5ae | class Day8MemoryManeuver:
def __init__(self, use_reference_count=False):
"""
Args:
use_reference_count (bool):
True: If an entry has child nodes, the meta data are referring to the results of
the child node
False: Sum all meta data up
"""
self._use_child_references = use_reference_count
    def solve(self, license_input):
        """Tokenize the space-separated license string and return its value."""
        _, result = self._solve(license_input.split(" "), 0)
        return result
    def _solve(self, structure, pos):
        """Recursively evaluate the node whose header starts at structure[pos].

        Returns (last_pos, value) where last_pos is the index of the final
        token consumed by this node; the caller advances past it.
        """
        if pos >= len(structure):
            return pos, 0
        child_node_count = int(structure[pos])
        pos += 1
        meta_count = int(structure[pos])
        result = 0
        child_results = []
        for i in range(child_node_count):
            pos += 1
            pos, tmp = self._solve(structure, pos)
            if not self._use_child_references:
                # plain mode: accumulate every child's metadata sum
                result += tmp
            child_results.append(tmp)
        if meta_count > 0:
            for i in range(pos, pos + meta_count):
                # metadata tokens start one position past the current token
                current = int(structure[i + 1])
                if self._use_child_references and child_node_count > 0:
                    # reference mode: metadata entries are 1-based child indices;
                    # out-of-range references are silently skipped
                    if current <= len(child_results):
                        result += child_results[current - 1]
                else:
                    result += current
                pos += 1
        return pos, result
|
8,359 | e18ebf961c2daa7dd127d08f85edb6ea519e3470 | #!/usr/bin/python
"""
Expression Parser Tree for fully parenthesized input expression
"""
from bintree import BinaryTree
from stackModule import Stack
def buildParseTree(expression):
    """Build a BinaryTree from a fully parenthesized, space-delimited
    arithmetic expression such as "( ( 10 + 5 ) * 3 )"."""
    expList = expression.split()
    empTree = BinaryTree('')
    parentStack = Stack()
    parentStack.push(empTree)
    currentNode = empTree
    for item in expList:
        if item == '(':
            # descend: the new left child will hold the left operand
            currentNode.insertLeft('')
            parentStack.push(currentNode)
            currentNode = currentNode.getLeftChild()
        elif item not in ['+', '-', '*', '/', ')']:
            # operand: store it, then climb back to the operator slot
            currentNode.setRootValue(int(item))
            currentNode = parentStack.pop()
        elif item in ['+', '-', '*', '/']:
            # operator: store it, then descend into the right operand
            currentNode.setRootValue(item)
            currentNode.insertRight('')
            parentStack.push(currentNode)
            currentNode = currentNode.getRightChild()
        elif item == ')':
            # end of subexpression: climb back to the parent
            currentNode = parentStack.pop()
        else:
            raise ValueError
    return empTree
import operator
def evaluate(parseTree):
    """Recursively evaluate the expression tree built by buildParseTree."""
    opers = {'+':operator.add, '-':operator.sub, '*':operator.mul, '/':operator.truediv}
    leftC = parseTree.getLeftChild()
    rightC = parseTree.getRightChild()
    if leftC and rightC:
        # interior node: apply the stored operator to both subtrees
        fn = opers[parseTree.getRootValue()]
        return fn(evaluate(leftC),evaluate(rightC))
    else:
        # leaf: a plain integer operand
        return parseTree.getRootValue()
def postOrderTraversal(parseTree):
    # left, right, then root: operands are printed before their operator
    if parseTree != None:
        postOrderTraversal(parseTree.getLeftChild())
        postOrderTraversal(parseTree.getRightChild())
        print parseTree.getRootValue()
def preOrderTraversal(parseTree):
    # root first, then the left and right subtrees
    if parseTree !=None:
        print parseTree.getRootValue()
        preOrderTraversal(parseTree.getLeftChild())
        preOrderTraversal(parseTree.getRightChild())
def inOrderTraversal(parseTree):
    # left subtree, root, right subtree: reproduces infix order
    if parseTree !=None:
        inOrderTraversal(parseTree.getLeftChild())
        print parseTree.getRootValue()
        inOrderTraversal(parseTree.getRightChild())
def iterInOrder(currentTree):
    """Iterative in-order traversal using an explicit stack."""
    pStack = Stack()
    print "\nPrinting in order traversal\n"
    while currentTree != None or not pStack.isEmpty():
        if currentTree !=None:
            # slide down the left spine, remembering ancestors
            pStack.push(currentTree)
            currentTree = currentTree.getLeftChild()
        else:
            # visit the node, then move into its right subtree
            currentTree = pStack.pop()
            print currentTree.getRootValue()
            currentTree = currentTree.getRightChild()
# Demo: build, evaluate and traverse "( ( 10 + 5 ) * 3 )" (expected value 45).
pt = buildParseTree("( ( 10 + 5 ) * 3 )")
print "\nGiven Expression evaluates to %d\n" % evaluate(pt)
preOrderTraversal(pt)
postOrderTraversal(pt)
inOrderTraversal(pt)
iterInOrder(pt)
|
8,360 | a4c4a5cc63c345d1fa8cbf426f7857a0f3d4357f | # Generated by Django 3.2.4 on 2021-06-16 13:41
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Drop Question.link and Question.photo and switch Question.answer to a
    CKEditor rich-text field."""

    dependencies = [
        ('FAQ', '0004_auto_20210616_1253'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='question',
            name='link',
        ),
        migrations.RemoveField(
            model_name='question',
            name='photo',
        ),
        migrations.AlterField(
            model_name='question',
            name='answer',
            field=ckeditor.fields.RichTextField(),
        ),
    ]
|
8,361 | ad94118b43e130aec5df3976fd0460164de17511 | #!/usr/bin/python
# coding: utf-8
# # import re
# # import urllib
# #
# #
# # def getHtml(url):
# # page = urllib.urlopen(url)
# # html = page.read()
# # return html
# #
# #
# # def getMp4(html):
# # r = r"href='(http.*\.mp4)'"
# # re_mp4 = re.compile(r)
# # mp4List = re.findall(re_mp4, html)
# # filename = 1
# # for mp4url in mp4List:
# # urllib.urlretrieve(mp4url, "%s.mp4" % filename)
# # print 'file "%s.mp4" done' % filename
# # filename += 1
# # url = "http://v.youku.com/v_show/id_XMjYxMjEyNDU0MA==.html"
# # html = getHtml(url)
# # getMp4(html)
#
#
#
#
# # import re
# #
# #
# # pattern = re.compile(r'hello world')
# # match = pattern.match('hello world!')
# #
# # if match:
# # print match.group()
#
#
# #
# # # 冒泡排序
# # array = [4, 5, 0, 2, 3, 7, 1, 6]
# #
# # for i in range(len(array) - 1, 1, -1):
# # for j in range(0, i):
# # if array[j] > array[j + 1]:
# # array[j], array[j + 1] = array[j + 1], array[j]
# # print array
#
# # theString = 'saaaay yes no yaaaass'
# # print theString.strip('say') #say后面有空格
#
#
#
# # -*- coding:utf-8 -*-
# import urllib
# import urllib2
# import re
# import thread
# import time
#
#
# # 糗事百科爬虫类
# class QSBK:
# # 初始化方法,定义一些变量
# def __init__(self):
# self.pageIndex = 1
# self.user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
# # 初始化headers
# self.headers = {'User-Agent': self.user_agent}
# # 存放段子的变量,每一个元素是每一页的段子们
# self.stories = []
# # 存放程序是否继续运行的变量
# self.enable = False
#
# # 传入某一页的索引获得页面代码
# def getPage(self, pageIndex):
# try:
# url = 'http://www.qiushibaike.com/hot/page/' + str(pageIndex)
# # 构建请求的request
# request = urllib2.Request(url, headers=self.headers)
# # 利用urlopen获取页面代码
# response = urllib2.urlopen(request)
# # 将页面转化为UTF-8编码
# pageCode = response.read().decode('utf-8')
# return pageCode
#
# except urllib2.URLError, e:
# if hasattr(e, "reason"):
# print u"连接糗事百科失败,错误原因", e.reason
# return None
#
# # 传入某一页代码,返回本页不带图片的段子列表
# def getPageItems(self, pageIndex):
# pageCode = self.getPage(pageIndex)
# if not pageCode:
# print "页面加载失败...."
# return None
# # pattern = re.compile('<div class=author clearfix>.*?<img src=.*? alt=(.*?)>.*?<div.*?' +
# # '<span>(.*?)</span>.*?stats-vote><i class=number>(.*?)</i>.*?' +
# # '<i class=number>(.*?)</i>', re.S)
# pattern = re.compile('h2>(.*?)</h2.*?content">(.*?)</.*?number">(.*?)</', re.S)
# items = re.findall(pattern, pageCode)
# # 用来存储每页的段子们
# pageStories = []
# # 遍历正则表达式匹配的信息
# # for item in items:
# # # 是否含有图片
# # haveImg = re.search("img", item[3])
# # # 如果不含有图片,把它加入list中
# # if not haveImg:
# # replaceBR = re.compile('<br/>')
# # text = re.sub(replaceBR, "\n", item[1])
# # # item[0]是一个段子的发布者,item[1]是内容,item[2]是发布时间,item[4]是点赞数
# # pageStories.append([item[0].strip(), text.strip(), item[2].strip(), item[4].strip()])
# # return pageStories
# for item in items:
# pageStories.append([item[0].strip(), item[1].strip(), item[2].strip()])
# return pageStories
#
# # 加载并提取页面的内容,加入到列表中
# def loadPage(self):
# # 如果当前未看的页数少于2页,则加载新一页
# if self.enable == True:
# if len(self.stories) < 2:
# # 获取新一页
# pageStories = self.getPageItems(self.pageIndex)
# # 将该页的段子存放到全局list中
# if pageStories:
# self.stories.append(pageStories)
# # 获取完之后页码索引加一,表示下次读取下一页
# self.pageIndex += 1
#
# # 调用该方法,每次敲回车打印输出一个段子
# def getOneStory(self, pageStories, page):
# # 遍历一页的段子
# for story in pageStories:
# # 等待用户输入
# input = raw_input()
# # 每当输入回车一次,判断一下是否要加载新页面
# self.loadPage()
# # 如果输入Q则程序结束
# if input == "Q":
# self.enable = False
# return
# print u"第%d页\t发布人:%s\t 赞:%s\n%s" % (page, story[0], story[2], story[1])
#
# # 开始方法
# def start(self):
# print u"正在读取糗事百科,按回车查看新段子,Q退出"
# # 使变量为True,程序可以正常运行
# self.enable = True
# # 先加载一页内容
# self.loadPage()
# # 局部变量,控制当前读到了第几页
# nowPage = 0
# while self.enable:
# if len(self.stories) > 0:
# # 从全局list中获取一页的段子
# pageStories = self.stories[0]
# # 当前读到的页数加一
# nowPage += 1
# # 将全局list中第一个元素删除,因为已经取出
# del self.stories[0]
# # 输出该页的段子
# self.getOneStory(pageStories, nowPage)
#
#
# spider = QSBK()
# spider.start()
#
# print [x * x for x in range(1, 11) if x % 2 == 0]
def _odd_iter():
n = 1
while True:
n += 2
yield n
def _not_divisible(n):
return lambda x: x % n > 0
def primes():
    """Generate primes indefinitely via an incremental chain of
    divisibility filters over the odd numbers (lazy sieve)."""
    yield 2
    it = _odd_iter()  # initial candidate sequence: 3, 5, 7, ...
    while True:
        n = next(it)  # the first surviving candidate is always prime
        yield n
        it = filter(_not_divisible(n), it)  # stack a new filter for n
def main():
    """Print every prime below 1000."""
    for n in primes():
        if n < 1000:
            print(n)
        else:
            break
def is_palindrome(n):
    """True when the decimal digits of n read the same in reverse."""
    reversed_digits = "".join(reversed(str(n)))
    return int(reversed_digits) == n
def count():
    """Closure demo: f(i) is evaluated immediately inside the loop, so each
    entry is a distinct square (avoids the late-binding closure pitfall)."""
    def f(j):
        # def g():
        return j*j
        # return g
    fs = []
    for i in range(1, 4):
        fs.append(f(i))  # f(i) executes right away with the current i
    return fs
from PIL import Image
def changeImage():
    """Load a desktop image, shrink it in place to fit 1000x500 and save a
    JPEG copy under a new name."""
    im = Image.open('C:/Users/Administrator/Desktop/1111.jpg')
    print(im.format, im.size, im.mode)
    im.thumbnail((1000, 500))
    im.save('C:/Users/Administrator/Desktop/11111.jpg', 'JPEG')
from multiprocessing import Process, Pool
import os, time, random
def run_proc(name):
    """Print a marker with this child process's name and PID."""
    message = "Run child process %s %s" % (name, os.getpid())
    print(message)
def long_time_task(name):
    """Sleep a random 0-3 s and report how long the task actually took.

    Bug fix: elapsed time is ``end - start`` — the original printed
    ``start - end``, which is always negative.
    """
    print('Run task %s %s...' % (name, os.getpid()))
    start = time.time()
    time.sleep(random.random() * 3)
    end = time.time()
    print('Task %s run %0.2f' % (name, (end - start)))
def chinese_to_pinyin(x):
    """Intended to transliterate a Chinese string to pinyin.

    NOTE(review): the unicode_pinyin.txt dictionary lookup below is commented
    out, so the function currently ALWAYS returns the empty string; `i` (the
    character's uppercase 4-hex unicode escape, e.g. '9648') is computed but
    unused.  Restore the lookup before relying on this function.
    """
    y = ''
    # dic = {}
    # with open("unicode_pinyin.txt") as f:
    #     for i in f.readlines():
    #         dic[i.split()[0]] = i.split()[1]
    for i in x:
        i = str(i.encode('unicode_escape'))[-5:-1].upper()
        # try:
        #     y += dic[i] + ' '
        # except:
        #     y += 'XXXX '  # unknown characters would be replaced with XXXX
    return y
if __name__ == '__main__':
# main()
# print(_not_divisible(3))
# output = filter(is_palindrome, range(1, 1000))
# print(list(output))
# print(range(100))[::-1]
# f1, f2, f3 = count()
# print(f1)
# print(f2)
# print(f3)
# changeImage()
# print("Parent process %s ", os.getpid())
# p = Process(target=run_proc, args=("test",))
# print('Child process will start.')
# p.start()
# p.join()
# print('Child process end.')
# print("Parent process %s ", os.getpid())
# p = Pool(5)
# for i in range(5):
# p.apply_async(long_time_task, args=(i,))
# print('Waiting for all subprocesses done...')
# p.close()
# p.join()
# print('All subprocesses done.')
print(chinese_to_pinyin(u"陈")) |
8,362 | 28851979c8f09f3cd1c0f4507eeb5ac2e2022ea0 | '''
Run from the command line with arguments of the CSV files you wish to convert.
There is no error handling so things will break if you do not give it a well
formatted CSV most likely.
USAGE: python mycsvtomd.py [first_file.csv] [second_file.csv] ...
OUTPUT: first_file.md second_file.md ...
'''
import sys
import csv
##import os
##import re
##import shutil
# Convert each CSV named on the command line into a same-named .md table.
for arg in sys.argv[1:]:
    ## dir_name = arg.split(".")[0] + "_markdown"
    ##
    ## if os.path.exists(dir_name):
    ##     shutil.rmtree(dir_name)
    ## # create a directory to store the results
    ## os.mkdir(dir_name)
    if not arg.endswith('.csv'):
        print 'Warning: {} does not end in .csv; skipping'.format(arg)
        continue
    # read in CSV file
    with open(arg, 'rb') as f:
        with open(arg[:-3]+'md', 'wb') as md:
            reader = csv.reader(f)
            # first CSV row becomes the markdown header row
            header = reader.next()
            md.write('|'.join(header)+'\n')
            # separator row required below a markdown table header
            for _ in header:
                md.write('|')
            md.write('\n')
            # one markdown row per CSV record
            for row in reader:
                md.write('|'.join(row)+'\n')
print 'ok'
8,363 | 37fdfddb471e2eec9e5867d685c7c56fc38c5ae7 | import json
import logging
import os
import sys
from io import StringIO
import pytest
from allure.constants import AttachmentType
from utils.tools import close_popups
_beautiful_json = dict(indent=2, ensure_ascii=False, sort_keys=True)
# LOGGING console ####################################################################################################
# Reserved name for custom logging
logging.addLevelName(15, "SUBDEBUG")
logging.addLevelName(5, "TEST")
# Logger formating
log_formatter = logging.Formatter("%(asctime)s [%(threadName)s] [%(levelname)s] - %(message)s",
datefmt='%Y-%m-%d %H:%M:%S')
class CustomLogger(logging.Logger):
    """Logger with allure-report attachment helpers and custom levels."""

    # Shared in-memory buffer wired up as a stream target by setup_logging().
    test_log = StringIO()

    @staticmethod
    def format_message(message):
        """Pretty-print containers as JSON; fall back to str() otherwise."""
        return json.dumps(message, **_beautiful_json) if isinstance(message, (dict, list, tuple)) else str(message)

    def subdebug(self, message, *args, **kwargs):
        """Log at the custom SUBDEBUG level (15)."""
        if self.isEnabledFor(15):
            self._log(15, message, args, **kwargs)

    def attach_debug(self, name, message):
        """Attach *message* to the allure report when DEBUG is enabled."""
        if self.isEnabledFor(10):
            pytest.allure.attach(name, self.format_message(message))

    def attach_subdebug(self, name, message):
        if self.isEnabledFor(15):
            pytest.allure.attach(name, self.format_message(message))

    def attach_info(self, name, message):
        if self.isEnabledFor(20):
            pytest.allure.attach(name, self.format_message(message))

    def attach_error(self, name, message):
        # errors are attached unconditionally, whatever the configured level
        pytest.allure.attach(name, self.format_message(message))

    @staticmethod
    def attach_png(name, message):
        pytest.allure.attach(name, message, type=AttachmentType.PNG)

    def attach_selenium_screenshot(self, attach_name, selenium_driver):
        """Attach a browser screenshot; degrade to a text attachment on failure."""
        if selenium_driver:
            try:
                close_popups(selenium_driver)
                self.debug('Attach screenshot')
                self.attach_png(attach_name, selenium_driver.get_screenshot_as_png())
                self.debug('...Done')
            except Exception as e:
                self.error('Cannot get screenshot from SeleniumWebDriver')
                pytest.allure.attach(attach_name, str(e))
        else:
            self.error('No browser is define')

    def add_handler(self, file_name, mode='a'):
        """Attach a file handler to this logger.

        NOTE(review): the level is read from LOGGING_LEVEL_TO_CONSOLE even
        though this is a *file* handler — confirm whether LOGGING_LEVEL was
        intended.
        """
        file_handler = logging.FileHandler(filename=file_name, mode=mode)
        file_handler.setFormatter(log_formatter)
        file_handler.setLevel(os.getenv('LOGGING_LEVEL_TO_CONSOLE', 'WARN'))
        self.addHandler(file_handler)
def setup_logging():
    """Build the shared CustomLogger with a stdout handler and an in-memory
    StringIO handler (the latter collects the per-test log)."""
    logger = CustomLogger('root')
    # console handler: level from LOGGING_LEVEL_TO_CONSOLE (default WARN)
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setLevel(os.getenv('LOGGING_LEVEL_TO_CONSOLE', 'WARN'))
    console_handler.setFormatter(log_formatter)
    logger.addHandler(console_handler)
    # in-memory handler: level from LOGGING_LEVEL (default INFO)
    string_io = logging.StreamHandler(logger.test_log)
    string_io.setLevel(os.getenv('LOGGING_LEVEL', 'INFO'))
    string_io.setFormatter(log_formatter)
    logger.addHandler(string_io)
    return logger


# Module-level singleton used across the test framework.
logger = setup_logging()
|
8,364 | 9320926c9eb8a03d36446f3692f11b242c4fc745 | #!/usr/bin/env python3
# coding=utf-8
# date 2020-10-22 10:54:38
# author calllivecn <c-all@qq.com>
import sys
import random
import asyncio
import argparse
def httpResponse(msg):
    """Build a minimal HTTP/1.1 200 response whose body is *msg* (bytes)."""
    header_lines = (
        "HTTP/1.1 200 ok",
        "Server: py",
        "Content-Type: text/plain",
        "Content-Length: " + str(len(msg)),
        "\r\n",
    )
    head = "\r\n".join(header_lines)
    return head.encode("utf8") + msg
async def echo(reader, writer):
    """Read one request chunk and reply with a fixed plain-text HTTP response."""
    #t = random.randint(100, 3000)/1000
    #await asyncio.sleep(t)
    data = await reader.read(1024)
    if not data:
        # client closed the connection without sending anything
        return
    writer.write(httpResponse(b"hello world!\n"))
    await writer.drain()
async def handle(reader, writer):
    """Per-connection callback: serve one request, always close the writer,
    and swallow client connection resets at both stages."""
    try:
        await echo(reader, writer)
    except ConnectionResetError:
        pass
    finally:
        writer.close()
        try:
            await writer.wait_closed()
        except ConnectionResetError:
            pass
def usage_uvloop():
    """Install uvloop as the asyncio event-loop policy, or exit(1) with an
    installation hint when uvloop is not importable."""
    try:
        import uvloop
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    except ModuleNotFoundError:
        print("需要安装uvloop(pip install --user uvloop)")
        sys.exit(1)
def main():
    """Parse CLI flags and run the asyncio echo server until interrupted."""
    parse = argparse.ArgumentParser()
    parse.add_argument("--addr", action="store", default="*", help="listen 地址 (default: ipv4+ipv6)")
    parse.add_argument("--port", action="store", type=int, default=6789, help="port (default: 6789)")
    parse.add_argument("--uvloop", action="store_true", help="使用uvloop")
    parse.add_argument("--parse", action="store_true", help=argparse.SUPPRESS)
    args = parse.parse_args()
    if args.parse:
        # hidden flag: just show usage and exit
        parse.print_usage()
        sys.exit(0)
    if args.uvloop:
        usage_uvloop()
    else:
        print("可以选使用uvloop加速")

    async def server():
        # server = await asyncio.start_server(handle, args.addr, args.port, reuse_address=True, reuse_port=True)
        server = await asyncio.start_server(handle, args.addr, args.port, reuse_address=True, backlog=4096)
        async with server:
            await server.serve_forever()

    print(f"listen: {args.addr}:{args.port}")
    try:
        asyncio.run(server())
    except KeyboardInterrupt:
        print("exit")


if __name__ == "__main__":
    main()
def ex7(*siruri, x=1, flag=True):
    """For each input string, keep the characters whose code point modulo x
    equals 0 when flag is True and 1 when flag is False; return a tuple with
    one list of kept characters per string."""
    rest = 0 if flag else 1  # (not flag) compared as an int
    rezultat = ()
    for sir in siruri:
        pastrate = [litera for litera in sir if ord(litera) % x == rest]
        rezultat = rezultat + (pastrate,)
    return rezultat
|
8,366 | 6e73625adc10064cdb1b5f0546a4fc7320e9f5dc | from django import template
import random
register = template.Library()
@register.simple_tag
def random_quote():
    """Returns a random quote to be displayed on the community sandwich page"""
    # NOTE(review): the same "power of one" quote appears twice with two
    # different attributions — confirm which is intended.
    quotes = [
        "Growth is never by mere chance; it is the result of forces working together.\n-James Cash Penney",
        "We cannot accomplish all that we need to do without working together\n-Bill Richardson",
        "The power of one, if fearless and focused, is formidable, but the power of many working together is better.\n-Gloria Macapagal Arroyo",
        "The power of one, if fearless and focused, is formidable, but the power of many working together is better.\n-Jacqueline Novogratz",
        "I love a sandwich that you can barely fit in your mouth because there's so much stuff on it. The bread should not be the main thing on a sandwich.\n-Adrianne Palicki",
        "Communism will win.\n-Slavoj Zizek",
    ]
    return random.choice(quotes)
|
8,367 | 158b39a64d725bdbfc78acc346ed8335613ae099 | #common method to delete data from a list
fruits=['orange','apple','mango','grapes','banana','apple','litchi']
#l=[]
#[l.append(i) for i in fruits if i not in l]
#print(l)
print(set(fruits))
print(fruits.count("orange"))
#pop method in a list used to delete last mathod from a list
#fruits.pop()#items from if we a passing arguments then its delete specified items
#print(fruits)
#fruits.pop(4)
#print(fruits)
#del fruits[4]# to delete operater items we use delete operater in a list
#print(fruits)
#print(enumerate(fruits))
#c=enumerate(fruits)
#print(c)
# remove method in list
# when we dont know the position of the item inside the list
#print(fruits.remove('banana'))
#print(fruits)
#fruits.remove('apple')
#print(fruits)
#print("the new {} is : ".format(l))
#print(l)
#print(set(fruits))
|
8,368 | 802eb0502c5eddcabd41b2d438bf53a5d6fb2c82 | from django.db import models
from NavigantAnalyzer.common import convert_datetime_string
import json
# A custom view-based model for flat outputs - RÖ - 2018-10-24
# Don't add, change or delete fields without editing the view in the Db
class Results_flat(models.Model):
race_id = models.IntegerField()
race_name = models.CharField(max_length=127)
race_serie = models.CharField(max_length=127, blank=True)
race_begin = models.DateTimeField(blank=True, null=True)
result_start_time = models.DateTimeField(blank=True, null=True)
runner_last_name = models.CharField(max_length=63, blank=True)
runner_first_name = models.CharField(max_length=63, blank=True)
result_emit = models.CharField(max_length=12, blank=True)
course_name = models.CharField(max_length=63)
course_length = models.IntegerField(blank=True, null=True)
course_num_participants = models.IntegerField(blank=True, null=True)
course_min_time = models.IntegerField(blank=True, null=True)
course_mean_time = models.IntegerField(blank=True, null=True)
course_min_puistotime = models.IntegerField(blank=True, null=True)
course_mean_puistotime = models.IntegerField(blank=True, null=True)
visit_min_time = models.IntegerField(blank=True, null=True)
visit_mean_time = models.IntegerField(blank=True, null=True)
visit_min_puistotime = models.IntegerField(blank=True, null=True)
visit_mean_puistotime = models.IntegerField(blank=True, null=True)
visit_puistoman_time = models.IntegerField(blank=True, null=True) # Since 2019-12-08
leg_min_time = models.IntegerField(blank=True, null=True)
leg_mean_time = models.IntegerField(blank=True, null=True)
leg_min_puistotime = models.IntegerField(blank=True, null=True)
leg_mean_puistotime = models.IntegerField(blank=True, null=True)
visit_order = models.IntegerField()
visit_code = models.IntegerField()
visit_time = models.IntegerField()
visit_position = models.IntegerField(blank=True)
visit_puistoposition = models.IntegerField(blank=True)
leg_time = models.IntegerField(blank=True)
leg_position = models.IntegerField(blank=True)
leg_puistoposition = models.IntegerField(blank=True)
visit_puistodiff_time_l = models.IntegerField(blank=True, null=True) # Since 2019-12-08
visit_puistodiff_time_pm = models.IntegerField(blank=True, null=True) # Since 2019-12-08
leg_puistodiff_time_l = models.IntegerField(blank=True, null=True) # Since 2019-12-08
leg_puistodiff_time_pm = models.IntegerField(blank=True, null=True) # Since 2019-12-08
leg_puistoperc_time_l = models.FloatField(null=True) # Since 2019-12-08
leg_puistoperc_time_pm = models.FloatField(null=True) # Since 2019-12-08
leg_puistoperc_time_l = models.FloatField(null=True) # Since 2019-12-08
leg_puisto_success = models.FloatField(null=True) # Since 2019-12-08
result_puistoperc_time_l = models.FloatField(null=True) # Since 2019-12-08
result_puistoperc_time_pm = models.FloatField(null=True) # Since 2019-12-08
result_puisto_max_level = models.FloatField(null=True) # Since 2019-12-08
result_puisto_success = models.FloatField(null=True) # Since 2019-12-08
result_puisto_optimum = models.IntegerField(null=True) # Since 2019-12-08
result_puisto_mistakes = models.IntegerField(null=True) # Since 2019-12-08
class Meta:
managed = False
db_table = 'NavigantAnalyzer_results_flat'
    def get_fields(self):
        """Serialize every model field of this row to a JSON object string.

        Integer-looking values are cast back to int; the two datetime fields
        are converted with convert_datetime_string.
        """
        result = dict()
        datetime_fields = ['race_begin', 'result_start_time']
        for field in Results_flat._meta.fields:
            # value_to_string always yields str — recover ints for JSON output
            value = field.value_to_string(self)
            if value.isdigit():
                value = int(value)
            if field.name in datetime_fields:
                value = convert_datetime_string(value)
            result[field.name] = value
        return json.dumps(result)
|
8,369 | 77ae3ef1f6f267972a21f505caa7be29c19a6663 | from models import Session, FacebookUser, FacebookPage, FacebookGroup
from lib import get_scraper, save_user, save_page
import logging
logging.basicConfig(level=logging.DEBUG)
# Back-fill numeric-only usernames by resolving each uid through the scraper
# API; falls back to the stringified uid when the lookup returns nothing.
session = Session()
scraper = get_scraper(True)
for user in session.query(FacebookUser).filter(FacebookUser.data=="todo").filter("username ~ '^\d+$'").all():
    user.username = scraper.get_username_api(str(user.uid)) or str(user.uid)
    print user.uid, user.username
session.commit()
8,370 | b0a49f5876bc3837b69a6dc274f9587a37351495 | import myThread
def main():
    """Start listening for messages from the known hosts."""
    # host address -> peer label; presumably consumed by the listener thread
    hosts={"127.0.0.1":"carpenter"}
    myThread.messageListenThread(hosts)

if __name__ == '__main__':
    main()
8,371 | f039ab104093eb42c3f5d3c794710a0997e85387 | # coding: utf-8
# Aluno: Héricles Emanuel
# Matrícula: 117110647
# Atividade: É quadrado Mágico?
def eh_quadrado_magico(m):
    """Return True when the square matrix m is a magic square.

    A magic square's rows, columns and both diagonals all share the same sum
    (taken here from the first row).

    Fixes the original implementation, which appended to a misspelled list
    (`soma_all` while the list was named `somas_all` — a NameError at
    runtime) and then iterated over the int `somados` instead of the list.
    """
    n = len(m)
    # reference value: sum of the first row
    alvo = 0
    for valor in m[0]:
        alvo += valor
    somas = []
    # rows
    for linha in m:
        somas.append(sum(linha))
    # columns
    for coluna in range(n):
        somas.append(sum(m[lin][coluna] for lin in range(n)))
    # anti-diagonal (top-right to bottom-left)
    somas.append(sum(m[i][n - 1 - i] for i in range(n)))
    # main diagonal (top-left to bottom-right)
    somas.append(sum(m[i][i] for i in range(n)))
    for s in somas:
        if s != alvo:
            return False
    return True
quadrado1 = [[2,7,6],[9,5,1],[4,3,8]]
print eh_quadrado_magico(quadrado1)
|
8,372 | 14e304f30364932910986f2dda48223b6d4b01c0 | from tqdm import tqdm
import fasttext
import codecs
import os
import hashlib
import time
def make_save_folder(prefix="", add_suffix=True) -> str:
    """
    Build (and create if missing) the model save folder.

    1. Optionally append a SHA-1 hash of the current time to the prefix.
    2. Create the directory when it does not exist yet.
    :param prefix: family label for the save folder ('' -> './fast_text')
    :param add_suffix: True -> append the time-hash suffix, False -> use as-is
    :return: str, path of the save folder (always ends with '/')
    """
    if prefix == "":
        prefix = "./fast_text"
    if add_suffix:
        prefix = f"{prefix}_{hashlib.sha1(time.ctime().encode()).hexdigest()}"
    if not prefix.endswith("/"):
        prefix += "/"
    if not os.path.exists(prefix):
        os.mkdir(prefix)
    return prefix
def make_fast_text(tokens_list: list, num_dimension: int, save_folder="", min_count=1) -> bool:
    """
    Train a skip-gram fastText model on pre-tokenized sentences and save it.

    fastText differs from word2vec in that it also learns subword embeddings
    and trains considerably faster.  The token lists must preserve the
    original word order, since context windows depend on it.

    :param tokens_list: list of list of str — one token list per sentence
    :param num_dimension: dimensionality of the embedding space
    :param save_folder: path to the save folder ('' -> auto-generated)
    :param min_count: minimum corpus frequency for a word to be kept
        (1 keeps everything; 3+ is a common choice)
    :return: True on success
    """
    # arrange save folder
    save_folder = make_save_folder(save_folder, True if save_folder == "" else False)
    # fastText consumes a whitespace-separated text file
    file_name_input_text = f"{save_folder}delete_me_wakati.txt"
    wakati = "\n".join([" ".join(tokens) for tokens in tqdm(tokens_list, desc="分かち書き @ fast text")])
    with codecs.open(file_name_input_text, "w", "utf-8") as f:
        f.write(wakati)
    # train the skip-gram model
    model_fast_text = fasttext.train_unsupervised(file_name_input_text, model="skipgram", dim=num_dimension,
                                                  minCount=min_count)
    # save
    model_fast_text.save_model(f"{save_folder}model_fast_text.bin")
    return True
|
8,373 | 96936b7f6553bee06177eb66a2e63064c1bf51a6 | from __future__ import unicode_literals
import requests
try:
import json
except ImportError:
import simplejson as json
def main(app, data):
    """Fetch the latest posts for a Medium user and return the parsed JSON.

    :param app: unused application handle (kept for interface compatibility)
    :param data: mapping with a 'username' key identifying the Medium account
    :return: decoded JSON payload from Medium's latest-posts endpoint

    Bug fix: Medium prefixes its JSON with the anti-hijacking guard string
    '])}while(1);</x>'.  The original code removed it with str.lstrip, which
    strips any run of leading characters from that SET rather than the exact
    prefix — remove the exact prefix instead.
    """
    MEDIUM_API_ENDPOINT = 'https://medium.com/{0}/latest?format=json'
    r = requests.get(MEDIUM_API_ENDPOINT.format(data.get('username')))
    response_content = r.content.decode('utf-8')
    guard = '])}while(1);</x>'
    if response_content.startswith(guard):
        json_data = response_content[len(guard):]
    else:
        json_data = response_content
    return json.loads(json_data)
|
8,374 | 36fb0d936be5c5d305c4076fd1c497664c9b770a | # -*- coding: utf-8 -*-
from ..general.utils import log_errors
from googleapiclient import discovery
from oauth2client.client import SignedJwtAssertionCredentials
from django.conf import settings
from celery import shared_task
from logging import getLogger
import httplib2
_logger = getLogger(__name__)
def create_events_calendar():
    """ Create an events calendar if none already exists. This function mostly exists for
    creating calendars for dev environments, not used in prod.
    """
    service = get_calendar_service()
    if not service:
        # no API credentials configured (e.g. test environment)
        return
    calendar = {
        'summary': 'Ting som skjer i Telemarkgruppa',
        'timeZone': 'Europe/Oslo',
    }
    cal_insert_response = service.calendars().insert(body=calendar).execute()
    # make the freshly created calendar world-readable
    public_acl = {
        'role': 'reader',
        'scope': {
            'type': 'default'
        }
    }
    acl_insert_response = service.acl().insert(calendarId=cal_insert_response['id'], body=public_acl).execute()
    return acl_insert_response
def get_calendar_service():
    """Build an authorized Google Calendar v3 service object, or return None
    when no API key is configured (test/dev environments)."""
    name = 'calendar'
    version = 'v3'
    scope = 'https://www.googleapis.com/auth/calendar'
    # Provide a mock fallback for test environments where real interaction with
    # Google calendar is not needed
    if not hasattr(settings, 'GOOGLE_API_PRIVATE_KEY'):
        _logger.info('Skipping Google calendar integration due to missing GOOGLE_API_PRIVATE_KEY '
                     'in settings.')
        return
    # Prepare credentials, and authorize HTTP object with them.
    credentials = SignedJwtAssertionCredentials(settings.GOOGLE_API_EMAIL,
                                                settings.GOOGLE_API_PRIVATE_KEY, scope)
    http = credentials.authorize(http=httplib2.Http())
    # Construct a service object via the discovery service.
    service = discovery.build(name, version, http=http)
    return service
@shared_task
@log_errors
def update_google_calendar_event(event_id):
    """Celery task: push the current state of an Event to Google Calendar,
    creating the remote event first when it does not exist yet."""
    from .models import Event
    event = Event.objects.get(pk=event_id)
    # If the event doesn't already exist on google calendar, create it
    if not event.google_calendar_id:
        _logger.info('Adding missing event to google calendar: %s', event.name)
        add_google_calender_event(event.id)
        return
    # Authenticate and construct service.
    service = get_calendar_service()
    if not service:
        return
    payload = get_google_calendar_payload_for_event(event)
    results = service.events().update(calendarId=settings.GOOGLE_CALENDAR_ID,
                                      eventId=event.google_calendar_id, body=payload).execute()
    _logger.info('Google calendar event for %s updated: %s', event.name, results)
@shared_task
@log_errors
def add_google_calender_event(event_id):
    """Celery task: create a Google Calendar event for the given Event and
    store the remote id on the model.

    NOTE(review): the name typo 'calender' is kept — callers reference it.
    Also, Event.objects.get raises DoesNotExist rather than returning None,
    so the `if not event` guard below likely never fires — confirm.
    """
    from .models import Event
    event = Event.objects.get(pk=event_id)
    if not event:
        _logger.warning('Could not find event to add to Google Calendar: %d', event_id)
        return
    google_payload = get_google_calendar_payload_for_event(event)
    service = get_calendar_service()
    if not service:
        return
    results = service.events().insert(calendarId=settings.GOOGLE_CALENDAR_ID,
                                      body=google_payload).execute()
    if results.get('id'):
        event.google_calendar_id = results['id']
        event.save()
        _logger.info("Google Calendar event for event '%s' created successfully", event.name)
    else:
        _logger.error("New Google Calendar event did not have id in response, was: %s", results)
@shared_task
@log_errors
def delete_google_calendar_event(google_calendar_event_id):
    """Celery task: remove an event from the Google calendar by its remote id."""
    service = get_calendar_service()
    if not service:
        return
    result = service.events().delete(calendarId=settings.GOOGLE_CALENDAR_ID,
                                     eventId=google_calendar_event_id).execute()
    _logger.info('Google calendar event %s deleted: %s', google_calendar_event_id, result)
def get_google_calendar_payload_for_event(event):
    """Translate an Event model instance into a Google Calendar API body."""
    timezone = 'Europe/Oslo'
    payload = {
        'summary': event.name,
        'location': event.location,
        'description': event.summary,
    }
    payload['start'] = {
        'dateTime': event.startdate.isoformat(),
        'timeZone': timezone,
    }
    payload['end'] = {
        'dateTime': event.enddate.isoformat(),
        'timeZone': timezone,
    }
    return payload
8,375 | 64935ae910d5f330722b637dcc5794e7e07ab52d | from eval_lib.classification_results import analyze_one_classification_result
from eval_lib.classification_results import ClassificationBatches
from eval_lib.cloud_client import CompetitionDatastoreClient
from eval_lib.cloud_client import CompetitionStorageClient
from eval_lib.dataset_helper import DatasetMetadata
from eval_lib.dataset_helper import download_dataset
from eval_lib.dataset_helper import enforce_epsilon_and_compute_hash
from eval_lib.image_batches import AversarialBatches
from eval_lib.image_batches import DatasetBatches
from eval_lib.submissions import CompetitionSubmissions
from eval_lib.work_data import AttackWorkPieces
from eval_lib.work_data import DefenseWorkPieces
|
8,376 | 7491a17256b9bc7af0953202e45f0fd9d5c34c40 | import ctypes
import time
from order_queue.order import Order
class stock(ctypes.Structure):
    # C-layout record describing a single order (shared with native code).
    _fields_ = [('stock_id', ctypes.c_int), ('order_type',ctypes.c_int),('Time',ctypes.c_char * 40),('user_id',ctypes.c_int),('volume',ctypes.c_int),
                ('price',ctypes.c_double)
                ]
class exchange(ctypes.Structure):
    # C-layout record describing one completed trade between buyer and seller.
    _fields_ = [
        ('stock_id',ctypes.c_int),
        ('buy_id',ctypes.c_int),
        ('sell_id',ctypes.c_int),
        ('Time',ctypes.c_char * 40),
        ('volume',ctypes.c_int),
        ('price',ctypes.c_double)
    ]
class TestSturcture(ctypes.Structure):
    # Minimal structure used for experiments (class-name typo kept: callers
    # may reference it).
    _fields_ = [
        ('a',ctypes.c_int),
        ('n',ctypes.c_int)
    ]
def time_conversion(input):
    """Render a time.struct_time as an HH:MM:SS byte string (UTF-8)."""
    hhmmss = time.strftime("%H:%M:%S", input)
    return hhmmss.encode('utf-8')
def order_conversion(order):
    """Convert a Python Order into the ctypes `stock` record."""
    get_time = time_conversion(order.time)
    # user ids appear to carry a one-character prefix that is stripped here
    # (get_user_id()[1:]) — TODO confirm the id format with the Order class
    get = stock(int(order.get_stock_id()),int(order.get_direction()),get_time,int(order.get_user_id()[1:]),int(order.get_volume()),float(order.get_price()))
    return get
def regenerate_order(result, long_order, short_order):
    """Rebuild the residual order left over after a (partial) match.

    :param result: exchange record of the executed trade (uses .volume)
    :param long_order: the buy-side Order involved in the match
    :param short_order: the sell-side Order involved in the match
    :return: a new Order for the unfilled remainder, or None when both sides
        were filled exactly

    Bug fix: the original computed the leftover volume from `long_order` in
    BOTH branches; the second branch (short side partially filled) must use
    `short_order`'s volume.
    """
    if int(long_order.get_volume()) != result.volume:
        leftover = long_order
    elif int(short_order.get_volume()) != result.volume:
        leftover = short_order
    else:
        # both orders fully filled: nothing remains
        return None
    left_volume = int(leftover.get_volume()) - result.volume
    return Order(leftover.get_stock_id(), leftover.get_user_id(),
                 leftover.get_price(), left_volume, leftover.get_direction())
if __name__ == '__main__':
print(time_conversion(time.localtime(time.time()))) |
8,377 | 443ce5c2ec86b9f89ad39ef2ac6772fa002e7e16 | class NumMatrix(object):
    def __init__(self, matrix):
        # dp[i][j] holds the sum of the sub-matrix rows [0, i) x cols [0, j)
        # (2D prefix sums with a zero padding row and column).
        if matrix:
            self.dp = [[0] * (len(matrix[0]) + 1) for i in range(len(matrix)+1)]
            for i in xrange(1,len(matrix)+1):
                for j in xrange(1,len(matrix[0])+1):
                    self.dp[i][j] = self.dp[i-1][j] + self.dp[i][j-1] + matrix[i-1][j-1] - self.dp[i-1][j-1]
    def sumRegion(self, row1, col1, row2, col2):
        # inclusion-exclusion over the prefix-sum table; corners are inclusive
        return self.dp[row2+1][col2+1] + self.dp[row1][col1] - self.dp[row1][col2+1] - self.dp[row2+1][col1]
# Your NumMatrix object will be instantiated and called as such:
matrix = [[3,0,1,4,2],[5,6,3,2,1],[1,2,0,1,5],[4,1,0,1,7],[1,0,3,0,5]]
for m in matrix:
print m
print
numMatrix = NumMatrix(matrix)
print numMatrix.sumRegion(2, 1, 4, 3)
print numMatrix.sumRegion(1, 2, 3, 4)
|
8,378 | 2e3c1bf0a4c88bda35a48008cace8c21e071384e | disk = bytearray (1024*1024);
def config_complete():
pass
def open(readonly):
return 1
def get_size(h):
global disk
return len (disk)
def can_write(h):
return True
def can_flush(h):
return True
def is_rotational(h):
return False
def can_trim(h):
return True
def pread(h, count, offset):
global disk
return disk[offset:offset+count]
def pwrite(h, buf, offset):
global disk
end = offset + len (buf)
disk[offset:end] = buf
def flush(h):
pass
def trim(h, count, offset):
pass
|
8,379 | dfd2b515e08f285345c750bf00f6a55f43d60039 | """David's first approach when I exposed the problem.
Reasonable to add in the comparison?
"""
import numpy as np
from sklearn.linear_model import RidgeCV
from sklearn.model_selection import ShuffleSplit
def correlation(x, y):
    """Column-wise Pearson correlation matrix between x and y.

    Both arrays are z-scored per column (population std, ddof=0), so the
    normalized cross-product equals the Pearson coefficients: the result
    has shape (x_features, y_features).
    """
    xz = (x - x.mean(0)) / x.std(0)
    yz = (y - y.mean(0)) / y.std(0)
    return xz.T @ yz / len(x)
def partial_correlation_bagging(solver, x, y, z, ensemble=None):
    """Correlation of x and y after regressing out z, averaged over splits.

    Each (fit, eval) index pair in `ensemble` fits `solver` to predict x
    and y from z on the fit split, then correlates the residuals on the
    eval split.  Without an ensemble, a single fit/eval pass over all
    rows is used.
    """
    if ensemble is None:
        ensemble = [(range(len(x)), range(len(x)))]
    scores = []
    for fit_idx, eval_idx in ensemble:
        x_pred = solver.fit(z[fit_idx], x[fit_idx]).predict(z[eval_idx])
        y_pred = solver.fit(z[fit_idx], y[fit_idx]).predict(z[eval_idx])
        scores.append(correlation(x[eval_idx] - x_pred, y[eval_idx] - y_pred))
    return np.mean(scores, 0)
def partial_correlation_loop(solver, x, y, ensemble=None):
    """Score each column of y by its partial correlation with x.

    For column i, the remaining columns of y act as confounds; the score
    is the sum of squared partial correlations across x features.
    """
    e_hat = np.zeros(y.shape[1])
    for col in range(y.shape[1]):
        target = y[:, col].reshape(-1, 1)
        confounds = np.delete(y, col, axis=1)
        r = partial_correlation_bagging(solver, x, target, confounds, ensemble)
        e_hat[col] = np.sum(r ** 2)
    return e_hat
class PartialCorrelation(object):
    """Estimator scoring each Y column by partial correlation with X.

    With ``bagging=True`` the fit/eval indices come from random 50/50
    ShuffleSplit folds; otherwise a single all-rows pass is used.
    Scores are stored in ``self.E_`` by :meth:`fit`.
    """

    def __init__(self, solver=None, bagging=False):
        self.solver = RidgeCV() if solver is None else solver
        self.bagging = bagging

    def fit(self, X, Y):
        """Compute per-column scores into self.E_ and return self."""
        if self.bagging:
            splitter = ShuffleSplit(test_size=.5)
            ensemble = [(fit_idx, eval_idx)
                        for fit_idx, eval_idx in splitter.split(X, Y)]
        else:
            ensemble = None
        self.E_ = partial_correlation_loop(self.solver, X, Y, ensemble)
        return self
if __name__ == '__main__':
    # Synthetic benchmark: Y = F(EX + N).  Statement order is significant
    # here -- every np.random draw consumes seeded RNG state, so do not
    # reorder the simulation steps.
    from sklearn.preprocessing import scale
    from sklearn.metrics import roc_auc_score
    # Simulate data
    """Y = F(EX+N)"""
    np.random.seed(0)
    # Problem dimensionality
    n = 1000
    nE = nX = 10
    nY = 10
    snr = 25  # signal to noise ratio
    # fraction, then count, of X features selected by E
    selected = .5  # number of X feature selected by E
    selected = min(int(np.floor(selected*nX)) + 1, nX-1)
    E = np.identity(nX)
    E[selected:] = 0
    # X covariance
    Cx = np.random.randn(nX, nX)
    Cx = Cx.dot(Cx.T) / nX  # symmetric positive semi-definite
    X = np.random.multivariate_normal(np.zeros(nX), Cx, n)
    # Noise (homoscedastic in source space)
    N = np.random.randn(n, nE)
    # Forward operator (linear mixture)
    F = np.random.randn(nY, nE)
    Y = ((X @ E.T) * snr + N) @ F.T
    X = scale(X)
    Y = scale(Y)
    # Fit method on the even rows; odd rows reserved for a future score().
    partialcorr = PartialCorrelation()
    train, test = range(0, n, 2), range(1, n, 2)
    E_hat = partialcorr.fit(X[train], Y[train]).E_
    # score = partialcorr.score(X[test], Y[test]) # TODO
    # AUC of recovered selection scores vs the true diagonal of E.
    print('E_auc', roc_auc_score(np.diag(E), E_hat))
|
8,380 | 7e985f55271c8b588abe54a07d20b89b2a29ff0d | from codecool_class import CodecoolClass
from mentor import Mentor
from student import Student
# NOTE(review): this binds the factory attribute itself, not its result;
# if create_local is a classmethod meant to build an instance, a call
# `()` is missing -- confirm against codecool_class.py.
codecool_bp = CodecoolClass.create_local
|
8,381 | 6ba830aafbe8e4b42a0b927328ebcad1424cda5e | class Solution:
'''
先遍历整个string,并记录最小的character的出现次数。
如果最小character出现次数都不小于k,那么说明整个string就是满足条件的longest substring,返回原string的长度即可;
如果character的出现次数小于k,假设这个character是c,因为满足条件的substring永远不会包含c,所以满足条件的substring一定是在以c为分割参考下的某个substring中。所以我们需要做的就是把c当做是split的参考,在得到的String[]中再次调用我们的method,找到最大的返回值即可。
'''
def longestSubstring(self, s: str, k: int) -> int:
def helper(s, k):
if len(s) < k:
return 0
ch = min(set(s), key=s.count)
if s.count(ch) >= k:
return len(s)
else:
return max(helper(t, k) for t in s.split(ch))
return helper(s, k)
|
8,382 | ae7a2de8742e353818d4f5a28feb9bce04d787bb | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 17:34:32 2019
@author: fanlizhou
Analyze codon usage of sequence from 'SP_gene_seq.txt' and 'LP_gene_seq.txt'
Plot heatmap of amino acid usage and codon usage
Plot codon usage in each gene for each amino acid. Genes were arranged so that
the gene expression of SP decrease from 0 to 50 (x-axis) and the gene expression
of LP increase from 51 to 100 (x-axis)
Usage: codon_usage.py [-h] [--label LABEL] sp_file lp_file
Options:
--label Define the label of out-put files. Default="top"
sp_file Path to the SP data files
lp_file Path to the LP data files
"""
import io, os, argparse, collections
from scipy import stats
import matplotlib.pyplot as plt
import numpy as np
def parse_args():
    """Parse command-line arguments and verify both input files exist.

    Exits via parser.error() when either data file path is not a
    regular file.
    """
    parser = argparse.ArgumentParser(
        description='Analyze codon usage of SP and LP\n')
    parser.add_argument('sp_file', help='one input SP data file\n')
    parser.add_argument('lp_file', help='one input LP data file\n')
    parser.add_argument('--label', '-l',
                        type=str, required=False, default='top',
                        help='Define the label of out-put files. Default="top"\n')
    args = parser.parse_args()
    # Fail fast on missing inputs instead of crashing mid-analysis.
    for path in (args.sp_file, args.lp_file):
        if not os.path.isfile(path):
            parser.error('File "%s" cannot be found.' % (path))
    return args
# a Codon_Usage class to store codon usage information for each genotype
class Codon_Usage:
    """Parse a FASTA-like gene-sequence file and compute codon usage.

    ``self.seq`` holds the sequences of all genes whose length is a
    multiple of 3; ``self.gene_num`` is the count of those genes.
    """

    def __init__(self, filename):
        self.seq, self.gene_num = self.get_seq(filename)

    def get_seq(self, filename):
        """Read gene sequences from `filename`.

        A line starting with '>' begins a new gene; any other line is
        sequence data appended to the current gene.  Genes whose length
        is not a multiple of 3 are counted and dropped.

        Returns (list_of_triple_sequences, triple_gene_count).
        """
        # list of selected gene sequences; non-triple genes are excluded
        all_seq = []
        gene_seq = ''
        count_all = 0
        count_non_triple = 0

        def _flush(seq):
            # Keep a finished gene if its length is a multiple of 3,
            # otherwise just count it as rejected.
            nonlocal count_non_triple
            if len(seq) % 3:
                count_non_triple += 1
            else:
                all_seq.append(seq)

        # `with` guarantees the handle is closed even on errors
        # (the original used a bare open/close pair).
        with open(filename) as file:
            for line in file:
                # gene information line starts a new record
                if line[0] == '>':
                    count_all += 1
                    if gene_seq != '':
                        _flush(gene_seq)
                        gene_seq = ''
                # gene sequence line
                else:
                    gene_seq += line.strip()
        # BUG FIX: the original never flushed the final gene after the
        # loop, silently dropping the last sequence of every file (and
        # never checking it for triple length).
        if gene_seq != '':
            _flush(gene_seq)
        print('%s:\n%d genes added\n%d are non-triple\n' %
              (filename[:2], count_all, count_non_triple))
        return (all_seq, count_all - count_non_triple)

    def get_AA(self, codon):
        """Map a codon to its amino-acid 3-letter code ('' if unknown)."""
        # dict key: codon -> AA
        codon_map = {
            'TTT':'Phe', 'TTC':'Phe', 'TTA':'Leu', 'TTG':'Leu',
            'TCT':'Ser', 'TCC':'Ser', 'TCA':'Ser', 'TCG':'Ser',
            'TAT':'Tyr', 'TAC':'Tyr', 'TAA':'STOP', 'TAG':'STOP',
            'TGT':'Cys', 'TGC':'Cys', 'TGA':'STOP', 'TGG':'Trp',
            'CTT':'Leu', 'CTC':'Leu', 'CTA':'Leu', 'CTG':'Leu',
            'CCT':'Pro', 'CCC':'Pro', 'CCA':'Pro', 'CCG':'Pro',
            'CAT':'His', 'CAC':'His', 'CAA':'Gln', 'CAG':'Gln',
            'CGT':'Arg', 'CGC':'Arg', 'CGA':'Arg', 'CGG':'Arg',
            'ATT':'Ile', 'ATC':'Ile', 'ATA':'Ile', 'ATG':'Met',
            'ACT':'Thr', 'ACC':'Thr', 'ACA':'Thr', 'ACG':'Thr',
            'AAT':'Asn', 'AAC':'Asn', 'AAA':'Lys', 'AAG':'Lys',
            'AGT':'Ser', 'AGC':'Ser', 'AGA':'Arg', 'AGG':'Arg',
            'GTT':'Val', 'GTC':'Val', 'GTA':'Val', 'GTG':'Val',
            'GCT':'Ala', 'GCC':'Ala', 'GCA':'Ala', 'GCG':'Ala',
            'GAT':'Asp', 'GAC':'Asp', 'GAA':'Glu', 'GAG':'Glu',
            'GGT':'Gly', 'GGC':'Gly', 'GGA':'Gly', 'GGG':'Gly'}
        return codon_map.get(codon, '')

    def get_usage_dict(self, seq):
        """Count codon/AA usage for one gene sequence.

        Returns a dict:
            AA -> [ {codon -> [codon_count, codon_count/AA_count]},
                    AA_count ]
        """
        usage_dict = \
            collections.defaultdict(lambda:
                                    [
                                        collections.defaultdict(
                                            lambda: [0, 0]),
                                        0
                                    ])
        # tally AA and codon occurrences codon-by-codon
        for index in range(0, len(seq), 3):
            codon = seq[index:index+3]
            AA = self.get_AA(codon)
            if AA:
                # count how many times the AA appears
                usage_dict[AA][1] += 1
                # count how many times the codon is used
                usage_dict[AA][0][codon][0] += 1
        # convert raw codon counts to per-AA usage fractions
        for AA in usage_dict:
            for codon in usage_dict[AA][0]:
                usage_dict[AA][0][codon][1] = \
                    usage_dict[AA][0][codon][0] / usage_dict[AA][1]
        return usage_dict

    def get_AA_dict(self):
        """Collect per-gene codon-usage fractions for every codon.

        Returns a dict: AA -> codon -> list of usage fractions, one per
        gene in ``self.seq`` (0 when the gene never uses the codon).
        """
        AA_dict = \
            collections.defaultdict(
                lambda: collections.defaultdict(list))
        # dict key: AA -> codon list (fixed genetic-code layout)
        AA_map = {
            'Phe':['TTT', 'TTC'],
            'Leu':['TTA', 'TTG', 'CTT', 'CTC', 'CTA', 'CTG'],
            'Ser':['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'],
            'Tyr':['TAT', 'TAC'],
            'STOP':['TAA', 'TAG', 'TGA'],
            'Cys':['TGT', 'TGC'],
            'Trp':['TGG'],
            'Pro':['CCT', 'CCC', 'CCA', 'CCG'],
            'His':['CAT', 'CAC'],
            'Gln':['CAA', 'CAG'],
            'Arg':['CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'],
            'Ile':['ATT', 'ATC', 'ATA'],
            'Met':['ATG'],
            'Thr':['ACT', 'ACC', 'ACA', 'ACG'],
            'Asn':['AAT', 'AAC'],
            'Lys':['AAA', 'AAG'],
            'Val':['GTT', 'GTC', 'GTA', 'GTG'],
            'Ala':['GCT', 'GCC', 'GCA', 'GCG'],
            'Asp':['GAT', 'GAC'],
            'Glu':['GAA', 'GAG'],
            'Gly':['GGT', 'GGC', 'GGA', 'GGG']
        }
        # per-gene usage dicts, in the same order as self.seq
        usage_dict_list = [self.get_usage_dict(seq) for seq in self.seq]
        # gather each codon's usage fraction across all genes
        for AA in list(AA_map.keys()):
            for codon in AA_map[AA]:
                for usage_dict in usage_dict_list:
                    AA_dict[AA][codon].append(
                        usage_dict[AA][0][codon][1])
        return AA_dict
def heatmap_SP_LP(sp_AA_dict, lp_AA_dict, label):
    """Compare SP vs LP codon usage statistically and plot two heatmaps.

    Per codon: Welch's t-test between the SP and LP per-gene usage lists.
    Per AA: Chi-Square test over the SP/LP mean codon usage vectors.
    Results are laid out 8 codons / 6 AAs per heatmap row.

    Returns the AA list produced by choose_codons() for follow-up.
    NOTE(review): significance is declared at p < 0.5 (both in code and
    in the printed message) -- an unusually loose threshold; confirm it
    is not a typo for 0.05.
    """
    # list of Chi-Square test results
    AA_chisquare = []
    # AA plotting annotation information
    AA_text = []
    # list of student's t-test results
    codon_ttest = []
    # codon plotting annotation information
    codon_text = []
    # i/j are running codon/AA counters used only for row wrapping
    i = 0
    j = 0
    # number of codons analyzed
    count_all = 0
    # number of codons that show significant results
    count_sig = 0
    for AA in list(sp_AA_dict.keys()):
        # mean values of codon usage for each AA
        sp_codon_mean = []
        lp_codon_mean = []
        for codon in sp_AA_dict[AA]:
            # Welch's t-test (unequal variances) on per-gene usage
            p_val = stats.ttest_ind(sp_AA_dict[AA][codon],
                                    lp_AA_dict[AA][codon],
                                    equal_var = False)[1]
            # display eight codons in a row
            if not i % 8:
                codon_ttest.append([])
                codon_text.append([])
            i += 1
            # handle NULL values
            if np.isnan(p_val):
                codon_ttest[-1].append(0)
                codon_text[-1].append(codon + '\n NA')
            # save ttest p-values and annotation information
            else:
                codon_ttest[-1].append(p_val)
                codon_text[-1].append(codon + '\n' + str(round(p_val, 2)))
                count_all += 1
                if p_val < 0.5:
                    count_sig += 1
            sp_codon_mean.append(np.mean(sp_AA_dict[AA][codon]))
            lp_codon_mean.append(np.mean(lp_AA_dict[AA][codon]))
        # get Chi-Square test results of each AA
        p_val = stats.chisquare(np.array([sp_codon_mean, lp_codon_mean]),
                                axis = None)[1]
        # display six AA in a row
        if not j % 6:
            AA_chisquare.append([])
            AA_text.append([])
        j += 1
        # handle Null values
        if np.isnan(p_val):
            AA_chisquare[-1].append(0)
            AA_text[-1].append(AA + '\n NA')
        # save Chi-Square test p-values and annotation information
        else:
            AA_chisquare[-1].append(p_val)
            AA_text[-1].append(AA + '\n' + str(round(p_val, 2)))
    # pad the last AA row so the array is rectangular
    for n in range(j % 6, 6):
        AA_chisquare[-1].append(0)
        AA_text[-1].append('')
    # get list of AAs that show significant difference between SP and LP groups
    AAs = choose_codons(codon_ttest, codon_text)
    AA_chisquare = np.array(AA_chisquare)
    codon_ttest = np.array(codon_ttest)
    AA_text = np.array(AA_text)
    codon_text = np.array(codon_text)
    print('%d out of %d codon show significant usage difference \
between SP and LP genes (p_value < 0.5)\n' %
          (count_sig, count_all))
    plot_heatmap(AA_chisquare, AA_text, 'AAs_ChiSquare', label)
    plot_heatmap(codon_ttest, codon_text, 'Codons_ttest', label)
    return AAs
def plot_heatmap(data, text, cbarlabel, label):
    """Render one annotated heatmap and save it under ../results/."""
    fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (10, 5))
    im, cbar = heatmap(data, ax, 'YlGn', cbarlabel)
    annotate_heatmap(im, text)
    fig.tight_layout()
    # NOTE(review): `plt.show` is referenced without parentheses, so this
    # line is a no-op; confirm whether interactive display was intended.
    plt.show
    plt.savefig(f'../results/{cbarlabel}_{label}.png')
def heatmap(data, ax, cmap, cbarlabel):
    """Draw `data` as an image on `ax` with a labelled colorbar.

    Returns (image, colorbar) so callers can annotate the cells.
    """
    if not ax:
        ax = plt.gca()
    im = ax.imshow(data, cmap)
    cbar = ax.figure.colorbar(im, ax=ax)
    # integer tick labels for both axes
    ax.set_xticks(np.arange(data.shape[1]))
    ax.set_yticks(np.arange(data.shape[0]))
    ax.set_xticklabels(range(data.shape[1]))
    ax.set_yticklabels(range(data.shape[0]))
    ax.tick_params(top=False, bottom=True,
                   labeltop=False, labelbottom=True)
    # draw white space between squares
    for edge, spine in ax.spines.items():
        spine.set_visible(False)
    ax.set_xticks(np.arange(data.shape[1] + 1) - 0.5, minor = True)
    ax.set_yticks(np.arange(data.shape[0] + 1) - 0.5, minor = True)
    ax.grid(which = 'minor', color = 'w', linestyle = '-', linewidth = 3)
    ax.tick_params(which = 'minor', bottom = False, left = False)
    cbar.ax.set_ylabel(cbarlabel, va = 'top')
    return im, cbar
def annotate_heatmap(im, text_label):
    """Write text_label[i, j] into each heatmap cell.

    Text color flips from black to white once the normalized cell value
    crosses half of the maximum, keeping labels readable on dark cells.
    """
    textcolors = ['black','white']
    data = im.get_array()
    # set threshold to decide color
    threshold = im.norm(data.max()) / 2
    kw = dict(horizontalalignment = 'center',
              verticalalignment = 'center')
    for i in range(data.shape[0]):
        for j in range(data.shape[1]):
            kw.update(color = textcolors[im.norm(data[i,j]) > threshold])
            im.axes.text(j, i, text_label[i,j], **kw)
def choose_codons(ttest, text):
    """Select strongly significant two-codon AAs and write them to a file.

    Scans the codon t-test grid for p < 0.01; the codon name is recovered
    from the first 3 characters of the matching annotation string.  Only
    AAs with exactly two codon choices are considered.  Side effect:
    writes the selection to 'AAs_to_compare.txt' in the working dir.
    Returns the list of selected AA names.
    """
    # dict key: codon -> AA
    # only contains AAs with only two codon choices
    codon_map = {
        'TTT':'Phe', 'TTC':'Phe', 'TAT':'Tyr', 'TAC':'Tyr',
        'TGT':'Cys', 'TGC':'Cys', 'CAT':'His', 'CAC':'His',
        'CAA':'Gln', 'CAG':'Gln', 'AAT':'Asn', 'AAC':'Asn',
        'AAA':'Lys', 'AAG':'Lys', 'GAT':'Asp', 'GAC':'Asp',
        'GAA':'Glu', 'GAG':'Glu'}
    codon_dict = collections.defaultdict(list)
    for i in range(len(ttest)):
        for j in range(len(ttest[i])):
            if ttest[i][j] < 0.01:
                # annotation strings are '<codon>\n<p>'; take the codon
                codon = text[i][j][:3]
                if codon in codon_map:
                    codon_dict[codon_map[codon]].append(codon)
    file = io.open('AAs_to_compare.txt', 'w')
    file.write('Compare following AAs\n')
    # AAs that have only two codon choices and show significant
    # codon usage difference between SP and LP groups
    AAs = []
    for AA in codon_dict.keys():
        AAs.append(AA)
        if len(codon_dict[AA]) == 2:
            file.write('%s: %s, %s\n' %
                       (AA, codon_dict[AA][0], codon_dict[AA][1]))
        else:
            file.write('%s: %s\n' % (AA, codon_dict[AA][0]))
    file.close()
    return AAs
def plot_SP_LP(sp_AA_dict, lp_AA_dict):
    """Plot SP and LP per-gene codon usage curves for every AA.

    WARNING: mutates `lp_AA_dict` in place -- each codon's LP list is
    reversed so LP expression runs low-to-high on the plot.  Calling
    this twice re-reverses the data.
    """
    # plot each AA
    for AA in list(sp_AA_dict.keys()):
        # list of codon usage information
        codon_data = []
        # List of codon names
        codons = []
        for codon in sp_AA_dict[AA]:
            # LP group data is displayed from lowest expressed genes
            # to highest expressed genes
            lp_AA_dict[AA][codon].reverse()
            codons.append(codon)
            codon_data.append([])
            # display SP group data first and then LP group data
            codon_data[-1].append(sp_AA_dict[AA][codon])
            codon_data[-1].append(lp_AA_dict[AA][codon])
        # plot usage curves
        codon_usage_plot(codon_data, AA, codons)
def codon_usage_plot(data, AA, codons):
    """Plot SP (x in 0-50) and LP (x in 50-100) usage curves for one AA."""
    fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (15,5))
    for i in range(len(data)):
        # 0-50 shows SP group data
        x_sp = np.linspace(0, 50, len(data[i][0]))
        # 50-100 shows LP group data
        x_lp = np.linspace(50, 100, len(data[i][1]))
        ax.plot(x_sp, data[i][0], label = 'sp_' + codons[i])
        ax.plot(x_lp, data[i][1], label = 'lp_' + codons[i])
    ax.legend(loc = 1)
    ax.set_title(AA)
def plot_distribution(sp_dict, lp_dict, AA):
    """Plot per-gene codon usage of one AA: SP on top, LP below."""
    fig, axes = plt.subplots(nrows = 2, ncols =1, figsize = (40, 20))
    for codon in sp_dict[AA]:
        x = np.arange(len(sp_dict[AA][codon]))
        sp_y = np.array(sp_dict[AA][codon])
        lp_y = np.array(lp_dict[AA][codon])
        axes[0].plot(x, sp_y)
        axes[1].plot(x, lp_y)
    # NOTE(review): `plt.show` without parentheses is a no-op.
    plt.show
def get_skellam_distribution(sp_dict, lp_dict, AA):
    """Plot Skellam pmfs from the mean usage of the AA's two codons.

    NOTE(review): assumes the AA has exactly two codons (indexes
    codons[0] and codons[1]); an AA with fewer raises IndexError.
    """
    sp_mu = {}
    lp_mu = {}
    codons = []
    # mean usage per codon for each group
    for codon in sp_dict[AA]:
        codons.append(codon)
        sp_mu[codon] = np.mean(sp_dict[AA][codon])
        lp_mu[codon] = np.mean(lp_dict[AA][codon])
    skellam_plot(sp_mu[codons[0]], sp_mu[codons[1]], 'SP-' + AA)
    skellam_plot(lp_mu[codons[0]], lp_mu[codons[1]], 'LP-' + AA)
def skellam_plot(mu1, mu2, name):
    """Plot the Skellam pmf for the 1st-99th percentile of (mu1, mu2)."""
    print(mu1,' ', mu2, ' ', mu1-mu2, ' ', name)
    fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (5, 5))
    x = np.arange(stats.skellam.ppf(0.01, mu1, mu2),
                  stats.skellam.ppf(0.99, mu1, mu2))
    ax.plot(x, stats.skellam.pmf(x, mu1, mu2), marker = 'o', label = name)
    ax.legend(loc = 1)
    # NOTE(review): `plt.show` without parentheses is a no-op.
    plt.show
# main flow: parse args, load both genotype files, run the statistical
# comparison and plots at module import time (this script has no
# __main__ guard by design).
args = parse_args()
sp_codon_usage = Codon_Usage(args.sp_file)
lp_codon_usage = Codon_Usage(args.lp_file)
sp_AA_dict = sp_codon_usage.get_AA_dict()
lp_AA_dict = lp_codon_usage.get_AA_dict()
print("Analyzing SP and LP %s group data\n" % (args.label))
AAs = heatmap_SP_LP(sp_AA_dict, lp_AA_dict, args.label)
plot_SP_LP(sp_AA_dict, lp_AA_dict)
# optional
# get Skellam distributions of AAs that have only two codon choices
# and show distinctive usage between SP and LP
'''
sp_all_codon_usage = Codon_Usage('SP_all_gene_seq.txt')
lp_all_codon_usage = Codon_Usage('LP_all_gene_seq.txt')
sp_all_AA_dict = sp_all_codon_usage.get_AA_dict()
lp_all_AA_dict = lp_all_codon_usage.get_AA_dict()
for AA in AAs:
    plot_distribution(sp_all_AA_dict, lp_all_AA_dict, AA)
    get_skellam_distribution(sp_all_AA_dict, lp_all_AA_dict, AA)
'''
8,383 | e0c6fb414d87c0a6377538089226e37b044edc70 | from django.shortcuts import render
from django_filters.rest_framework import DjangoFilterBackend
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from django.http import JsonResponse, Http404
from .serializers import *
from .models import *
from .filter import *
from rest_framework import generics
from rest_framework.filters import SearchFilter, OrderingFilter
# Create your views here.
@csrf_exempt
def TBGRApi(request, tbgrno=0):
    # CRUD endpoint for TBGR records.
    # NOTE(review): csrf_exempt disables CSRF protection on all methods,
    # and .get() raises DoesNotExist (HTTP 500) for unknown keys in
    # PUT/DELETE -- consider get_object_or_404; confirm intent.
    if request.method == 'GET':
        tbgrs = TBGR.objects.all()
        tbgrs_serializer = TBGRSerializer(tbgrs, many=True)
        return JsonResponse(tbgrs_serializer.data, safe=False)
    elif request.method == 'POST':
        tbgr_data = JSONParser().parse(request)
        tbgr_serializer = TBGRSerializer(data=tbgr_data)
        if tbgr_serializer.is_valid():
            tbgr_serializer.save()
            return JsonResponse("Added Successfully!!", safe=False)
        return JsonResponse("Failed to Add.", safe=False)
    elif request.method == 'PUT':
        # Primary key for the update comes from the request body.
        tbgr_data = JSONParser().parse(request)
        tbgr = TBGR.objects.get(tbgrno=tbgr_data['tbgrno'])
        tbgr_serializer = TBGRSerializer(tbgr, data=tbgr_data)
        if tbgr_serializer.is_valid():
            tbgr_serializer.save()
            return JsonResponse("Updated Successfully!!", safe=False)
        return JsonResponse("Failed to Update.", safe=False)
    elif request.method == 'DELETE':
        tbgr = TBGR.objects.get(tbgrno=tbgrno)
        tbgr.delete()
        return JsonResponse("Deleted Succeffully!!", safe=False)
@csrf_exempt
def BoardApi(request):
    # Read-only list endpoint for boards.
    if request.method=='GET':
        boards = Board.objects.all()
        boards_serializer = BoardSerializer(boards, many=True)
        return JsonResponse(boards_serializer.data, safe=False)
@csrf_exempt
def VillageApi(request, villageid=0):
    # CRUD endpoint for villages; mirrors TBGRApi's structure.
    if request.method == 'GET':
        villages = Village.objects.all()
        villages_serializer = VillageSerializer(villages, many=True)
        return JsonResponse(villages_serializer.data, safe=False)
    elif request.method == 'POST':
        village_data = JSONParser().parse(request)
        village_serializer = VillageSerializer(data=village_data)
        if village_serializer.is_valid():
            village_serializer.save()
            return JsonResponse("Added Successfully!!", safe=False)
        return JsonResponse("Failed to Add.", safe=False)
    elif request.method == 'PUT':
        village_data = JSONParser().parse(request)
        village = Village.objects.get(villageid=village_data['villageid'])
        village_serializer = VillageSerializer(village, data=village_data)
        if village_serializer.is_valid():
            village_serializer.save()
            return JsonResponse("Updated Successfully!!", safe=False)
        return JsonResponse("Failed to Update.", safe=False)
    elif request.method == 'DELETE':
        village = Village.objects.get(villageid=villageid)
        village.delete()
        return JsonResponse("Deleted Succeffully!!", safe=False)
@csrf_exempt
def SlipApi(request, lotno=0):
    # CRUD endpoint for slips, keyed by lot number.
    if request.method == 'GET':
        slips = Slip.objects.all()
        slips_serializer = SlipSerializer(slips, many=True)
        return JsonResponse(slips_serializer.data, safe=False)
    elif request.method == 'POST':
        slip_data = JSONParser().parse(request)
        slip_serializer = SlipSerializer(data=slip_data)
        if slip_serializer.is_valid():
            slip_serializer.save()
            return JsonResponse("Added Successfully!!", safe=False)
        return JsonResponse("Failed to Add.", safe=False)
    # Earlier filtered-search variant of POST, kept for reference:
    # elif request.method == 'POST':
    #     slip_data = JSONParser().parse(request)
    #     slips = Slip.objects.all()
    #     if slip_data['lotno']:
    #         slips = slips.filter(lotno=slip_data['lotno'])
    #     if slip_data['tbgrno']:
    #         slips = slips.filter(tbgrno=slip_data['tbgrno'])
    #     if slip_data['grade']:
    #         slips = slips.filter(grade=slip_data['grade'])
    #     slips_serializer = SlipSerializer(slips, many=True)
    #     return JsonResponse(slips_serializer.data, safe=False)
    elif request.method == 'PUT':
        slip_data = JSONParser().parse(request)
        slip = Slip.objects.get(lotno=slip_data['lotno'])
        slip_serializer = SlipSerializer(slip, data=slip_data)
        if slip_serializer.is_valid():
            slip_serializer.save()
            return JsonResponse("Updated Successfully!!", safe=False)
        return JsonResponse("Failed to Update.", safe=False)
    elif request.method == 'DELETE':
        slip = Slip.objects.get(lotno=lotno)
        slip.delete()
        return JsonResponse("Deleted Succeffully!!", safe=False)
@csrf_exempt
def GradeApi(request):
    # Read-only list endpoint for grades.
    if request.method == 'GET':
        grades = Grades.objects.all()
        grades_serializer = GradeSerializer(grades, many=True)
        return JsonResponse(grades_serializer.data, safe=False)
@csrf_exempt
def ContactApi(request, phone=0):
    # CRUD endpoint for contacts, keyed by phone number.
    if request.method == 'GET':
        contacts = Contacts.objects.all()
        contacts_serializer = ContactSerializer(contacts, many=True)
        return JsonResponse(contacts_serializer.data, safe=False)
    elif request.method == 'POST':
        contact_data = JSONParser().parse(request)
        contact_serializer = ContactSerializer(data=contact_data)
        if contact_serializer.is_valid():
            contact_serializer.save()
            return JsonResponse("Added Successfully!!", safe=False)
        # Unlike the other views, a failed add returns HTTP 404 here.
        return JsonResponse("Failed to Add.", safe=False, status=404)
    elif request.method == 'PUT':
        contact_data = JSONParser().parse(request)
        contact = Contacts.objects.get(phone=contact_data['phone'])
        contact_serializer = ContactSerializer(contact, data=contact_data)
        if contact_serializer.is_valid():
            contact_serializer.save()
            return JsonResponse("Updated Successfully!!", safe=False)
        return JsonResponse("Failed to Update.", safe=False)
    elif request.method == 'DELETE':
        contact = Contacts.objects.get(phone=phone)
        contact.delete()
        return JsonResponse("Deleted Succeffully!!", safe=False)
|
8,384 | 3001534be3364be1148cd51a4a943fd8c975d87e | from flask import (Flask,
render_template,
request,
url_for,
redirect,
flash,
jsonify)
app = Flask(__name__)
@app.route('/', methods=['GET'])
def showHomepage():
    # Serve the landing-page template.
    return render_template('home.html')
if __name__ == '__main__':
    print('app started')
    # NOTE(review): hard-coded secret key and debug=True are acceptable
    # for local development only -- do not deploy as-is.
    app.secret_key = 'secretkey'
    app.run(debug=True)
8,385 | 2f64aac7032ac099870269659a84b8c7c38b2bf0 | import pandas as pd
import subprocess
import statsmodels.api as sm
import numpy as np
import math
'''
This function prcesses the gene file
Output is a one-row file for a gene
Each individual is in a column
Input file must have rowname
gene: gene ENSG ID of interest
start_col: column number which the gene exp value starts
gene_col: column name for the gene column
gene_start_col: column name for the gene start position
chr_col: column name for the gene chromosome
'''
def process_input(gene_file, vcf_file, cov_file, gene, start_col, gene_col, chr_col, gene_start_col):
    """Extract one gene's expression row, nearby genotypes, and covariates.

    Uses `tabix` to pull VCF records in a 1 Mb window before the gene
    start, then `bcftools` for sample ids.  Returns (gene_expression,
    genotypes, covariates) as DataFrames sharing the same sample columns.

    NOTE(review): window is [gene_start-1e6, gene_start] (end = start+1e6),
    i.e. upstream only; for a symmetric cis window end should be
    gene_start + 1e6 -- confirm intent.
    NOTE(review): shell=True with interpolated paths is shell-injection
    prone, and the bcftools path is hard-coded to /usr/local/bin.
    """
    # sep='[\t,]' lets read_csv accept both comma- and tab-delimited files
    all_gene = pd.read_csv(gene_file, sep='[\t,]', header=0)
    gene=all_gene.loc[all_gene[gene_col]==gene,]
    gene_start=int(gene.loc[:,gene_start_col])
    chrom=int(gene.loc[:,chr_col])
    # keep only the per-individual expression columns
    gene=gene.iloc[:,start_col:gene.shape[1]]
    start=int(gene_start-1e6)
    if start < 0:start = 0
    end=int(start+1e6)
    cmd='tabix '+ vcf_file + ' ' + str(chrom) + ':' + str(start) + '-' + str(end)
    s = subprocess.check_output(cmd, shell=True)
    s = s.decode().strip()
    s = s.split('\n')
    gt=[]
    for i in s:
        gt.append(i.split('\t'))
    s1=pd.DataFrame(gt)
    # first 9 VCF columns are fixed fields; column 2 is the variant ID
    info=s1.iloc[:,0:9]
    s1=s1.drop([0,1,2,3,4,5,6,7,8],axis=1)
    s1.index=info.iloc[:,2]
    s2= pd.DataFrame()
    for i in s1.columns:
        # take the second ':'-separated FORMAT field per sample
        # (presumably the dosage -- verify against the VCF FORMAT line)
        s2[i] = s1[i].apply(lambda x: x.split(':')[1])
    sample_ids = subprocess.check_output('/usr/local/bin/bcftools query -l {}'.format(vcf_file), shell=True).decode().strip().split()
    s2.columns=sample_ids
    # align genotype and covariate columns to the expression samples
    s3=s2[gene.columns]
    cov = pd.read_csv(cov_file, sep='\t', index_col=0, header=0)
    cov=cov[gene.columns]
    return gene, s3, cov
'''This function takes the input from the previous function
Fit linear model
Return beta and pvalues for the SNPs
'''
def lm_res(snps,gene,cov):
    """Fit one OLS model per SNP and collect its beta and p-value.

    Each model regresses the gene's expression on [SNP, covariates] with
    an intercept; row i of the result holds the SNP coefficient (column
    'beta') and its p-value (column 'pval') -- index 1 of the fitted
    parameters, i.e. the first regressor after the constant.
    """
    res = pd.DataFrame(np.zeros([snps.shape[0],2], dtype=np.float32))
    res.index=snps.index
    res.columns=['beta','pval']
    for i in range(snps.shape[0]):
        # design matrix: this SNP's genotypes plus all covariates
        X=pd.concat([snps.iloc[i,].T, cov.T], axis=1)
        X = X.apply(pd.to_numeric)
        X = sm.add_constant(X)
        est = sm.OLS(pd.to_numeric(gene.T.iloc[:,0]), X).fit()
        res.iloc[i,0]=est.params[1]
        res.iloc[i,1]=est.pvalues[1]
    return res
|
8,386 | edc66bdc365f9c40ee33249bd2d02c0c5f28256a | import torch
import torch.nn as nn
class ReconstructionLoss(nn.Module):
    """Weighted MSE reconstruction loss over a motion sequence.

    Middle frames and the final frame are compared on all channels; the
    first frame is compared excluding its trailing velocity channels.
    The sum of the three MSE terms is scaled by 3.
    """

    def __init__(self, config):
        super(ReconstructionLoss, self).__init__()
        # number of trailing channels holding per-frame velocities
        self.velocity_dim = config.velocity_dim

    def forward(self, pre_seq, gt_seq):
        mse = nn.MSELoss()
        middle = mse(pre_seq[:, 1:-1, :], gt_seq[:, 1:-1, :])
        last = mse(pre_seq[:, -1, :], gt_seq[:, -1, :])
        first = mse(pre_seq[:, 0, :-self.velocity_dim],
                    gt_seq[:, 0, :-self.velocity_dim])
        return (middle + last + first) * 3
class BoneLoss(nn.Module):
    """Penalize deviation of predicted bone lengths from ground truth.

    Positions are de-normalized with (_mean, _std), the root joint is
    prepended at the origin, and per-frame bone lengths are compared to
    `gt_bone_length` (broadcast over batch/time) with MSE, scaled by 2.
    """

    def __init__(self, gt_bone_length, parents, _mean, _std, config):
        super(BoneLoss, self).__init__()
        # reference bone lengths and the skeleton's parent index table
        self.gt_bone_length = gt_bone_length
        self.parents = parents
        # normalization statistics used to recover raw positions
        self._mean = _mean
        self._std = _std
        self.device = config.device
        self.pos_dim = config.pos_dim

    def calculate_bone_length_for_seq(self, seq):
        """Per-frame bone lengths of a normalized sequence.

        seq: [batch_size, T, features]; the first pos_dim channels are
        flattened joint positions (J-1 joints, 3 coords each, root
        excluded).  Returns [batch_size, T, J-1] lengths.
        """
        # de-normalize: AddBackward0 [batch_size, T, size]
        src_seq = seq[..., :self.pos_dim] * self._std[:self.pos_dim] + self._mean[:self.pos_dim]
        # ViewBackward [batch_size, T, J-1, 3]
        new_seq = src_seq.view(src_seq.shape[0], src_seq.shape[1], int(src_seq.shape[2] / 3), 3)
        # root joint fixed at the origin for every frame
        root_pos = torch.tensor([[0, 0, 0]], dtype=torch.float32).to(self.device)
        root_positions = torch.unsqueeze(torch.unsqueeze(root_pos, 0), 0)
        root_positions = root_positions.repeat(src_seq.shape[0], src_seq.shape[1], 1, 1)
        # CatBackward [batch_size, T, J, 3]
        positions = torch.cat((root_positions, new_seq), 2)
        # one length per non-root joint, e.g. [200, 6, 23]
        # NOTE(review): filled by index assignment into torch.empty --
        # appears autograd-compatible via index_put, but verify gradients
        # flow as intended.
        result_list = torch.empty((src_seq.shape[0], src_seq.shape[1], int(src_seq.shape[2] / 3)),
                                  dtype=torch.float32).to(self.device)
        index = 0
        for joint, parent in enumerate(self.parents):
            if parent == -1:
                continue
            # [200, 6, 3] SelectBackward
            joint_pos = positions[:, :, joint]
            parent_pos = positions[:, :, parent]
            # [200, 6] SubBackward0
            delta_x = joint_pos[..., 0] - parent_pos[..., 0]
            delta_y = joint_pos[..., 1] - parent_pos[..., 1]
            delta_z = joint_pos[..., 2] - parent_pos[..., 2]
            # Euclidean bone length: [200, 6] PowBackward0
            length_temp = (delta_x ** 2 + delta_y ** 2 + delta_z ** 2) ** 0.5
            result_list[..., index] = length_temp
            index += 1
        return result_list

    def forward(self, predict_seq, _train_x1, _train_x2):
        # _train_x1/_train_x2 are unused; kept for a uniform loss API.
        train_bone_length = self.calculate_bone_length_for_seq(predict_seq)
        _, gt_bone_length = torch.broadcast_tensors(train_bone_length, self.gt_bone_length)
        MSE_loss = nn.MSELoss()
        bone_loss = MSE_loss(train_bone_length, gt_bone_length)
        return bone_loss * 2
class VelocityLoss(nn.Module):
    """Supervise predicted velocity channels and per-part speed factors.

    Velocities are recomputed from de-normalized predicted positions via
    frame differences, re-normalized, and compared to the sequence's
    velocity channels; per-body-part weighted speed magnitudes are also
    compared to the provided targets.
    """

    def __init__(self, _mean, _std, config):
        super(VelocityLoss, self).__init__()
        # normalization statistics; velocity / vel_factor stats occupy
        # the trailing channels of _mean/_std
        self._mean = _mean
        self._std = _std
        self.device = config.device
        self.root_pos_dim = config.root_pos_dim
        self.pos_dim = config.pos_dim
        self.velocity_dim = config.velocity_dim
        self.vel_factor_dim = config.vel_factor_dim

    def calculate_velocity(self, src_pos_seq, src_init_pos):
        """
        :param src_pos_seq: positions of the predicted sequence [Batch_size, seq_length, J * 3]
        :param src_init_pos: positions of the initial frame
        :return: frame-difference velocities, same length as src_pos_seq
        """
        # [batch_size, T + 1, J * 3] grad_fn=<CatBackward>
        temp_positions = torch.cat((torch.unsqueeze(src_init_pos, 1), src_pos_seq), 1)
        velocity = temp_positions[:, 1:] - temp_positions[:, :-1]
        return velocity

    def get_vel_factor(self, velocity):
        """Weighted mean speed per body part for every frame.

        Joints are grouped into 5 parts (`parts`) with per-joint weights
        (`weight`); each factor is the weight-normalized sum of joint
        speed magnitudes for that part.
        Returns [batch, seq, vel_factor_dim].
        """
        batch_size = velocity.shape[0]
        seq_len = velocity.shape[1]
        joint_num = int(velocity.shape[-1] / 3)
        # hard-coded 24-joint skeleton layout: per-joint weight and part id
        weight = [1, 2, 3, 4, 1, 2, 3, 4, 1, 1, 1, 1, 1, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 1]
        parts = [1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0, 0, 3, 3, 3, 3, 4, 4, 4, 4, 0, 0, 0]
        weight_sum = []
        for part in range(5):
            p_sum = 0
            for j in range(joint_num):
                if parts[j] == part:
                    p_sum += weight[j]
            weight_sum.append(p_sum)
        vel_factor = torch.empty((batch_size, seq_len, self.vel_factor_dim), dtype=torch.float32).to(self.device)
        for i in range(seq_len):
            factor = torch.zeros((batch_size, self.vel_factor_dim), dtype=torch.float32).to(self.device)
            for part in range(5):
                for j in range(joint_num):
                    if parts[j] == part:
                        # accumulate weighted joint speed magnitude
                        factor[:, part: part + 1] = factor[:, part: part + 1] + weight[j] / weight_sum[part] * \
                                                    pow(pow(velocity[:, i:i + 1, j * 3], 2) +
                                                        pow(velocity[:, i:i + 1, j * 3 + 1], 2) +
                                                        pow(velocity[:, i:i + 1, j * 3 + 2], 2), 0.5)
            vel_factor[:, i, :] = factor
        return vel_factor

    def forward(self, predict_seq, _train_x1, _train_x2, _true_vel_factor):
        # recover raw positions of prediction and of the seed frame
        init_pos = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim]
        src_pos_seq = (predict_seq[..., :self.pos_dim + self.root_pos_dim] *
                       self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:self.pos_dim + self.root_pos_dim])
        src_init_pos = (init_pos *
                        self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:self.pos_dim + self.root_pos_dim])
        train_velocity = self.calculate_velocity(src_pos_seq, src_init_pos)
        # re-normalize with the velocity slice of the stats:
        # grad_fn=<DivBackward0>
        _train_velocity = (train_velocity -
                           self._mean[-(self.velocity_dim + self.vel_factor_dim):-self.vel_factor_dim]) \
                          / self._std[-(self.velocity_dim + self.vel_factor_dim):-self.vel_factor_dim]
        train_vel_factor = self.get_vel_factor(train_velocity)
        _train_vel_factor = (train_vel_factor - self._mean[-self.vel_factor_dim:]) / self._std[-self.vel_factor_dim:]
        MSE_loss = nn.MSELoss()
        # the first frame's velocity channels are driven toward zero
        zero_seq = torch.zeros(predict_seq[:, 0, -self.velocity_dim:].shape).to(self.device)
        loss1 = MSE_loss(predict_seq[:, 1:, -self.velocity_dim:], _train_velocity[:, 1:, :]) * 10 \
                + MSE_loss(predict_seq[:, 0, -self.velocity_dim:], zero_seq) * 20
        loss2 = MSE_loss(_true_vel_factor[:, 1:-1, :], _train_vel_factor[:, 1:, :]) * 10
        velocity_loss = loss1 * 2 + loss2 * 1.5
        return velocity_loss
class ContactLoss(nn.Module):
    """Foot-skate penalty: feet flagged as in contact should not move.

    The loss is the mean over the batch of the summed product of the
    (de-normalized, absolute) contact labels and squared foot velocities,
    scaled by 2 -- large when a "planted" foot slides.
    """

    def __init__(self, _mean, _std, config):
        super(ContactLoss, self).__init__()
        self._mean = _mean
        self._std = _std
        self.root_pos_dim = config.root_pos_dim
        self.pos_dim = config.pos_dim
        self.contact_dim = config.contact_dim
        self.velocity_dim = config.velocity_dim
        # joint indices of the two left / two right foot joints
        self.left_feet = config.left_foot
        self.right_feet = config.right_foot
        self.vel_factor_dim = config.vel_factor_dim
        # offset of the contact channels from the end of the feature vector
        self.contact_loc = self.contact_dim + self.velocity_dim + self.vel_factor_dim

    def calculate_foot_vels(self, src_pos_seq, src_init_pos, left_foot, right_foot):
        """Squared per-frame speeds of the four foot joints.

        Returns [batch_size, seq_size, 4] ordered left0, left1, right0,
        right1.
        """
        # [batch_size, T + 1, J * 3] grad_fn=<CatBackward>
        temp_positions = torch.cat((torch.unsqueeze(src_init_pos, 1), src_pos_seq), 1)
        left_foot0_vel = (temp_positions[:, 1:, left_foot[0] * 3:(left_foot[0] * 3 + 3)]
                          - temp_positions[:, :-1, left_foot[0] * 3:(left_foot[0] * 3 + 3)]) ** 2
        left_foot0_vel = torch.sum(left_foot0_vel, -1, keepdim=True)
        left_foot1_vel = (temp_positions[:, 1:, left_foot[1] * 3:(left_foot[1] * 3 + 3)]
                          - temp_positions[:, :-1, left_foot[1] * 3:(left_foot[1] * 3 + 3)]) ** 2
        left_foot1_vel = torch.sum(left_foot1_vel, -1, keepdim=True)
        right_foot0_vel = (temp_positions[:, 1:, right_foot[0] * 3:(right_foot[0] * 3 + 3)]
                           - temp_positions[:, :-1, right_foot[0] * 3:(right_foot[0] * 3 + 3)]) ** 2
        right_foot0_vel = torch.sum(right_foot0_vel, -1, keepdim=True)
        right_foot1_vel = (temp_positions[:, 1:, right_foot[1] * 3:(right_foot[1] * 3 + 3)]
                           - temp_positions[:, :-1, right_foot[1] * 3:(right_foot[1] * 3 + 3)]) ** 2
        right_foot1_vel = torch.sum(right_foot1_vel, -1, keepdim=True)
        feet_vel = torch.cat((left_foot0_vel, left_foot1_vel, right_foot0_vel, right_foot1_vel), -1)
        return feet_vel  # [batch_size, seq_size, 4]

    def forward(self, predict_seq, _train_x1, _train_x2):
        # _train_x2 is unused; kept for a uniform loss API.
        init_pos = _train_x1[:, 0, :self.pos_dim + self.root_pos_dim]
        src_pos_seq = (predict_seq[..., :self.pos_dim + self.root_pos_dim] *
                       self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:self.pos_dim + self.root_pos_dim])
        src_init_pos = (init_pos *
                        self._std[:self.pos_dim + self.root_pos_dim] + self._mean[:self.pos_dim + self.root_pos_dim])
        feet_vels = self.calculate_foot_vels(src_pos_seq, src_init_pos, self.left_feet,
                                             self.right_feet)
        # de-normalized contact labels, made non-negative with abs()
        feet_contact = torch.abs(predict_seq[..., -(self.contact_dim + self.velocity_dim):-self.velocity_dim] *
                                 self._std[-self.contact_loc:-(self.velocity_dim + self.vel_factor_dim)] + \
                                 self._mean[-self.contact_loc:-(self.velocity_dim + self.vel_factor_dim)])
        contact_loss = torch.mean(torch.sum(torch.sum(feet_contact * feet_vels, dim=-1), dim=-1))
        return contact_loss * 2
class KeyframeLoss(nn.Module):
    """Pulls predicted frames toward the two bracketing keyframe poses.

    Short sequences (at most ``2 * key_num`` frames) blend both keyframes into
    every frame with a time-dependent weight; longer sequences anchor only the
    first and last ``key_num`` frames to their respective keyframe.
    """

    def __init__(self, config):
        super().__init__()
        self.device = config.device
        self.root_pos_dim = config.root_pos_dim
        self.root_rot_dim = config.root_rot_dim
        self.pos_dim = config.pos_dim
        self.key_num = config.key_num

    def forward(self, predict_seq, _train_x1, gt_seq):
        """Return the keyframe-anchoring loss (scaled by 2)."""
        pose_dim = self.pos_dim + self.root_pos_dim + self.root_rot_dim
        start_key = _train_x1[:, 0, :pose_dim]   # pose at the sequence start
        end_key = gt_seq[:, -1, :pose_dim]       # pose at the sequence end
        pred = predict_seq[:, :, :pose_dim]
        frames = pred.shape[1]
        mse = nn.MSELoss()
        total = torch.zeros([]).to(self.device)
        if frames <= self.key_num * 2:
            # Short clip: every frame is a time-weighted blend of both keys.
            for idx in range(frames):
                w = (idx + 1) / (frames + 1)
                frame = pred[:, idx, :]
                total = total + (1 - w) * mse(frame, start_key) + w * mse(frame, end_key)
        else:
            # Long clip: only anchor the boundary frames.
            for idx in range(self.key_num):
                total = total + mse(pred[:, idx, :], start_key)
            for idx in range(frames - self.key_num, frames):
                total = total + mse(pred[:, idx, :], end_key)
        return total * 2
class SmoothLoss(nn.Module):
    """Temporal smoothness loss on the predicted root trajectory.

    Penalizes frame-to-frame jumps of the root position/rotation channels and
    additionally anchors the first predicted frame to the conditioning pose
    and the last two frames to the final ground-truth pose.
    """

    def __init__(self, config):
        super().__init__()
        self.device = config.device
        # Channel widths used to slice the packed feature vector:
        # [joint positions | root position | root rotation | ...]
        self.root_pos_dim = config.root_pos_dim
        self.root_rot_dim = config.root_rot_dim
        self.pos_dim = config.pos_dim

    def forward(self, predict_seq, _train_x1, gt_seq):
        """Return the smoothness loss (scaled by 1.5).

        Args:
            predict_seq: predicted sequence [batch, T, features].
            _train_x1: conditioning input; frame 0 supplies the initial root pose.
            gt_seq: ground truth; its last frame supplies the target root pose.
        """
        # Initial root pose from the first conditioning frame (``:1`` keeps 3-D).
        init_root_pos = _train_x1[:, :1, self.pos_dim:self.pos_dim + self.root_pos_dim]
        init_root_rot = _train_x1[:, :1, self.pos_dim + self.root_pos_dim:
                                  self.pos_dim + self.root_pos_dim + self.root_rot_dim]
        # Predicted root trajectory.
        root_pos_seq = predict_seq[..., self.pos_dim:self.pos_dim + self.root_pos_dim]
        root_rot_seq = predict_seq[..., self.pos_dim + self.root_pos_dim:
                                   self.pos_dim + self.root_pos_dim + self.root_rot_dim]
        # Target root pose from the last ground-truth frame.
        last_root_pos = gt_seq[:, -1, self.pos_dim:self.pos_dim + self.root_pos_dim]
        last_root_rot = gt_seq[:, -1, self.pos_dim + self.root_pos_dim:
                               self.pos_dim + self.root_pos_dim + self.root_rot_dim]
        # pos_seq SliceBackward
        seq_num = len(root_pos_seq[0])
        batch_size = len(root_pos_seq)
        root_pos_item = torch.zeros([]).to(self.device)
        root_rot_item = torch.zeros([]).to(self.device)
        MSE_loss = nn.MSELoss()
        for idx in range(seq_num):
            if idx == 0:
                # First frame: match the conditioning pose.
                # MeanBackward0
                root_pos_temp = MSE_loss(root_pos_seq[:, :1, :], init_root_pos[:])
                root_rot_temp = MSE_loss(root_rot_seq[:, :1, :], init_root_rot[:])
            elif idx == seq_num - 1:
                # Last frame: both of the final two frames are pulled toward
                # the ground-truth end pose.
                root_pos_temp = MSE_loss(root_pos_seq[:, idx, :], last_root_pos) + \
                                MSE_loss(root_pos_seq[:, idx - 1, :], last_root_pos)
                root_rot_temp = MSE_loss(root_rot_seq[:, idx, :], last_root_rot) + \
                                MSE_loss(root_rot_seq[:, idx - 1, :], last_root_rot)
            else:
                # Interior frames: squared frame-to-frame difference, manually
                # normalized by batch size and sequence length.
                root_pos_temp = torch.sum(torch.pow(root_pos_seq[:, idx, :] - root_pos_seq[:, idx - 1, :], 2)) \
                                / batch_size / seq_num
                root_rot_temp = torch.sum(torch.pow(root_rot_seq[:, idx, :] - root_rot_seq[:, idx - 1, :], 2)) \
                                / batch_size / seq_num
            # AddBackward0
            root_pos_item = root_pos_item + root_pos_temp
            root_rot_item = root_rot_item + root_rot_temp
        loss = root_pos_item + root_rot_item  # DivBackward0
        return loss * 1.5
|
def filter(txt):  # NOTE: shadows the builtin `filter`; name kept for existing callers
    """Return only the bracket characters of *txt*, preserving order.

    Args:
        txt: an iterable of characters (typically an input line).

    Returns:
        list[str]: every '(', ')', '[' or ']' found, in original order.
    """
    return [ch for ch in txt if ch in ("(", ")", "[", "]")]
# Read lines until a lone "." and report, for each, whether its brackets are
# balanced ("yes") or not ("no").
result = []
while True:
    raw_input = input()
    line = filter(raw_input)
    if raw_input != ".":
        stack = []
        err = False
        for l in line:
            if l == "(" or l == "[":
                stack.append(l)
            elif l == "]":
                # A closer must match the opener on top of the stack; an
                # empty stack means an unmatched closer.  (Replaces the old
                # bare `except:` that silently caught the IndexError.)
                if stack and stack[-1] == "[":
                    stack.pop()
                else:
                    err = True
                    break
            elif l == ")":
                if stack and stack[-1] == "(":
                    stack.pop()
                else:
                    err = True
                    break
        # Leftover openers also mean the line is unbalanced.
        if err or stack:
            result.append("no")
        else:
            result.append("yes")
    else:
        break
for r in result:
    print(r)
|
8,388 | a8659ca7d7a5870fc6f62b3dfee1779e33373e7b | #!/usr/bin/python2.7
'''USAGE: completeness.py BLAST_output (tab formatted)
Prints % completeness based on marker gene BLAST of caled genes from a genome
Markers from Lan et al. (2016)
'''
import sys
with open(sys.argv[1],'r') as blastOut:
    geneHits = []  # marker-gene id of every BLAST hit
    orgHits = []   # organism id of every BLAST hit
    hits = 0.0
    for line in blastOut:
        hits += 1.0
        # Subject ids are underscore-delimited: "<organism>_..._<gene>".
        # assumes tab-formatted BLAST output with the subject in column 2 -- see usage string
        currHit = line.split()[1]
        currGene = currHit.split('_')[-1]
        currOrg = currHit.split('_')[0]
        geneHits.append(currGene)
        orgHits.append(currOrg)
    # Contamination: marker genes hit more than once suggest mixed genomes.
    uniqueGenes = list(set(geneHits))
    multiHits = []
    for index in uniqueGenes:
        if geneHits.count(index) >= 2:
            multiHits.append(geneHits.count(index))
    # NOTE(review): multiplying the multi-hit fraction by the *number* of
    # multi-hit genes looks unusual for a percentage -- confirm the intended
    # contamination formula.
    contamination = (float(sum(multiHits)) / hits) * float(len(multiHits))
    contamination = round((contamination * 100.0), 2)
    # Completeness: fraction of the 73 marker genes (Lan et al. 2016) found.
    uniqueGenes = float(len(uniqueGenes))
    completeness = round(((uniqueGenes / 73.0) * 100.0), 2)
    # Heterogeneity: how much of the bin is NOT from the dominant organism.
    uniqueOrgs = list(set(orgHits))
    topCount = 0
    hitCounts = []
    topOrg = 'org'
    for index in uniqueOrgs:
        if orgHits.count(index) > topCount:
            topCount = orgHits.count(index)
            hitCounts.append(topCount)
            topOrg = index
    otherCount = float(hits - topCount)
    uniqueOrgs = float(len(uniqueOrgs))
    heterogeneity = (otherCount / float(hits)) * uniqueOrgs
    heterogeneity = round((heterogeneity * 100.0), 2)
    print('\nGenome bin: ' + str(sys.argv[1]))
    print('Completeness: ' + str(completeness) + '%')
    print('Contamination: ' + str(contamination) + '%')
    print('Heterogeneity: ' + str(heterogeneity) + '%\n')
|
8,389 | dc2c9293040204f0ec2156c41b8be624f4e5cf99 | # 라이브러리 환경
import pandas as pd
import numpy as np
# sklearn 테이터셋에서 iris 데이터셋 로딩
from sklearn import datasets
iris = datasets.load_iris()
# The iris dataset is a dict-like Bunch, so its keys can be inspected.
'''
print(iris.keys())
print(iris['DESCR'])
print("데이터 셋 크기:", iris['target'])
print("데이터 셋 내용:\n", iris['target'])
'''
# Size of the dataset held in the 'data' attribute.
print("데이터 셋 크기:", iris['data'].shape)
# Contents of the 'data' attribute (first 7 rows).
data1 = ['a', 'b', 'c', 'd', 'e']
print(type(data1))
sr1 = pd.Series(data1)
# print(type(sr1))
data2 = (1, 2, 3.14, 100, -10)
sr2 = pd.Series(data2)
dict_data = {'c1':data1, 'c2':data2}
df = pd.DataFrame(dict_data)
print(df)
# Rename the columns and the index labels.
df.columns = ['string1', 'string2']
df.index = ['r1', 'r2', 'r3', 'r4', 'r5']
# print(df.loc['r2':'r4', 'string1':'string2'])
print('데이터셋 내용:\n', iris['data'][:7, :])
# Build a DataFrame from the iris feature matrix, then rename its columns.
df = pd.DataFrame(iris['data'], columns=iris['feature_names'])
print('데이터 프레임의 형태:', df.shape)
df.columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
print(df.head(2))
# Append the class labels as a 'Target' column.
df['Target'] = iris['target']
print(df.head())
x = [2, 1, 13, 4, 15, 26]
y = [0, 4, 31, 2, 42, 54]
df = pd.DataFrame({'X':x, 'Y':y})
print(df)
|
8,390 | 4c9a3983180cc75c39da41f7f9b595811ba0dc35 | import urllib.request
from urllib.request import Request, urlopen
import json
from requests import get
from requests.exceptions import RequestException
from contextlib import closing
from bs4 import BeautifulSoup
"""
Web Scraper ======================================================================
"""
def simple_get(url):
    """
    Fetch *url* with an HTTP GET request.

    Returns the raw response body when the response looks like HTML/XML,
    otherwise None. Network/request errors are logged and also yield None.
    """
    try:
        with closing(get(url, stream=True)) as resp:
            if not is_good_response(resp):
                return None
            return resp.content
    except RequestException as e:
        log_error('Error during requests to {0} : {1}'.format(url, str(e)))
        return None
def is_good_response(resp):
    """
    Decide whether *resp* is an HTML response worth parsing.

    True only for a 200 response whose Content-Type mentions "html".
    """
    content_type = resp.headers['Content-Type'].lower()
    if resp.status_code != 200:
        return False
    return content_type is not None and 'html' in content_type
def log_error(e):
    """
    Minimal error "logging": write the error to stdout.

    Kept as a seam so a real logger can be dropped in later.
    """
    print(e)
def save_json(file, path='gif_list.txt'):
    """Write *file* (a text/JSON string) to *path*.

    Args:
        file: the string content to persist.
        path: destination file name; defaults to 'gif_list.txt' (the value
            that was previously hard-coded) for backward compatibility.
    """
    with open(path, 'w') as f:
        f.write(file)
"""
Scraping
"""
# req = Request('https://play.pokemonshowdown.com/sprites/ani/', headers={'User-Agent': 'Mozilla/5.0'})
# url = urlopen(req).read()
# url = 'https://play.pokemonshowdown.com/sprites/ani/'
# raw_html = simple_get(url)
# soup = BeautifulSoup(url, 'lxml')
# # a = soup.find_all('td', attrs={'valign': 'top'})
# a = soup.find_all('a')
# videolist = []
# print(a)
# for v in a:
# tmp = v['href']
# videolist.append(tmp)
# filename = videolist[5:]
# print(filename)
def dl_img(url, file_path, file_name):
    """Download *url* to ``file_path/file_name.gif``.

    NOTE(review): this installs a process-global URL opener on every call
    (mutating urllib state for the whole interpreter); fine for this one-shot
    script, but worth confirming if the function is reused elsewhere.
    """
    full_path = file_path + '/' + file_name + '.gif'
    opener = urllib.request.build_opener()
    # Some hosts reject the default Python User-Agent, so spoof a browser.
    opener.addheaders = [('User-agent', 'Mozilla/5.0')]
    urllib.request.install_opener(opener)
    urllib.request.urlretrieve(url, full_path)
filename = ['abomasnow-f.gif', 'abomasnow-mega.gif', 'abomasnow.gif', 'abra.gif', 'absol-mega.gif', 'absol.gif', 'accelgor.gif', 'aegislash-blade.gif', 'aegislash.gif', 'aerodactyl-mega.gif', 'aerodactyl.gif', 'aggron-mega.gif', 'aggron.gif', 'aipom-f.gif', 'aipom.gif', 'alakazam-mega.gif', 'alakazam.gif', 'alcremie-caramel-swirl-berry.gif', 'alcremie-caramel-swirl-clover.gif', 'alcremie-caramel-swirl-flower.gif', 'alcremie-caramel-swirl-love.gif', 'alcremie-caramel-swirl-ribbon.gif', 'alcremie-caramel-swirl-star.gif', 'alcremie-caramelswirl.gif', 'alcremie-gmax.gif', 'alcremie-lemon-cream-berry.gif', 'alcremie-lemon-cream-clover.gif', 'alcremie-lemon-cream-flower.gif', 'alcremie-lemon-cream-love.gif', 'alcremie-lemon-cream-ribbon.gif', 'alcremie-lemon-cream-star.gif', 'alcremie-lemoncream.gif', 'alcremie-matcha-cream-berry.gif', 'alcremie-matcha-cream-clover.gif', 'alcremie-matcha-cream-flower.gif', 'alcremie-matcha-cream-love.gif', 'alcremie-matcha-cream-ribbon.gif', 'alcremie-matcha-cream-star.gif', 'alcremie-matchacream.gif', 'alcremie-mint-cream-berry.gif', 'alcremie-mint-cream-clover.gif', 'alcremie-mint-cream-flower.gif', 'alcremie-mint-cream-love.gif', 'alcremie-mint-cream-ribbon.gif', 'alcremie-mint-cream-star.gif', 'alcremie-mintcream.gif', 'alcremie-rainbow-swirl-berry.gif', 'alcremie-rainbow-swirl-clover.gif', 'alcremie-rainbow-swirl-flower.gif', 'alcremie-rainbow-swirl-love.gif', 'alcremie-rainbow-swirl-ribbon.gif', 'alcremie-rainbow-swirl-star.gif', 'alcremie-rainbowswirl.gif', 'alcremie-ruby-cream-berry.gif', 'alcremie-ruby-cream-clover.gif', 'alcremie-ruby-cream-flower.gif', 'alcremie-ruby-cream-love.gif', 'alcremie-ruby-cream-ribbon.gif', 'alcremie-ruby-cream-star.gif', 'alcremie-ruby-swirl-berry.gif', 'alcremie-ruby-swirl-clover.gif', 'alcremie-ruby-swirl-flower.gif', 'alcremie-ruby-swirl-love.gif', 'alcremie-ruby-swirl-ribbon.gif', 'alcremie-ruby-swirl-star.gif', 'alcremie-rubycream.gif', 'alcremie-rubyswirl.gif', 
'alcremie-salted-cream-berry.gif', 'alcremie-salted-cream-clover.gif', 'alcremie-salted-cream-flower.gif', 'alcremie-salted-cream-love.gif', 'alcremie-salted-cream-ribbon.gif', 'alcremie-salted-cream-star.gif', 'alcremie-saltedcream.gif', 'alcremie-vanilla-cream-berry.gif', 'alcremie-vanilla-cream-clover.gif', 'alcremie-vanilla-cream-flower.gif', 'alcremie-vanilla-cream-love.gif', 'alcremie-vanilla-cream-ribbon.gif', 'alcremie-vanilla-cream-star.gif', 'alcremie.gif', 'alomomola.gif', 'altaria-mega.gif', 'altaria.gif', 'amaura.gif', 'ambipom-f.gif', 'ambipom.gif', 'amoonguss.gif', 'ampharos-mega.gif', 'ampharos.gif', 'anorith.gif', 'appletun-gmax.gif', 'appletun.gif', 'applin.gif', 'araquanid-totem.gif', 'araquanid.gif', 'arbok.gif', 'arcanine.gif', 'arceus-bug.gif', 'arceus-dark.gif', 'arceus-dragon.gif', 'arceus-electric.gif', 'arceus-fairy.gif', 'arceus-fighting.gif', 'arceus-fire.gif', 'arceus-flying.gif', 'arceus-ghost.gif', 'arceus-grass.gif', 'arceus-ground.gif', 'arceus-ice.gif', 'arceus-poison.gif', 'arceus-psychic.gif', 'arceus-rock.gif', 'arceus-steel.gif', 'arceus-water.gif', 'arceus.gif', 'archen.gif', 'archeops.gif', 'arctovish.gif', 'arctozolt.gif', 'ariados.gif', 'armaldo.gif', 'aromatisse.gif', 'aron.gif', 'arrokuda.gif', 'articuno.gif', 'audino-mega.gif', 'audino.gif', 'aurorus.gif', 'aurumoth.gif', 'avalugg.gif', 'axew.gif', 'azelf.gif', 'azumarill.gif', 'azurill.gif', 'bagon.gif', 'baltoy.gif', 'banette-mega.gif', 'banette.gif', 'barbaracle.gif', 'barboach.gif', 'barboarch.gif', 'barraskewda.gif', 'basculin-bluestriped.gif', 'basculin.gif', 'bastiodon.gif', 'bayleef.gif', 'beartic.gif', 'beautifly-f.gif', 'beautifly.gif', 'beedrill-mega.gif', 'beedrill.gif', 'beheeyem.gif', 'beldum.gif', 'bellossom.gif', 'bellsprout.gif', 'bergmite.gif', 'bewear.gif', 'bibarel-f.gif', 'bibarel.gif', 'bidoof-f.gif', 'bidoof.gif', 'binacle.gif', 'bisharp.gif', 'blacephalon.gif', 'blastoise-mega.gif', 'blastoise.gif', 'blaziken-f.gif', 'blaziken-mega.gif', 
'blaziken.gif', 'blipbug.gif', 'blissey.gif', 'blitzle.gif', 'boldore.gif', 'boltund.gif', 'bonsly.gif', 'bouffalant.gif', 'bounsweet.gif', 'braixen.gif', 'braviary.gif', 'breloom.gif', 'brionne.gif', 'bronzong.gif', 'bronzor.gif', 'bruxish.gif', 'budew.gif', 'buizel-f.gif', 'buizel.gif', 'bulbasaur.gif', 'buneary.gif', 'bunnelby.gif', 'burmy-sandy.gif', 'burmy-trash.gif', 'burmy.gif', 'butterfree-gmax.gif', 'butterfree.gif', 'buzzwole.gif', 'cacnea.gif', 'cacturne-f.gif', 'cacturne.gif', 'camerupt-f.gif', 'camerupt-mega.gif', 'camerupt.gif', 'camperupt-mega.gif', 'carbink.gif', 'caribolt.gif', 'carkol.gif', 'carnivine.gif', 'carracosta.gif', 'carvanha.gif', 'cascoon.gif', 'castform-rainy.gif', 'castform-snowy.gif', 'castform-sunny.gif', 'castform.gif', 'caterpie.gif', 'cawmodore.gif', 'celebi.gif', 'celesteela.gif', 'centiskorch-gmax.gif', 'centiskorch.gif', 'chandelure.gif', 'chansey.gif', 'charizard-gmax.gif', 'charizard-megax.gif', 'charizard-megay.gif', 'charizard.gif', 'charjabug.gif', 'charmander.gif', 'charmeleon.gif', 'chatot.gif', 'cherrim-sunshine.gif', 'cherrim.gif', 'cherubi.gif', 'chesnaught.gif', 'chespin.gif', 'chewtle.gif', 'chikorita.gif', 'chimchar.gif', 'chimecho.gif', 'chinchou.gif', 'chingling.gif', 'cinccino.gif', 'cinderace.gif', 'clamperl.gif', 'clauncher.gif', 'clawitzer.gif', 'claydol.gif', 'clefable.gif', 'clefairy.gif', 'cleffa.gif', 'clobbopus.gif', 'cloyster.gif', 'coalossal-gmax.gif', 'coalossal.gif', 'cobalion.gif', 'cofagrigus.gif', 'combee-f.gif', 'combee.gif', 'combusken-f.gif', 'combusken.gif', 'comfey.gif', 'conkeldurr.gif', 'copperajah-gmax.gif', 'copperajah.gif', 'corphish.gif', 'corsola-galar.gif', 'corsola.gif', 'corviknight-gmax.gif', 'corviknight.gif', 'corvisquire.gif', 'cosmoem.gif', 'cosmog.gif', 'cottonee.gif', 'crabominable.gif', 'crabrawler.gif', 'cradily.gif', 'cramorant-gorging.gif', 'cramorant-gulping.gif', 'cramorant.gif', 'cranidos.gif', 'crawdaunt.gif', 'cresselia.gif', 'croagunk-f.gif', 'croagunk.gif', 
'crobat.gif', 'croconaw.gif', 'crucibelle-mega.gif', 'crucibelle.gif', 'crustle.gif', 'cryogonal.gif', 'cubchoo.gif', 'cubone.gif', 'cufant.gif', 'cursola.gif', 'cutiefly.gif', 'cyndaquil.gif', 'darkrai.gif', 'darmanitan-galar.gif', 'darmanitan-galarzen.gif', 'darmanitan-zen.gif', 'darmanitan.gif', 'dartrix.gif', 'darumaka-galar.gif', 'darumaka.gif', 'decidueye.gif', 'dedenne.gif', 'deerling-autumn.gif', 'deerling-summer.gif', 'deerling-winter.gif', 'deerling.gif', 'deino.gif', 'delcatty.gif', 'delibird.gif', 'delphox.gif', 'deoxys-attack.gif', 'deoxys-defense.gif', 'deoxys-speed.gif', 'deoxys.gif', 'dewgong.gif', 'dewott.gif', 'dewpider.gif', 'dhelmise.gif', 'dialga.gif', 'diancie-mega.gif', 'diancie.gif', 'diggersby.gif', 'diglett-alola.gif', 'diglett.gif', 'ditto.gif', 'dodrio.gif', 'doduo.gif', 'donphan-f.gif', 'donphan.gif', 'dottler.gif', 'doublade.gif', 'dracovish.gif', 'dracozolt.gif', 'dragalge.gif', 'dragapult.gif', 'dragonair.gif', 'dragonite.gif', 'drakloak.gif', 'drampa.gif', 'drapion.gif', 'dratini.gif', 'drednaw-gmax.gif', 'drednaw.gif', 'dreepy.gif', 'drifblim.gif', 'drifloon.gif', 'drilbur.gif', 'drizzile.gif', 'drowzee.gif', 'druddigon.gif', 'dubwool.gif', 'ducklett.gif', 'dugtrio-alola.gif', 'dugtrio.gif', 'dunsparce.gif', 'duosion.gif', 'duraludon-gmax.gif', 'duraludon.gif', 'durant.gif', 'dusclops.gif', 'dusknoir.gif', 'duskull.gif', 'dustox-f.gif', 'dustox.gif', 'dwebble.gif', 'eelektrik.gif', 'eelektross.gif', 'eevee-gmax.gif', 'eevee-starter.gif', 'eevee.gif', 'eiscue-noice.gif', 'eiscue.gif', 'ekans.gif', 'eldegoss.gif', 'electabuzz.gif', 'electivire.gif', 'electrike.gif', 'electrode.gif', 'elekid.gif', 'elgyem.gif', 'emboar.gif', 'emolga.gif', 'empoleon.gif', 'entei.gif', 'equilibra.gif', 'escavalier.gif', 'espeon.gif', 'espurr.gif', 'eternatus-eternamax.gif', 'eternatus.gif', 'excadrill.gif', 'exeggcute.gif', 'exeggutor-alola.gif', 'exeggutor.gif', 'exploud.gif', 'falinks.gif', 'farfetchd-galar.gif', 'farfetchd.gif', 'fearow.gif', 
'feebas.gif', 'fennekin.gif', 'feraligatr.gif', 'ferroseed.gif', 'ferrothorn.gif', 'fidgit.gif', 'finneon-f.gif', 'finneon.gif', 'flaaffy.gif', 'flabebe-blue.gif', 'flabebe-orange.gif', 'flabebe-white.gif', 'flabebe-yellow.gif', 'flabebe.gif', 'flapple-gmax.gif', 'flapple.gif', 'flareon.gif', 'fletchinder.gif', 'fletchling.gif', 'floatzel-f.gif', 'floatzel.gif', 'floette-blue.gif', 'floette-eternal.gif', 'floette-orange.gif', 'floette-white.gif', 'floette-yellow.gif', 'floette.gif', 'florges-blue.gif', 'florges-orange.gif', 'florges-white.gif', 'florges-yellow.gif', 'florges.gif', 'flygon.gif', 'fomantis.gif', 'foongus.gif', 'forretress.gif', 'fraxure.gif', 'frillish-f.gif', 'frillish.gif', 'froakie.gif', 'frogadier.gif', 'froslass.gif', 'frosmoth.gif', 'furfrou-dandy.gif', 'furfrou-debutante.gif', 'furfrou-diamond.gif', 'furfrou-heart.gif', 'furfrou-kabuki.gif', 'furfrou-lareine.gif', 'furfrou-matron.gif', 'furfrou-pharaoh.gif', 'furfrou-star.gif', 'furfrou.gif', 'furret.gif', 'gabite-f.gif', 'gabite.gif', 'gallade-mega.gif', 'gallade.gif', 'galvantula.gif', 'garbodor-gmax.gif', 'garbodor.gif', 'garchomp-f.gif', 'garchomp-mega.gif', 'garchomp.gif', 'gardevoir-mega.gif', 'gardevoir.gif', 'gastly.gif', 'gastrodon-east.gif', 'gastrodon.gif', 'genesect-burn.gif', 'genesect-chill.gif', 'genesect-douse.gif', 'genesect-shock.gif', 'genesect.gif', 'gengar-gmax.gif', 'gengar-mega.gif', 'gengar.gif', 'geodude-alola.gif', 'geodude.gif', 'gible-f.gif', 'gible.gif', 'gigalith.gif', 'girafarig-f.gif', 'girafarig.gif', 'giratina-origin.gif', 'giratina.gif', 'glaceon.gif', 'glalie-mega.gif', 'glalie.gif', 'glameow.gif', 'gligar-f.gif', 'gligar.gif', 'gliscor.gif', 'gloom.gif', 'gogoat.gif', 'golbat-f.gif', 'golbat.gif', 'goldeen.gif', 'golduck.gif', 'golem-alola.gif', 'golem.gif', 'golett.gif', 'golisopod.gif', 'golurk.gif', 'goodra.gif', 'goomy.gif', 'gorebyss.gif', 'gossifleur.gif', 'gothita.gif', 'gothitelle.gif', 'gothorita.gif', 'gourgeist-large.gif', 'gourgeist-small.gif', 
'gourgeist-super.gif', 'gourgeist.gif', 'granbull.gif', 'grapploct.gif', 'graveler-alola.gif', 'graveler.gif', 'greedent.gif', 'greninja-ash.gif', 'greninja.gif', 'grimer-alola.gif', 'grimer.gif', 'grimmsnarl-gmax.gif', 'grimmsnarl.gif', 'grookey.gif', 'grotle.gif', 'groudon-primal.gif', 'groudon.gif', 'grovyle.gif', 'growlithe.gif', 'grubbin.gif', 'grumpig.gif', 'gulpin-f.gif', 'gulpin.gif', 'gumshoos-totem.gif', 'gumshoos.gif', 'gurdurr.gif', 'guzzlord.gif', 'gyarados-mega.gif', 'gyarados.gif', 'hakamoo.gif', 'happiny.gif', 'hariyama.gif', 'hatenna.gif', 'hatterene-gmax.gif', 'hatterene.gif', 'hattrem.gif', 'haunter.gif', 'hawlucha.gif', 'haxorus.gif', 'heatmor.gif', 'heatran.gif', 'heliolisk.gif', 'helioptile.gif', 'heracross-f.gif', 'heracross-mega.gif', 'heracross.gif', 'herdier.gif', 'hippopotas-f.gif', 'hippopotas.gif', 'hippowdon-f.gif', 'hippowdon.gif', 'hitmonchan.gif', 'hitmonlee.gif', 'hitmontop.gif', 'honchkrow.gif', 'honedge.gif', 'hooh.gif', 'hoopa-mega.gif', 'hoopa-unbound.gif', 'hoopa.gif', 'hoothoot.gif', 'hoppip.gif', 'horsea.gif', 'houndoom-f.gif', 'houndoom-mega.gif', 'houndoom.gif', 'houndour.gif', 'huntail.gif', 'hydreigon.gif', 'hypno.gif', 'igglybuff.gif', 'illumise.gif', 'impidimp.gif', 'incineroar.gif', 'indeedee-f.gif', 'indeedee.gif', 'infernape.gif', 'inkay.gif', 'inteleon.gif', 'ivysaur.gif', 'jangmoo.gif', 'jellicent-f.gif', 'jellicent.gif', 'jigglypuff.gif', 'jirachi.gif', 'jolteon.gif', 'joltik.gif', 'jumbao.gif', 'jumpluff.gif', 'jynx.gif', 'kabuto.gif', 'kabutops.gif', 'kadabra.gif', 'kakuna.gif', 'kangaskhan-mega.gif', 'kangaskhan.gif', 'karrablast.gif', 'kartana.gif', 'kecleon.gif', 'keldeo-resolute.gif', 'keldeo.gif', 'kerfluffle-f.gif', 'kerfluffle.gif', 'kingdra.gif', 'kingler-gmax.gif', 'kingler.gif', 'kirlia.gif', 'kitsunoh-f.gif', 'kitsunoh.gif', 'klang.gif', 'klefki.gif', 'klink.gif', 'klinklang.gif', 'koffing.gif', 'komala.gif', 'kommo-o-totem.gif', 'kommoo.gif', 'krabby.gif', 'kricketot-f.gif', 'kricketot.gif', 
'kricketune-f.gif', 'kricketune.gif', 'krilowatt.gif', 'krokorok.gif', 'krookodile.gif', 'kyogre-primal.gif', 'kyogre.gif', 'kyurem-black.gif', 'kyurem-white.gif', 'kyurem.gif', 'lairon.gif', 'lampent.gif', 'landorus-therian.gif', 'landorus.gif', 'lanturn.gif', 'lapras-gmax.gif', 'lapras.gif', 'larvesta.gif', 'larvitar.gif', 'latias-mega.gif', 'latias.gif', 'latios-mega.gif', 'latios.gif', 'leafeon.gif', 'leavanny.gif', 'ledian-f.gif', 'ledian.gif', 'ledyba-f.gif', 'ledyba.gif', 'lickilicky.gif', 'lickitung.gif', 'liepard.gif', 'lileep.gif', 'lilligant.gif', 'lillipup.gif', 'linoone-galar.gif', 'linoone.gif', 'litleo.gif', 'litten.gif', 'litwick.gif', 'lombre.gif', 'lopunny-mega.gif', 'lopunny.gif', 'lotad.gif', 'loudred.gif', 'lucario-mega.gif', 'lucario.gif', 'ludicolo-f.gif', 'ludicolo.gif', 'lugia.gif', 'lumineon-f.gif', 'lumineon.gif', 'lunala.gif', 'lunatone.gif', 'lurantis-totem.gif', 'lurantis.gif', 'luvdisc.gif', 'luxio-f.gif', 'luxio.gif', 'luxray-f.gif', 'luxray.gif', 'lycanroc-dusk.gif', 'lycanroc-midnight.gif', 'lycanroc.gif', 'machamp-gmax.gif', 'machamp.gif', 'machoke.gif', 'machop.gif', 'magby.gif', 'magcargo.gif', 'magearna-original.gif', 'magearna.gif', 'magikarp-f.gif', 'magikarp.gif', 'magmar.gif', 'magmortar.gif', 'magnemite.gif', 'magneton.gif', 'magnezone.gif', 'makuhita.gif', 'malaconda.gif', 'malamar.gif', 'mamoswine-f.gif', 'mamoswine.gif', 'manaphy.gif', 'mandibuzz.gif', 'manectric-mega.gif', 'manectric.gif', 'mankey.gif', 'mantine.gif', 'mantyke.gif', 'maractus.gif', 'mareanie.gif', 'mareep.gif', 'marill.gif', 'marowak-alola-totem.gif', 'marowak-alola.gif', 'marowak-totem.gif', 'marowak.gif', 'marshadow.gif', 'marshtomp.gif', 'masquerain.gif', 'mawile-mega.gif', 'mawile.gif', 'medicham-f.gif', 'medicham-mega.gif', 'medicham.gif', 'meditite-f.gif', 'meditite.gif', 'meganium-f.gif', 'meganium.gif', 'melmetal-gmax.gif', 'melmetal.gif', 'meloetta-pirouette.gif', 'meloetta.gif', 'meltan.gif', 'meowstic-f.gif', 'meowstic.gif', 
'meowth-alola.gif', 'meowth-galar.gif', 'meowth-gmax.gif', 'meowth.gif', 'mesprit.gif', 'metagross-mega.gif', 'metagross.gif', 'metang.gif', 'metapod.gif', 'mew.gif', 'mewtwo-megax.gif', 'mewtwo-megay.gif', 'mewtwo.gif', 'mienfoo.gif', 'mienshao.gif', 'mightyena.gif', 'milcery.gif', 'milotic-f.gif', 'milotic.gif', 'miltank.gif', 'mimejr.gif', 'mimikyu-busted-totem.gif', 'mimikyu-busted.gif', 'mimikyu-totem.gif', 'mimikyu.gif', 'minccino.gif', 'minior-blue.gif', 'minior-green.gif', 'minior-indigo.gif', 'minior-meteor.gif', 'minior-orange.gif', 'minior-violet.gif', 'minior-yellow.gif', 'minior.gif', 'minun.gif', 'misdreavus.gif', 'mismagius.gif', 'mollux.gif', 'moltres.gif', 'monferno.gif', 'morelull.gif', 'morgrem.gif', 'morpeko-hangry.gif', 'morpeko.gif', 'mothim.gif', 'mrmime-galar.gif', 'mrmime.gif', 'mrrime.gif', 'mudbray.gif', 'mudkip.gif', 'mudsdale.gif', 'muk-alola.gif', 'muk.gif', 'munchlax.gif', 'munna.gif', 'murkrow-f.gif', 'murkrow.gif', 'musharna.gif', 'naganadel.gif', 'natu.gif', 'naviathan.gif', 'necrozma-dawnwings.gif', 'necrozma-duskmane.gif', 'necrozma-ultra.gif', 'necrozma.gif', 'necturna.gif', 'nickit.gif', 'nidoking.gif', 'nidoqueen.gif', 'nidoran-f.gif', 'nidoran.gif', 'nidoranf.gif', 'nidoranm.gif', 'nidorina.gif', 'nidorino.gif', 'nihilego.gif', 'nincada.gif', 'ninetales-alola.gif', 'ninetales.gif', 'ninjask.gif', 'noctowl.gif', 'noibat.gif', 'noivern.gif', 'nosepass.gif', 'numel-f.gif', 'numel.gif', 'nuzleaf-f.gif', 'nuzleaf.gif', 'obstagoon.gif', 'octillery-f.gif', 'octillery.gif', 'oddish.gif', 'omanyte.gif', 'omastar.gif', 'onix.gif', 'oranguru.gif', 'orbeetle-gmax.gif', 'orbeetle.gif', 'oricorio-pau.gif', 'oricorio-pompom.gif', 'oricorio-sensu.gif', 'oricorio.gif', 'oshawott.gif', 'pachirisu-f.gif', 'pachirisu.gif', 'pajantom.gif', 'palkia.gif', 'palossand.gif', 'palpitoad.gif', 'pancham.gif', 'pangoro.gif', 'panpour.gif', 'pansage.gif', 'pansear.gif', 'paras.gif', 'parasect.gif', 'passimian.gif', 'patrat.gif', 'pawniard.gif', 
'pelipper.gif', 'perrserker.gif', 'persian-alola.gif', 'persian.gif', 'petilil.gif', 'phanpy.gif', 'phantump.gif', 'pheromosa.gif', 'phione.gif', 'pichu.gif', 'pidgeot-mega.gif', 'pidgeot.gif', 'pidgeotto.gif', 'pidgey.gif', 'pidove.gif', 'pignite.gif', 'pikachu-alola.gif', 'pikachu-belle.gif', 'pikachu-cosplay.gif', 'pikachu-f.gif', 'pikachu-gmax.gif', 'pikachu-hoenn.gif', 'pikachu-kalos.gif', 'pikachu-libre.gif', 'pikachu-original.gif', 'pikachu-partner.gif', 'pikachu-phd.gif', 'pikachu-pop-star.gif', 'pikachu-popstar.gif', 'pikachu-rockstar.gif', 'pikachu-sinnoh.gif', 'pikachu-starter-f.gif', 'pikachu-starter.gif', 'pikachu-unova.gif', 'pikachu.gif', 'pikipek.gif', 'piloswine-f.gif', 'piloswine.gif', 'pincurchin.gif', 'pineco.gif', 'pinsir-mega.gif', 'pinsir.gif', 'piplup.gif', 'plasmanta.gif', 'pluffle.gif', 'plusle.gif', 'poipole.gif', 'pokestarblackbelt.gif', 'pokestarblackdoor.gif', 'pokestarbrycenman.gif', 'pokestarf00.gif', 'pokestarf002.gif', 'pokestargiant.gif', 'pokestarhumanoid.gif', 'pokestarmonster.gif', 'pokestarmt.gif', 'pokestarmt2.gif', 'pokestarsmeargle.gif', 'pokestarspirit.gif', 'pokestartransport.gif', 'pokestarufo-2.gif', 'pokestarufo.gif', 'pokestarufo2.gif', 'pokestarwhitedoor.gif', 'politoed-f.gif', 'politoed.gif', 'poliwag.gif', 'poliwhirl.gif', 'poliwrath.gif', 'polteageist-antique.gif', 'polteageist.gif', 'ponyta-galar.gif', 'ponyta.gif', 'poochyena.gif', 'popplio.gif', 'porygon-z.gif', 'porygon.gif', 'porygon2.gif', 'porygonz.gif', 'primarina.gif', 'primeape.gif', 'prinplup.gif', 'probopass.gif', 'psyduck.gif', 'pumpkaboo-large.gif', 'pumpkaboo-small.gif', 'pumpkaboo-super.gif', 'pumpkaboo.gif', 'pupitar.gif', 'purrloin.gif', 'purugly.gif', 'pyroak.gif', 'pyroar-f.gif', 'pyroar.gif', 'pyukumuku.gif', 'quagsire-f.gif', 'quagsire.gif', 'quilava.gif', 'quilladin.gif', 'qwilfish.gif', 'raboot.gif', 'raichu-alola.gif', 'raichu.gif', 'raikou.gif', 'ralts.gif', 'rampardos.gif', 'rapidash-galar.gif', 'rapidash.gif', 
'raticate-alola-totem.gif', 'raticate-alola.gif', 'raticate-f.gif', 'raticate-totem-a.gif', 'raticate.gif', 'rattata-alola.gif', 'rattata-f.gif', 'rattata.gif', 'rayquaza-mega.gif', 'rayquaza.gif', 'regice.gif', 'regigigas.gif', 'regirock.gif', 'registeel.gif', 'relicanth-f.gif', 'relicanth.gif', 'remoraid.gif', 'reshiram.gif', 'reuniclus.gif', 'rhydon-f.gif', 'rhydon.gif', 'rhyhorn.gif', 'rhyperior-f.gif', 'rhyperior.gif', 'ribombee.gif', 'rillaboom.gif', 'riolu.gif', 'rockruff.gif', 'roggenrola.gif', 'rolycoly.gif', 'rookidee.gif', 'roselia-f.gif', 'roselia.gif', 'roserade-f.gif', 'roserade.gif', 'rotom-f.gif', 'rotom-fan.gif', 'rotom-frost.gif', 'rotom-h.gif', 'rotom-heat.gif', 'rotom-m.gif', 'rotom-mow.gif', 'rotom-s.gif', 'rotom-w.gif', 'rotom-wash.gif', 'rotom.gif', 'rowlet.gif', 'rufflet.gif', 'runerigus.gif', 'sableye-mega.gif', 'sableye.gif', 'salamence-mega.gif', 'salamence.gif', 'salandit.gif', 'salazzle-totem.gif', 'salazzle.gif', 'samurott.gif', 'sandaconda-gmax.gif', 'sandaconda.gif', 'sandile.gif', 'sandshrew-alola.gif', 'sandshrew.gif', 'sandslash-alola.gif', 'sandslash.gif', 'sandygast.gif', 'sawk.gif', 'sawsbuck-autumn.gif', 'sawsbuck-summer.gif', 'sawsbuck-winter.gif', 'sawsbuck.gif', 'scatterbug.gif', 'sceptile-mega.gif', 'sceptile.gif', 'scizor-f.gif', 'scizor-mega.gif', 'scizor.gif', 'scolipede.gif', 'scorbunny.gif', 'scrafty.gif', 'scraggy.gif', 'scratchet.gif', 'scyther-f.gif', 'scyther.gif', 'seadra.gif', 'seaking.gif', 'sealeo.gif', 'seedot.gif', 'seel.gif', 'seismitoad.gif', 'sentret.gif', 'serperior.gif', 'servine.gif', 'seviper.gif', 'sewaddle.gif', 'shapedo-mega.gif', 'sharpedo-mega.gif', 'sharpedo.gif', 'shaymin-sky.gif', 'shaymin.gif', 'shedinja.gif', 'shelgon.gif', 'shellder.gif', 'shellos-east.gif', 'shellos.gif', 'shelmet.gif', 'shieldon.gif', 'shiftry-f.gif', 'shiftry.gif', 'shiinotic.gif', 'shinx-f.gif', 'shinx.gif', 'shroomish.gif', 'shuckle.gif', 'shuppet.gif', 'sigilyph.gif', 'silcoon.gif', 'silicobra.gif', 
'silvally-bug.gif', 'silvally-dark.gif', 'silvally-dragon.gif', 'silvally-electric.gif', 'silvally-fairy.gif', 'silvally-fighting.gif', 'silvally-fire.gif', 'silvally-flying.gif', 'silvally-ghost.gif', 'silvally-grass.gif', 'silvally-ground.gif', 'silvally-ice.gif', 'silvally-poison.gif', 'silvally-psychic.gif', 'silvally-rock.gif', 'silvally-steel.gif', 'silvally-water.gif', 'silvally.gif', 'simipour.gif', 'simisage.gif', 'simisear.gif', 'sinistea-antique.gif', 'sinistea.gif', 'sirfetchd.gif', 'sizzlipede.gif', 'skarmory.gif', 'skiddo.gif', 'skiploom.gif', 'skitty.gif', 'skorupi.gif', 'skrelp.gif', 'skuntank.gif', 'skwovet.gif', 'slaking.gif', 'slakoth.gif', 'sliggoo.gif', 'slowbro-mega.gif', 'slowbro.gif', 'slowking.gif', 'slowpoke-galar.gif', 'slowpoke.gif', 'slugma.gif', 'slurpuff.gif', 'smeargle.gif', 'smogecko.gif', 'smoguana.gif', 'smokomodo.gif', 'smoochum.gif', 'snaelstrom.gif', 'sneasel-f.gif', 'sneasel.gif', 'snivy.gif', 'snom.gif', 'snorlax-gmax.gif', 'snorlax.gif', 'snornut.gif', 'snorunt.gif', 'snover-f.gif', 'snover.gif', 'snubbull.gif', 'sobble.gif', 'solgaleo.gif', 'solosis.gif', 'solrock.gif', 'spearow.gif', 'spewpa.gif', 'spheal.gif', 'spinarak.gif', 'spinda.gif', 'spiritomb.gif', 'spoink.gif', 'spritzee.gif', 'squirtle.gif', 'stakataka.gif', 'stantler.gif', 'staraptor-f.gif', 'staraptor.gif', 'staravia-f.gif', 'staravia.gif', 'starly-f.gif', 'starly.gif', 'starmie.gif', 'staryu.gif', 'steelix-f.gif', 'steelix-mega.gif', 'steelix.gif', 'steenee.gif', 'stonjourner.gif', 'stoutland.gif', 'stratagem.gif', 'stufful.gif', 'stunfisk-galar.gif', 'stunfisk.gif', 'stunky.gif', 'substitute.gif', 'sudowoodo-f.gif', 'sudowoodo.gif', 'suicune.gif', 'sunflora.gif', 'sunkern.gif', 'surskit.gif', 'swablu.gif', 'swadloon.gif', 'swalot-f.gif', 'swalot.gif', 'swampert-mega.gif', 'swampert.gif', 'swanna.gif', 'swellow.gif', 'swinub.gif', 'swirlix.gif', 'swoobat.gif', 'sylveon.gif', 'taillow.gif', 'talonflame.gif', 'tangela.gif', 'tangrowth-f.gif', 'tangrowth.gif', 
'tapubulu.gif', 'tapufini.gif', 'tapukoko.gif', 'tapulele.gif', 'tauros.gif', 'teddiursa.gif', 'tentacool.gif', 'tentacruel.gif', 'tepig.gif', 'terrakion.gif', 'thievul.gif', 'throh.gif', 'thundurus-therian.gif', 'thundurus.gif', 'thwackey.gif', 'timburr.gif', 'tirtouga.gif', 'togedemaru-totem.gif', 'togedemaru.gif', 'togekiss.gif', 'togepi.gif', 'togetic.gif', 'tomohawk.gif', 'torchic-f.gif', 'torchic.gif', 'torkoal.gif', 'tornadus-therian.gif', 'tornadus.gif', 'torracat.gif', 'torterra.gif', 'totodile.gif', 'toucannon.gif', 'toxapex.gif', 'toxel.gif', 'toxicroak-f.gif', 'toxicroak.gif', 'toxtricity-gmax.gif', 'toxtricity-lowkey.gif', 'toxtricity.gif', 'tranquill.gif', 'trapinch.gif', 'treecko.gif', 'trevenant.gif', 'tropius.gif', 'trubbish.gif', 'trumbeak.gif', 'tsareena.gif', 'turtonator.gif', 'turtwig.gif', 'tympole.gif', 'tynamo.gif', 'typenull.gif', 'typhlosion.gif', 'tyranitar-mega.gif', 'tyranitar.gif', 'tyrantrum.gif', 'tyrantum.gif', 'tyrogue.gif', 'tyrunt.gif', 'umbreon.gif', 'unfezant-f.gif', 'unfezant.gif', 'unown-b.gif', 'unown-c.gif', 'unown-d.gif', 'unown-e.gif', 'unown-exclamation.gif', 'unown-f.gif', 'unown-g.gif', 'unown-h.gif', 'unown-i.gif', 'unown-j.gif', 'unown-k.gif', 'unown-l.gif', 'unown-m.gif', 'unown-n.gif', 'unown-o.gif', 'unown-p.gif', 'unown-q.gif', 'unown-question.gif', 'unown-r.gif', 'unown-s.gif', 'unown-t.gif', 'unown-u.gif', 'unown-v.gif', 'unown-w.gif', 'unown-x.gif', 'unown-y.gif', 'unown-z.gif', 'unown.gif', 'ursaring-f.gif', 'ursaring.gif', 'uxie.gif', 'vanillish.gif', 'vanillite.gif', 'vanilluxe.gif', 'vaporeon.gif', 'venipede.gif', 'venomoth.gif', 'venonat.gif', 'venusaur-mega.gif', 'venusaur.gif', 'vespiquen.gif', 'vibrava.gif', 'victini.gif', 'victreebel.gif', 'vigoroth.gif', 'vikavolt-totem.gif', 'vikavolt.gif', 'vileplume.gif', 'virizion.gif', 'vivillon-archipelago.gif', 'vivillon-continental.gif', 'vivillon-elegant.gif', 'vivillon-fancy.gif', 'vivillon-garden.gif', 'vivillon-highplains.gif', 'vivillon-icysnow.gif', 
'vivillon-jungle.gif', 'vivillon-marine.gif', 'vivillon-modern.gif', 'vivillon-monsoon.gif', 'vivillon-ocean.gif', 'vivillon-pokeball.gif', 'vivillon-polar.gif', 'vivillon-river.gif', 'vivillon-sandstorm.gif', 'vivillon-savanna.gif', 'vivillon-sun.gif', 'vivillon-tundra.gif', 'vivillon.gif', 'volbeat.gif', 'volcanion.gif', 'volcarona.gif', 'volkraken.gif', 'voltorb.gif', 'vullaby.gif', 'vulpix-alola.gif', 'vulpix.gif', 'wailmer.gif', 'wailord.gif', 'walrein.gif', 'wartortle.gif', 'watchog.gif', 'weavile-f.gif', 'weavile.gif', 'weedle.gif', 'weepinbell.gif', 'weezing-galar.gif', 'weezing.gif', 'whimsicott.gif', 'whirlipede.gif', 'whiscash.gif', 'whismur.gif', 'wigglytuff.gif', 'wimpod.gif', 'wingull.gif', 'wishiwashi-school.gif', 'wishiwashi.gif', 'wobbuffet-f.gif', 'wobbuffet.gif', 'woobat.gif', 'wooloo.gif', 'wooper-f.gif', 'wooper.gif', 'wormadam-sandy.gif', 'wormadam-trash.gif', 'wormadam.gif', 'wurmple.gif', 'wynaut.gif', 'xatu-f.gif', 'xatu.gif', 'xerneas-neutral.gif', 'xerneas.gif', 'xurkitree.gif', 'yamask-galar.gif', 'yamask.gif', 'yamper.gif', 'yanma.gif', 'yanmega.gif', 'yungoos.gif', 'yveltal.gif', 'zacian-crowned.gif', 'zacian.gif', 'zamazenta-crowned.gif', 'zamazenta.gif', 'zangoose.gif', 'zapdos.gif', 'zebstrika.gif', 'zekrom.gif', 'zeraora.gif', 'zigzagoon-galar.gif', 'zigzagoon.gif', 'zoroark.gif', 'zorua.gif', 'zubat-f.gif', 'zubat.gif', 'zweilous.gif', 'zygarde-10.gif', 'zygarde-complete.gif', 'zygarde.gif']
# Fetch every sprite listed in `filename` from Pokemon Showdown into
# files/pokemon/front (directory is assumed to exist -- TODO confirm).
for i in filename:
    url = 'https://play.pokemonshowdown.com/sprites/ani/{}'.format(i)
    file_name = str(i[:-4])  # strip the trailing '.gif' extension
    dl_img(url, 'files/pokemon/front', file_name)
|
import webbrowser
import time

# Open the video a fixed number of times, waiting five hours between openings.
REPEATS = 3
for _ in range(REPEATS):
    webbrowser.open('https://www.youtube.com/watch?v=GoSBNNgf_Vc')
    time.sleep(5 * 60 * 60)
|
8,392 | 689c6c646311eba1faa93cc72bbe1ee4592e45bc | #!/usr/bin/env python3
from typing import ClassVar, List
print(1, 2)
# Annotated function (Issue #29)
def foo(x: int) -> int:
return x + 1
# Annotated variables #575
CONST: int = 42
class Class:
cls_var: ClassVar[str]
def m(self):
xs: List[int] = []
# True and False are keywords in Python 3 and therefore need a space.
#: E275:13 E275:14
norman = True+False
#: E302+3:0
def a():
pass
async def b():
pass
# Okay
async def add(a: int = 0, b: int = 0) -> int:
return a + b
# Previously E251 four times
#: E221:5
async def add(a: int = 0, b: int = 0) -> int:
return a + b
# Previously just E272+1:5 E272+4:5
#: E302+3 E221:5 E221+3:5
async def x():
pass
async def x(y: int = 1):
pass
#: E704:16
async def f(x): return 2
a[b1, :] == a[b1, ...]
# Annotated Function Definitions
# Okay
def munge(input: AnyStr, sep: AnyStr = None, limit=1000,
extra: Union[str, dict] = None) -> AnyStr:
pass
#: E225:24 E225:26
def x(b: tuple = (1, 2))->int:
return a + b
#: E252:11 E252:12 E231:8
def b(a:int=1):
pass
if alpha[:-i]:
*a, b = (1, 2, 3)
# Named only arguments
def foo(*, asdf):
pass
def foo2(bar, *, asdf=2):
pass
|
8,393 | a9947884e805cc8fcb6bff010a5f6e0ff0bb01fe | import math
import numpy as np
# import tkinter
import tensorflow as tf
from matplotlib import axis
import os
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.cluster import KMeans
from sklearn.metrics import confusion_matrix
class MD(BaseEstimator, TransformerMixin):
    """Unsupervised malware/benign detector built on a TensorFlow 1.x graph.

    An embedding + 3 dense layers maps samples to a latent space; K-means on
    the latent points provides per-batch pseudo-labels (cluster centroids)
    that the network regresses toward (MSE loss). Test samples are labeled by
    euclidean distance to the final benign/malware centroids.
    """

    def __init__(self, data, input_size, epoch,
                 batch_size, iteration, alpha=1.0, n_neg_samples=10,
                 random_seed=2020):
        # bind params to class
        # network parameters.
        # NOTE(review): `alpha` and `n_neg_samples` are accepted but never
        # stored or used anywhere in this class — confirm whether they can go.
        self.iteration = iteration
        self.epoch = epoch
        self.batch_size = batch_size
        self.learning_rate = 0.01
        self.random_seed = random_seed
        self.phase = True  # batch-norm/dropout "training" flag; final_fit flips it to False
        self.first_layer_size = 256
        self.second_layer_size = 128
        self.third_layer_size = 128
        self.input_size = input_size
        # data. `data` is a 4-tuple: (train benign, train malware, test benign, test malware),
        # each a dict with 'data' and 'label' arrays (labels are column vectors of 0/1).
        self.X_train_ben = data[0]
        self.X_train_mal = data[1]
        self.X_test_ben = data[2]
        self.X_test_mal = data[3]
        # evaluation.
        self.accuracy_list = []  # accuracy during training
        self.fmeasure_list = []  # fmeasure during training
        self.clusters_dist = []  # distance between clusters centroid
        self.evaluation_metrics_list = {'accuracy': [], 'precision': [], 'recall': [],
                                        'fmeasure': []}  # evaluation metrics of test data for all epochs
        self.FinalCenters = {'benignCenter': 0, 'malwareCenter': 0}
        # init all variables in a tensorflow graph
        self._init_graph()

    def _init_graph(self):
        '''
        Init a tensorflow Graph containing: input data, variables, model, loss, optimizer
        '''
        self.graph = tf.Graph()
        with self.graph.as_default():  # , tf.device('/cpu:0'):
            # Set graph level random seed.
            tf.set_random_seed(self.random_seed)
            # Input data.
            self.train_data = tf.placeholder(tf.float32,
                                             shape=[None, self.input_size])  # batch_size * input_size
            self.train_labels = tf.placeholder(tf.float32, shape=[None, 1])  # batch_size * 1
            self.train_labels_center = tf.placeholder(tf.float32, shape=[None,
                                                                         self.third_layer_size])  # batch_size * third_layer_size
            self.train_labels_center_disagree = tf.placeholder(tf.float32, shape=[None,
                                                                                  self.third_layer_size])  # batch_size * third_layer_size
            # Variables.
            self.weights = self._initialize_weights()
            # the embedding layer. Each input byte (0-255) maps to a 32-dim vector;
            # flattened output is input_size * 32 wide (324 * 32 = 10368 in the comments below).
            self.embedding_layer = tf.keras.layers.Embedding(256, 32, input_length=324)
            self.embedding_result = self.embedding_layer(self.train_data)
            self.embedding_result = tf.layers.Flatten()(self.embedding_result)
            # the first hidden layer.
            self.net1 = tf.matmul(self.embedding_result, self.weights['layer1'])  # batch_size * first_layer_size
            self.layer1 = tf.layers.batch_normalization(self.net1, training=self.phase)
            self.layer1 = tf.nn.tanh(self.layer1)
            # the second hidden layer.
            self.net2 = tf.matmul(self.layer1, self.weights['layer2'])
            self.net2 = tf.layers.batch_normalization(self.net2, training=self.phase)
            self.net2 = tf.nn.relu(self.net2)
            self.layer2 = tf.layers.dropout(self.net2, rate=0.3, training=self.phase)
            # the third hidden layer (latent representation used for clustering).
            self.net3 = tf.matmul(self.layer2, self.weights['layer3'])
            self.layer3 = tf.nn.tanh(self.net3)
            # loss function: pull each latent point toward its cluster's centroid.
            self.cross_entropy = tf.reduce_mean(tf.losses.mean_squared_error(self.train_labels_center, self.layer3))
            # optimizer.
            self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.cross_entropy)
            # init.
            self.init = tf.initialize_all_variables()
            self.sess = tf.Session()
            self.sess.run(self.init)

    def _initialize_weights(self):
        """Create the dense-layer weight variables.

        NOTE(review): 'layer1'/'layer2'/'layer3' are assigned three times
        (random_normal, then random_uniform, then get_variable); only the
        final get_variable set is used by the graph — the first two blocks
        are dead code left from experiments. Confirm and remove.
        """
        self.all_weights = dict()
        self.all_weights['layer1'] = tf.Variable(
            tf.random.normal([10368, self.first_layer_size], mean=0.0, stddev=1))  # input_size * attr_dim
        self.all_weights['layer2'] = tf.Variable(
            tf.random.normal([self.first_layer_size, self.second_layer_size], mean=0.0,
                             stddev=1))  # input_size * attr_dim
        self.all_weights['layer3'] = tf.Variable(
            tf.random.normal([self.second_layer_size, self.third_layer_size], mean=0.0,
                             stddev=1))  # input_size * attr_dim
        self.all_weights['layer1'] = tf.Variable(
            tf.random.uniform([10368, self.first_layer_size], minval=-1,
                              maxval=1))  # input_size * attr_dim
        self.all_weights['layer2'] = tf.Variable(
            tf.random.uniform([self.first_layer_size, self.second_layer_size], minval=-1,
                              maxval=1))  # input_size * attr_dim
        self.all_weights['layer3'] = tf.Variable(
            tf.random.uniform([self.second_layer_size, self.third_layer_size], minval=-1,
                              maxval=1))  # input_size * attr_dim
        # --------------------------------------------------------------------------
        self.all_weights['layer1'] = tf.get_variable("w", [32 * self.input_size, self.first_layer_size],
                                                     initializer=tf.initializers.random_normal(mean=0, stddev=0.8),
                                                     regularizer=tf.keras.regularizers.l2(
                                                         0.01))  # input_size * attr_dim
        self.all_weights['layer2'] = tf.get_variable("w2", [self.first_layer_size, self.second_layer_size],
                                                     initializer=tf.initializers.random_normal(mean=0,
                                                                                               stddev=0.8),
                                                     regularizer=tf.keras.regularizers.l2(
                                                         0.01))  # input_size * attr_dim
        self.all_weights['layer3'] = tf.get_variable("w3", [self.second_layer_size, self.third_layer_size],
                                                     initializer=tf.initializers.random_normal(mean=0, stddev=0.8),
                                                     regularizer=tf.keras.regularizers.l2(
                                                         0.01))  # input_size * attr_dim
        return self.all_weights

    def kmeans_clustering(self, point, size, true_labels):
        """Cluster latent points into 2 groups and return per-sample target centroids.

        The centroid assigned to each sample depends on its true label, so the
        network is trained to move benign samples toward the 'benign' centroid
        and malware toward the 'malware' one.
        """
        self.kmeans = KMeans(n_clusters=2, random_state=10, init='k-means++', n_init=20).fit(point)
        self.kmeans_labels = self.kmeans.labels_
        # find index of samples that are in the first cluster
        self.label_list_0 = np.where(self.kmeans_labels == 0)[0]
        # get labels of samples that are in the first cluster
        temp = [true_labels[i][0] for i in self.label_list_0]
        # Appending the sentinel 2 guarantees np.bincount returns at least 3
        # slots, so counts[0]/counts[1] are always valid even for a pure cluster.
        temp.append(2)
        # determine label(cluster center) of benign and malware group based on the majority samples in each cluster
        counts = np.bincount(temp)
        if counts[0] > counts[1]:  # counts[0] : number of benign in the first cluster
            benign_center = self.kmeans.cluster_centers_[0]
            malware_center = self.kmeans.cluster_centers_[1]
        else:
            benign_center = self.kmeans.cluster_centers_[1]
            malware_center = self.kmeans.cluster_centers_[0]
        # set label for each sample
        new_labels = np.zeros((size, self.third_layer_size))
        for i in range(size):
            if true_labels[i][0] == 0.0:
                new_labels[i] = benign_center
            else:
                new_labels[i] = malware_center
        self.FinalCenters = {'benignCenter': benign_center, 'malwareCenter': malware_center}
        return new_labels

    def partial_fit(self, X):  # fit a batch
        """Run one training step on a batch: embed, cluster, regress to centroids."""
        # get network output.
        feed_dict = {self.train_data: X['batch_data_train']}
        self.points = self.sess.run((self.layer3), feed_dict=feed_dict)
        # apply clustering to find expected output.
        new_labels = self.kmeans_clustering(self.points, len(X['batch_data_label']), X['batch_data_label'])
        self.clusters_dist.append(np.linalg.norm(self.kmeans.cluster_centers_[0] - self.kmeans.cluster_centers_[1]))
        feed_dicts = {self.train_data: X['batch_data_train'],
                      self.train_labels_center: new_labels}
        loss, opt = self.sess.run((self.cross_entropy, self.train_step), feed_dict=feed_dicts)
        # print(loss)
        # print('------------')
        metrics = self.evaluate(X['batch_data_label'], self.kmeans_labels, len((X['batch_data_label'])))
        self.accuracy_list.append(metrics[0])
        self.fmeasure_list.append(metrics[3])
        return loss

    def evaluate(self, true_labels, kmeans_labels, size):
        """
        :param true_labels: label of malware and benign samples as a 2D array(number of samples * 1) of 0 and 1
        :param kmeans_labels: contains a list of 0 and 1 that each cell shows the sample cluster number
        :param size: number of samples
        :return: accuracy, precision, recall, f_measure
        """
        # find index of samples that are in the first cluster
        self.label_list_0 = np.where(kmeans_labels == 0)[0]
        self.label_list_1 = np.where(kmeans_labels == 1)[0]
        # get labels of samples that are in the first cluster
        temp = [true_labels[i][0] for i in self.label_list_0]
        temp1 = [true_labels[i][0] for i in self.label_list_1]
        # Sentinel 2 keeps bincount's length >= 3 even for single-class clusters.
        temp1.append(2)
        temp.append(2)
        # determine label(cluster center) of benign and malware group based on the majority samples in each cluster
        counts = np.bincount(temp)
        counts2 = np.bincount(temp1)
        if counts[0] > counts[1]:
            accuracy = (counts[0] + counts2[1]) / size
            precision = counts2[1] / (counts2[1] + counts2[0])
            recall = counts2[1] / (counts2[1] + counts[1])
            f_measure = 2 * ((precision * recall) / (precision + recall))
        else:
            accuracy = (counts[1] + counts2[0]) / size
            precision = counts[1] / (counts[1] + counts[0])
            recall = counts[1] / (counts[1] + counts2[1])
            f_measure = 2 * ((precision * recall) / (precision + recall))
        return accuracy, precision, recall, f_measure

    def final_fit(self, X, true_labels):
        """Evaluate on test data: label each point by its nearest final centroid."""
        self.phase = False  # NOTE(review): graph was built with phase=True; this flag is not a graph input, so BN/dropout may still run in training mode — verify.
        # get network output for test data.
        feed_dict = {self.train_data: X['data_test']}
        self.points = self.sess.run(self.layer3, feed_dict=feed_dict)
        # determine label of each test sample based on the euclidean distance
        self.predicted_Labels = []
        for i in range(len(true_labels)):
            if np.linalg.norm(self.FinalCenters['benignCenter'] - self.points[i]) < np.linalg.norm(
                    self.FinalCenters['malwareCenter'] - self.points[i]):
                self.predicted_Labels.append([0])
            else:
                self.predicted_Labels.append([1])
        tn, fp, fn, tp = confusion_matrix(true_labels, self.predicted_Labels).ravel()
        accuracy = (tp + tn) / (tp + tn + fn + fp)
        precision = tp / (tp + fp)
        recall = tp / (tp + fn)
        f_measure = 2 * (precision * recall) / (precision + recall)
        # NOTE(review): np.float is removed in NumPy >= 1.20; these lines will
        # raise AttributeError on modern NumPy — plain float() is equivalent.
        self.evaluation_metrics_list['accuracy'].append(np.float("{0:.4f}".format(accuracy)))
        self.evaluation_metrics_list['precision'].append(np.float("{0:.4f}".format(precision)))
        self.evaluation_metrics_list['recall'].append(np.float("{0:.4f}".format(recall)))
        self.evaluation_metrics_list['fmeasure'].append(np.float("{0:.4f}".format(f_measure)))
        print("accuracy", "precision", "recall", "f_measure", sep="\t\t\t\t\t")
        print(accuracy, precision, recall, f_measure, sep="\t\t\t")
        return 0

    def train(self):  # fit a dataset
        """Run the full training loop (iterations x epochs x batches) and return metrics."""
        for iter in range(self.iteration):  # NOTE(review): shadows builtin iter()
            self.log("iteration {} ".format(iter))
            for epoch in range(self.epoch):
                self.accuracy_list = []
                self.fmeasure_list = []
                self.clusters_dist = []
                self.log("epoch %s" % (epoch))
                total_batches = int(len(self.X_train_ben['data']) / self.batch_size)
                self.log('total_batches in epoch %s : %s ' % (epoch, total_batches))
                start_index = 0
                end_index = start_index + self.batch_size
                self.counter = 0
                # Loop over all batches.
                for i in range(total_batches + 1):
                    self.counter += 1
                    # generate a batch data: equal slices of benign and malware samples
                    batch_xs = {}
                    batch_xs['batch_data_train'] = np.concatenate(
                        [self.X_train_ben['data'][start_index:end_index], \
                         self.X_train_mal['data'][start_index:end_index]])
                    batch_xs['batch_data_label'] = np.concatenate(
                        [self.X_train_ben['label'][start_index:end_index], \
                         self.X_train_mal['label'][start_index:end_index]])
                    # Fit training using batch data
                    end_index = end_index + self.batch_size
                    cost = self.partial_fit(batch_xs)
            # test
            batch_test = {}
            batch_test["data"] = np.concatenate([self.X_test_ben['data'], self.X_test_mal['data']])
            batch_test["label"] = np.concatenate([self.X_test_ben['label'], self.X_test_mal['label']])
            self.final_fit(batch_test, batch_test["label"])
            # init all variables in a tensorflow graph for the next fold
            self.sess.run(self.init)
        return self.accuracy_list, self.fmeasure_list, self.clusters_dist, self.evaluation_metrics_list

    def log(self, message):
        """Print a progress message (thin wrapper so logging can be redirected later)."""
        print(message)

    def write_result_to_file(self, variable, message):
        """Append mean/variance of the collected metrics to results/<batch_size>/results.txt."""
        # file = open('result.txt', 'a+')
        file = open('results/' + str(self.batch_size) + '/results.txt', 'a+')
        file.write(message + "\n")
        file.write(str(np.mean(variable['accuracy'])) + '+' + str(np.var(variable['accuracy'])) + '\t' + str(
            np.mean(variable['precision'])) + '\t' + str(
            np.mean(variable['recall'])) + '\t' + str(
            np.mean(variable['fmeasure'])) + '+' + str(np.var(variable['fmeasure'])) + '\n')
|
8,394 | 86f33895e9ae0e026d7d6e40e611796b2dc2c713 | """@brief the routes for Flask application
"""
import hashlib
import json
import time
import requests
from flask import render_template, url_for
from soco import SoCo
from app import app
app.config.from_pyfile("settings.py")
sonos = SoCo(app.config["SPEAKER_IP"])
def gen_sig():
    """Return the Rovi API request signature.

    The signature is the MD5 hex digest of api-key + shared-secret + the
    current unix timestamp (as an integer repr), per Rovi's signing scheme.
    """
    raw = (
        app.config["ROVI_API_KEY"]
        + app.config["ROVI_SHARED_SECRET"]
        + repr(int(time.time()))
    )
    return hashlib.md5(raw.encode("utf-8")).hexdigest()
def get_track_image(artist, album):
    """Look up album cover art via the Rovi match API.

    Returns the URL of the front-cover image, or the bundled blank image when
    the Rovi credentials are missing, the request fails, or no match is found.
    """
    blank_image = url_for("static", filename="img/blank.jpg")
    # Without both credentials we cannot sign a request; fall back immediately.
    if "ROVI_SHARED_SECRET" not in app.config:
        return blank_image
    if "ROVI_API_KEY" not in app.config:
        return blank_image
    headers = {"Accept-Encoding": "gzip"}
    req = requests.get(
        "http://api.rovicorp.com/recognition/v2.1/music/match/album?apikey="
        + app.config["ROVI_API_KEY"]
        + "&sig="
        + gen_sig()
        + "&name= "
        + album
        + "&performername="
        + artist
        + "&include=images&size=1",
        headers=headers,
        timeout=30,
    )
    if req.status_code != requests.codes.ok:
        return blank_image
    result = json.loads(req.content)
    # The response nesting is deep and any level may be absent for a weak
    # match, so treat KeyError/IndexError as "no artwork".
    try:
        return result["matchResponse"]["results"][0]["album"]["images"][0]["front"][3]["url"]
    except (KeyError, IndexError):
        return blank_image
def current_track():
    """Return the current Sonos track info, with title/artist capped at 30 chars."""
    info = sonos.get_current_track_info()
    for key in ("title", "artist"):
        info[key] = info[key][:30]
    return info
@app.route("/play")
def play():
    """Start playback on the Sonos speaker and acknowledge with 'Ok'."""
    sonos.play()
    return "Ok"
@app.route("/pause")
def pause():
    """Pause playback on the Sonos speaker and acknowledge with 'Ok'."""
    sonos.pause()
    return "Ok"
@app.route("/following")
def following():
    """Skip to the next track and acknowledge with 'Ok'."""
    sonos.next()
    return "Ok"
@app.route("/previous")
def previous():
    """Go back to the previous track and acknowledge with 'Ok'."""
    sonos.previous()
    return "Ok"
@app.route("/volume")
def volume():
    """Return the speaker's current volume level as a string.

    Bug fix: SoCo's ``volume`` property is an int, and Flask view functions
    may not return a bare int (the request would fail with "did not return a
    valid response"); convert to str before returning.
    """
    vol = sonos.volume
    return str(vol)
@app.route("/volume_up")
def volume_up():
    """Raise the volume by 10 points and acknowledge with 'Ok'."""
    sonos.set_relative_volume(10)
    return "Ok"
@app.route("/volume_down")
def volume_down():
    """Lower the volume by 10 points and acknowledge with 'Ok'."""
    sonos.set_relative_volume(-10)
    return "Ok"
@app.route("/volume_mute")
def volume_mute():
    """Mute the speaker and acknowledge with 'Ok'."""
    sonos.mute = True
    return "Ok"
@app.route("/volume_unmute")
def volume_unmute():
    """Unmute the speaker and acknowledge with 'Ok'."""
    sonos.mute = False
    return "Ok"
@app.route("/track_01")
def track_01():
    """Tune the speaker to the FM4 (ORF) radio stream."""
    sonos.play_uri('http://mp3stream1.apasf.apa.at:8000', title='FM4.ORF.AT', force_radio=True)
    return "Ok"
@app.route("/track_02")
def track_02():
    """Tune the speaker to the Radio PSR Live stream."""
    sonos.play_uri('http://streams.radiopsr.de/psr-live/mp3-192/mediaplayer', title='Radio PSR Live', force_radio=True)
    return "Ok"
@app.route("/track_03")
def track_03():
    """Tune the speaker to the Energy Sachsen stream."""
    sonos.play_uri('http://nrj.de/sachsen', title='Energy Sachsen', force_radio=True)
    return "Ok"
@app.route("/track_04")
def track_04():
    """Tune the speaker to the Sunshine Live stream."""
    sonos.play_uri('http://stream.sunshine-live.de/live/mp3-192', title='Sunshine Live', force_radio=True)
    return "Ok"
@app.route("/info-light")
def info_light():
    """Return the current track info as JSON, without cover art (no Rovi call)."""
    track = current_track()
    return json.dumps(track)
@app.route("/info")
def info():
    """Return current track info as JSON, including cover art, play and mute state."""
    track = current_track()
    track["image"] = get_track_image(track["artist"], track["album"])
    transport = sonos.get_current_transport_info()
    # "playing" here means "not stopped" — paused still reports True.
    track["playing"] = transport["current_transport_state"] != "STOPPED"
    track["mute"] = sonos.mute
    return json.dumps(track)
@app.route("/")
@app.route('/index')
def index():
    """Render the main page with the current track and its cover art."""
    track = current_track()
    track["image"] = get_track_image(track["artist"], track["album"])
    return render_template("index.html", track=track)
|
8,395 | 4a14265a9a2338be66e31110bba696e224b6a70f | from django.shortcuts import render
from django.http import HttpResponse
from chats.models import Chat
from usuario.models import Usuario
# Create your views here.
def chat(request):
    """Render the chat page with every Chat ordered by its primary key."""
    context = {'chat': Chat.objects.order_by("id_chat")}
    return render(request, 'chats/Chat.html', context=context)
8,396 | 9816a8265bcdb8c099f599efbe1cfe1a554e71f5 | from django.conf.urls import url
from price_App import views
from rest_framework.urlpatterns import format_suffix_patterns
# REST endpoints: current price and price history for a product, keyed by pk.
urlpatterns = [
    url(r'^api/price/(?P<pk>[0-9]+)$', views.product_price),
    url(r'^api/price_history/(?P<pk>[0-9]+)$', views.product_history),]
# Allow optional format suffixes (e.g. .json) on the endpoints above.
urlpatterns = format_suffix_patterns(urlpatterns)
8,397 | 76a22408bb423d9a5bc5bc007decdbc7c6cc98f7 | """
Neuraxle Tensorflow V1 Utility classes
=========================================
Neuraxle utility classes for tensorflow v1.
..
Copyright 2019, Neuraxio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tensorflow as tf
from neuraxle.base import BaseSaver, BaseStep, ExecutionContext
from neuraxle.hyperparams.space import HyperparameterSamples, HyperparameterSpace
from neuraxle_tensorflow.tensorflow import BaseTensorflowModelStep
class TensorflowV1ModelStep(BaseTensorflowModelStep):
    """
    Base class for tensorflow 1 steps.
    It uses :class:`TensorflowV1StepSaver` for saving the model.
    .. seealso::
        `Using the saved model format <https://www.tensorflow.org/guide/checkpoint>`_,
        :class:`~neuraxle.base.BaseStep`
    """
    HYPERPARAMS = HyperparameterSamples({})
    HYPERPARAMS_SPACE = HyperparameterSpace({})

    def __init__(
            self,
            create_graph,
            create_loss,
            create_optimizer,
            create_feed_dict=None,
            data_inputs_dtype=None,
            expected_outputs_dtype=None,
            variable_scope=None,
            has_expected_outputs=True,
            print_loss=False,
            print_func=None
    ):
        BaseTensorflowModelStep.__init__(
            self,
            create_model=create_graph,
            create_loss=create_loss,
            create_optimizer=create_optimizer,
            create_inputs=create_feed_dict,
            data_inputs_dtype=data_inputs_dtype,
            expected_outputs_dtype=expected_outputs_dtype,
            step_saver=TensorflowV1StepSaver(),
            print_loss=print_loss,
            print_func=print_func
        )
        # Default the variable scope to the step name so checkpoints from
        # different steps never collide in the same graph.
        if variable_scope is None:
            variable_scope = self.name
        self.variable_scope = variable_scope
        self.has_expected_outputs = has_expected_outputs
        self.create_feed_dict = create_feed_dict

    def setup(self, context: ExecutionContext) -> BaseStep:
        """
        Setup tensorflow 1 graph, and session using a variable scope.
        :return: self
        :rtype: BaseStep
        """
        if self.is_initialized:
            return self
        self.graph = tf.Graph()
        with self.graph.as_default():
            with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):
                self.session = tf.Session(config=tf.ConfigProto(log_device_placement=True), graph=self.graph)
                model = self.create_model(self, context)
                # create_model may return either one tensor (training output)
                # or a (training_output, inference_output) tuple.
                if not isinstance(model, tuple):
                    tf.identity(model, name='output')
                else:
                    tf.identity(model[0], name='output')
                    tf.identity(model[1], name='inference_output')
                tf.identity(self.create_loss(self), name='loss')
                self.create_optimizer(self, context).minimize(self['loss'], name='optimizer')
                init = tf.global_variables_initializer()
                self.session.run(init)
                self.is_initialized = True
        # Bug fix: the initialization path previously fell through and returned
        # None, contradicting the documented "return self" contract above.
        return self

    def teardown(self) -> BaseStep:
        """
        Close session on teardown.
        :return: self
        """
        if self.session is not None:
            self.session.close()
        self.is_initialized = False
        return self

    def strip(self):
        """
        Strip tensorflow 1 properties from to step to make the step serializable.
        :return: stripped step
        :rtype: BaseStep
        """
        self.graph = None
        self.session = None
        return self

    def fit(self, data_inputs, expected_outputs=None) -> 'BaseStep':
        with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):
            return self.fit_model(data_inputs, expected_outputs)

    def fit_model(self, data_inputs, expected_outputs=None) -> BaseStep:
        """
        Fit tensorflow model using the variable scope.
        :param data_inputs: data inputs
        :param expected_outputs: expected outputs to fit on
        :return: fitted self
        :rtype: BaseStep
        """
        feed_dict = {
            self['data_inputs']: data_inputs
        }
        if self.has_expected_outputs:
            feed_dict.update({
                self['expected_outputs']: expected_outputs
            })
        # Let the user add extra placeholders (e.g. dropout rate) per batch.
        if self.create_inputs is not None:
            additional_feed_dict_arguments = self.create_inputs(self, data_inputs, expected_outputs)
            feed_dict.update(additional_feed_dict_arguments)
        results = self.session.run([self['optimizer'], self['loss']], feed_dict=feed_dict)
        loss = results[1]
        self.add_new_loss(loss)
        return self

    def transform(self, data_inputs, expected_outputs=None) -> 'BaseStep':
        with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):
            return self.transform_model(data_inputs)

    def transform_model(self, data_inputs):
        """
        Transform tensorflow model using the variable scope.
        :param data_inputs: data inputs
        :return: model output for the inference tensor
        """
        inference_output_name = self._get_inference_output_name()
        feed_dict = {
            self['data_inputs']: data_inputs
        }
        results = self.session.run([self[inference_output_name], self['loss']], feed_dict=feed_dict)
        self.add_new_loss(results[1], test_only=True)
        return results[0]

    def _get_inference_output_name(self):
        """
        Return the output tensor name for inference (transform).
        In create_graph, the user can return a tuple of two elements : the output tensor for training, and the output tensor for inference.
        :return: 'inference_output' if such a tensor was created, else 'output'
        """
        inference_output_name = 'output'
        # When no 'inference_output' tensor exists, __getitem__ falls back to a
        # scalar tf.get_variable whose shape list is empty — hence this check.
        if len(self['inference_output'].get_shape().as_list()) > 0:
            inference_output_name = 'inference_output'
        return inference_output_name

    def __getitem__(self, item):
        """
        Get a graph tensor by name using get item.
        :param item: tensor name, optionally suffixed with ':<output_index>' (defaults to ':0')
        :type item: str
        :return: tensor
        :rtype: tf.Tensor
        """
        if ":" in item:
            split = item.split(":")
            tensor_name = split[0]
            device = split[1]
        else:
            tensor_name = item
            device = "0"
        try:
            result = self.graph.get_tensor_by_name("{0}/{1}:{2}".format(self.variable_scope, tensor_name, device))
        except KeyError:
            result = None
        # Fall back to an operation of the same name, then to a fresh scalar
        # variable so lookups of absent names never raise.
        if result is None:
            try:
                result = self.graph.get_operation_by_name("{0}/{1}".format(self.variable_scope, tensor_name))
            except KeyError:
                result = tf.get_variable(tensor_name, [])
        return result
class TensorflowV1StepSaver(BaseSaver):
    """
    Step saver for a tensorflow Session using tf.train.Saver().
    It saves, or restores the tf.Session() checkpoint at the context path using the step name as file name.
    .. seealso::
        `Using the saved model format <https://www.tensorflow.org/guide/saved_model>`_,
        :class:`~neuraxle.base.BaseSaver`
    """

    def save_step(self, step: 'TensorflowV1ModelStep', context: 'ExecutionContext') -> 'BaseStep':
        """
        Save a step that is using tf.train.Saver().
        :param step: step to save
        :type step: BaseStep
        :param context: execution context to save from
        :type context: ExecutionContext
        :return: saved step
        """
        with step.graph.as_default():
            saver = tf.train.Saver()
            saver.save(step.session, self._get_saved_model_path(context, step))
            # Drop the un-picklable graph/session so the step itself can be serialized.
            step.strip()
        return step

    def load_step(self, step: 'TensorflowV1ModelStep', context: 'ExecutionContext') -> 'BaseStep':
        """
        Load a step that is using tensorflow using tf.train.Saver().
        :param step: step to load
        :type step: BaseStep
        :param context: execution context to load from
        :type context: ExecutionContext
        :return: loaded step
        """
        # Rebuild the graph/session first; the checkpoint only holds variable values.
        step.is_initialized = False
        step.setup(context)
        with step.graph.as_default():
            saver = tf.train.Saver()
            saver.restore(step.session, self._get_saved_model_path(context, step))
        return step

    def can_load(self, step: 'TensorflowV1ModelStep', context: 'ExecutionContext'):
        """
        Returns whether or not we can load.
        :param step: step to load
        :type step: BaseStep
        :param context: execution context to load from
        :type context: ExecutionContext
        :return: loaded step
        """
        # A TF1 checkpoint is complete only when both the .meta and .index files exist.
        meta_exists = os.path.exists(os.path.join(context.get_path(), "{0}.ckpt.meta".format(step.get_name())))
        index_exists = os.path.exists(os.path.join(context.get_path(), "{0}.ckpt.index".format(step.get_name())))
        return meta_exists and index_exists

    def _get_saved_model_path(self, context: ExecutionContext, step: BaseStep):
        """
        Returns the saved model path using the given execution context, and step name.
        :param step: step to load
        :type step: BaseStep
        :param context: execution context to load from
        :type context: ExecutionContext
        :return: loaded step
        """
        return os.path.join(context.get_path(), "{0}.ckpt".format(step.get_name()))
|
8,398 | 8364264851895ccabeb74fd3fab1d4f39da717f8 | from django.apps import AppConfig
class StonewallConfig(AppConfig):
    """Django application configuration for the 'stonewall' app."""
    name = 'stonewall'
|
8,399 | e6010ec05ec24dcd2a44e54ce1b1f11000e775ce | #########################################################################
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#########################################################################
## Markdown has these types of paragraph: heading, text, list item (bullet or numbered),
## codeblock, table, and block quote.
##
## This script fixes up differences in Markdown dialect, between Github-MD and doxia-markdown.
## Specifically, it fixes these problems:
## 1. In Github-MD, bullets and codeblock starts are self-delimiting. In doxia-markdown, they
## must be separated from preceding text or (in the case of codeblocks) bullets, by a blank line.
## Failure to do so causes the bullet or codeblock delimiter to be interpreted as ordinary text,
## and the content gets munched into the preceding paragraph. The codeblock delimiter (```) as text
## gets interpreted as a codephrase delimiter (`) plus a preceding or following empty codephrase (``).
## 2. Github-MD is liberal in regard to what an 'indent' is, allowing 1, 2, 4, or 8 blanks, or
## a tab. We mostly use 2 blanks. Doxia-markdown requires strictly 4 spaces or a tab. Failure
## to adhere to this requirement causes indents to be ignored or misinterpreted, leading again to
## paragraph munching and delimiter ignoring.
## 3. In Doxia-markdown, if you indent below a header or text paragraph, it is interpreted as
## an implicit codeblock start. In Github-MD, we only start codeblocks with the explicit
## codeblock delimiter (```) and sometimes indent below text just for visual emphasis, so the
## doxia-markdown interpretation is unwelcome. Thus, in our rewrite, we disallow indenting below
## text or headers. This may make the text less pretty than the Github-MD presentation, but it
## avoids the incorrect codeblocking.
## 4. In Doxia-markdown, the indent of the end-codeblock delimiter must match that of the
## begin-codeblock delimiter, or it won't be recognized and the codeblock will run on.
## 5. Relative links need to be re-written. '.md' files need to be changed to '.html', and
## as best we can we will re-write named anchors referring to tags autogenerated from headers.
## The problem with generated tags is that Github-MD forces header text to lower-case, and
## replaces blank spaces with hyphens, while doxia-markdown leaves case unchanged, and replaces
## blanks with underscores. Fortunately we seem to have a culture of using link references that
## are typographically the same as the header text, so we have some basis for fixing most links.
## 6. H1 headers don't get named anchors generated, unlike H2 and lower headers. Don't know
## why doxia-markdown has this deficiency, perhaps it assumes H1 will only be used once at the
## beginning of the doc. We will insert an explicit anchor just before the H1 headers, to fix.
##
## So far, we're ignoring tables and block quotes.
##
## This script also manages the re-writing of named files to *.tmp, then mv to replace the original file.
import sys
import os
import inspect
import re
# These are the characters excluded by Markdown from use in auto-generated anchor text for Headings.
# GHM = Github-Markdown's rule, DOX = doxia-markdown's rule (doxia also keeps ".").
EXCLUDED_CHARS_REGEX_GHM = r'[^\w\-]' # all non-alphanumerics except "-" and "_". Whitespace are previously converted.
EXCLUDED_CHARS_REGEX_DOX = r'[^\w\.\-]' # all non-alphanumerics except "-", "_", and ".". Whitespace are previously converted.
def report_error(s) :
    # Print an error with file/line context and abort the run (Python 2 print syntax).
    # FNR, FILENAME and inputline are module globals maintained by the main
    # read loop elsewhere in this file (awk-style naming) — TODO confirm.
    print >>sys.stderr, "ERROR: " + s
    print >>sys.stderr, "on line: " + str(FNR) + " in file: " + FILENAME
    print >>sys.stderr, inputline
    exit(1)
def trace(msg) :
    # Debug trace to stderr, gated on the module-level TRACE flag; names the
    # calling function via the interpreter frame stack.
    if TRACE :
        print >>sys.stderr, "TRACE: " + inspect.currentframe().f_back.f_code.co_name + " : InputLine " + str(FNR) + " : " + msg
class INDENT_STACK :
    """Maintains the indent stack while parsing a Markdown document.

    Each frame records the physical indent (columns), the logical nesting
    depth, and the paragraph type ('none', 'bullet', 'text', ...). A base
    frame at column 0 is always present and is never popped.
    """

    def __init__(self) :
        self.my_stack = [{'physical': 0, 'logical': 0, 'type': 'none'}]

    def init_indent(self) :
        # Reset to just the base frame (column 0, depth 0, no paragraph type).
        self.my_stack = [{'physical': 0, 'logical': 0, 'type': 'none'}]

    def push_indent(self, n, new_type) :
        # A new frame nests one logical level deeper only when it sits under a
        # bullet; under text/headers the logical depth stays flat. This is the
        # fix for problem #3 (no implicit codeblocks from indented text).
        top = self.my_stack[-1]
        deeper = 1 if top['type'] == "bullet" else 0
        self.my_stack.append({'physical': n, 'logical': top['logical'] + deeper, 'type': new_type})

    def set_current_type(self, new_type) :
        # Retype the topmost frame in place.
        self.my_stack[-1]['type'] = new_type

    def pop_indent(self) :
        # Remove the top frame and report its physical indent; the base frame
        # is never popped (returns 0 instead).
        if len(self.my_stack) == 1 :
            return 0
        return self.my_stack.pop()['physical']

    def current_indent(self) :
        return self.my_stack[-1]['physical']

    def logical_indent_level(self) :
        return self.my_stack[-1]['logical']

    def current_type(self) :
        return self.my_stack[-1]['type']
## End class INDENT_STACK
# Module-level singleton shared by the parsing functions below. (The `global`
# statement at module scope is a no-op kept from the original awk-style port.)
global indent_stack
indent_stack = INDENT_STACK() # single instance
def convert_tabs(s) :
    """Expand tabs in s to 4-column stops (column-aware, via str.expandtabs)."""
    trace("orig length {0}".format(len(s)) )
    tab_count = s.count("\t")
    expanded = s.expandtabs(4)
    trace("after {0} tab substitutions, end length is {1}".format(tab_count, len(expanded)) )
    return expanded
def fix_prefix_blanks(new_type) :
    """Normalize the leading whitespace of the global 'inputline' in place.

    The physical indent (after tab expansion) is mapped to a logical indent
    level via the global indent_stack, and the prefix is rewritten as exactly
    4 spaces per logical level. 'new_type' is the paragraph type ("text",
    "bullet", "code") recorded for the indent level this line lands on.
    """
    global inputline
    # Fix up the indenting (prefix blanks) in inputline. This fixes problem #2.
    # Don't worry about blank lines here, they are filtered out before calling this method.
    # Both uses and maintains the indent stack, which is why we need the new_type passed in.
    prefix_blanks = re.search(r'^[\s]*', inputline)
    # NOTE(review): a '*' pattern anchored at ^ always matches (possibly empty),
    # so this 'if' is always taken and the 'else' below is effectively dead.
    if prefix_blanks :
        prefix_blanks = prefix_blanks.group()
        trace("After prefix-blanks match, prefix_blanks is |" + prefix_blanks + "| length is " + str(len(prefix_blanks)) )
        prefix_blanks = convert_tabs(prefix_blanks)
    else :
        prefix_blanks = ""
    trace("After convert_tabs, prefix_blanks is |" + prefix_blanks + "| length is " + str(len(prefix_blanks)) )
    # prefix_blanks now contains the 'physical' indent of the current paragraph, after tab substitution.
    # The indent of this paragraph may be > or == to the previous paragraph. Those are the easy cases.
    # If the indent is less than previous, is it equal to the indent of the next lower indented object?
    # Or of a lower yet object? Or is it intermediate between two lower objects currently in the stack?
    # The latter case is an anomoly, but there's no enforcement in Github-MD.
    # The following logic is an empirical reverse engineering, that seems adequate so far.
    # It basically says, find a prior level of indent that this is not less than, and then pretend that
    # the objects between it and this object weren't there.
    trace("current logical_indent_level is {0} and current_indent is {1}".format(
            indent_stack.logical_indent_level(), indent_stack.current_indent() ))
    # Pop frames until the current physical indent fits on the stack.
    while len(prefix_blanks) < indent_stack.current_indent() :
        indent_stack.pop_indent()
    if len(prefix_blanks) > indent_stack.current_indent() :
        indent_stack.push_indent(len(prefix_blanks), new_type)
    else : # len(prefix_blanks) == indent_stack.current_indent()
        indent_stack.set_current_type(new_type)
    trace(("After evaluating this line's prefix-blanks and prev_type, new logical_indent_level() is {0} " +
            "and current_indent is {1}").format(indent_stack.logical_indent_level(), indent_stack.current_indent() ))
    # Now whack off the prefix blanks, and replace with a standardized string of blanks appropriate to
    # the logical indent level.
    trace("Orig line is " + inputline)
    inputline = re.sub(r'^[\s]*', BLANKS[0 : 4*indent_stack.logical_indent_level()], inputline, 1)
    trace("New line is " + inputline)
def rewrite_relative_links() :
    """Rewrite relative markdown links in the global 'inputline' in place.

    For each complete [label](href) link on the line: directory-style hrefs
    gain an explicit 'index.html', '.md' suffixes become '.html', and named
    anchors that match Github-MD's generated-anchor form are rewritten to
    Doxia's form. Absolute (http...) and parameterized (?) URLs are left alone.
    """
    global inputline
    trace("entering with line: " + inputline)
    # Fix up the relative links in inputline. This fixes problem #5.
    num_links = inputline.count("](")
    links = re.findall(r'\[[^\]]+\]\([^)]+\)', inputline)
    num_whole_links = len(links)
    trace("num_links = {0}, num_whole_links = {1}".format(num_links, num_whole_links))
    # If '](', occurrences outnumber complete links, part of a link is missing from this line.
    if (num_links != num_whole_links) :
        if re.search(r'\[[^\][!]*\![\s]*\[', inputline) :
            # Nested link label expressions, with '!'.
            # Special case where a link value is inlined into the link label,
            # as in the first line of the base README.md file. Bail on such lines.
            trace("WARNING: Found nested link label expressions.")
            return
        else :
            report_error("Found link split across multiple lines. We can't process this.")
    for linkitem in links :
        # pieces: (open-bracket+pad, label text, close-bracket+open-paren, href, close-paren)
        pieces = re.search(r'(\[[\s`]*)([^\]]*[^\s`\]])([\s`]*\]\([\s]*)([^\s]+)([\s]*\))', linkitem).groups()
        trace("Link: " + linkitem)
        trace("Pieces: " + " ".join( (pieces[0],pieces[1],pieces[2],pieces[3],pieces[4]) ))
        labeltext = pieces[1]
        href = pieces[3]
        trace("Extracted labeltext is: " + labeltext)
        trace("Extracted href is: " + href)
        if re.search(r'^http|\?', href) :
            # Don't rewrite absolute or parameterized URLs; neither is native to this markdown book.
            trace("skipping absolute or parameterized URL")
            continue
        # Rewrite implicit index references to explicit, so the book will work as well
        # with 'file:///' preview as with a real web server.
        # We are only concerned with file path names here, so split at '#' if present.
        num_sharps = href.count("#")
        if (num_sharps >= 2) :
            report_error("Multiple #'s in a single link href.")
        elif (num_sharps == 1) :
            # Implicit index references are directory names, which seldom have a filetype suffix.
            # On the other hand, explicit file references must have filetype, else the browser
            # won't know what to do with it. So if no filetype extension, assume is a directory
            # and add 'index.html'. Skip if this is an intra-document link.
            if not re.search(r'^#|\.[^/#]+#', href) :
                if not href.count("/#") :
                    href = re.sub(r'#', "/#", href, 1)
                href = re.sub(r'/#', "/index.html#", href, 1)
            # Fix up '.md' references.
            href = re.sub(r'^README\.md#', "index.html#", href)
            href = re.sub(r'/README\.md#', "/index.html#", href)
            href = re.sub(r'\.md#', ".html#", href)
        else : # num_sharps == 0
            # Same logic as above, just at $ instead of #.
            if not re.search(r'\.[^/]+$', href) :
                if not href.endswith("/") :
                    href = href + "/"
                href = re.sub(r'/$', "/index.html", href)
            # Fix up '.md' references.
            href = re.sub(r'^README\.md$', "index.html", href)
            href = re.sub(r'/README\.md$', "/index.html", href)
            href = re.sub(r'\.md$', ".html", href)
        trace("After .md fixup, href is: " + href)
        # Re-write named anchors referring to generated tags.
        sharp = href.find("#")
        if (sharp >= 0) :
            named_anchor = href[sharp+1 : ]
            trace('named_anchor = "' + named_anchor + '"')
            trace('labeltext = "' + labeltext + '"')
            # Recompute the Github-MD form of the anchor from the label; if it matches
            # the anchor in the href, the anchor was auto-generated and must be
            # converted to the Doxia form (case preserved, "_" for whitespace).
            scratch = labeltext.lower() # Github-MD forces all anchors to lowercase
            scratch = re.sub(r'[\s]', "-", scratch) # convert whitespace to "-"
            scratch = re.sub(EXCLUDED_CHARS_REGEX_GHM, "", scratch) # strip non-alphanumerics
            if (scratch == named_anchor) :
                trace("Found a rewritable case")
                scratch = labeltext # Doxia-markdown doesn't change case
                scratch = re.sub(r'[\s]', "_", scratch) # convert whitespace to "_"
                scratch = re.sub(EXCLUDED_CHARS_REGEX_DOX, "", scratch) # strip non-alphanumerics except "."
                href = re.sub("#" + named_anchor, "#" + scratch, href)
                trace("After anchor rewrite, href is: " + href)
        # Now swap out the bad href for the fixed one in inputline.
        if (href != pieces[3]) :
            # Assemble the full link string to prevent similar substrings (to href) in different contexts being substituted.
            scratch = pieces[0] + pieces[1] + pieces[2] + href + pieces[4]
            trace("Fixed link text is: " + scratch)
            trace("linkitem is still: " + linkitem)
            k = inputline.find(linkitem)
            inputline = inputline[ : k] + scratch + inputline[ k + len(linkitem) : ]
            trace("Fixed inputline is: " + inputline)
################################################
# begin state machine
# NOTE(review): this 'global' statement is at module scope, where it has no
# effect; the assignments below bind module globals regardless.
global inputline, active_type
# Source of the standardized indent prefixes; sliced to 4 spaces per logical level.
BLANKS = " "
TRACE = 0   # set to 1 to enable trace() diagnostics on stderr
FNR = -1    # current line number within the file being processed
trace("Starting trace")
# Github uses relative indents, but doxia wants only and exactly multiples of 4.
# To turn the more forgiving into more regular, we must track both logical and actual indents.
indent_stack.init_indent()
# Paragraph type can be none, text, bullet, code, or heading.
# Note 'current_type()' used in managing the logical indent level on the indent stack,
# and 'active_type' used in the pattern recognition state machine, are deliberately different.
active_type = "none"
# Note: order of the below 'if' clauses is critically important for the state machine.
# Don't change the order.
if len(sys.argv) <= 1 :
    report_error("Please provide names of files to be processed, as command line arguments.")
# Process each file in place: write the fixed text to FILENAME.tmp, then
# rename the tmp file over the original.
for FILENAME in sys.argv[1:] :
    infile = open(FILENAME, 'r')
    outfile = open(FILENAME + ".tmp", 'w')
    FNR = 0
    H1_COUNT = 0  # how many first-level headers seen so far in this file
    for inputline in infile :
        FNR += 1
        inputline = inputline.rstrip("\n")
        if '](' in inputline :
            # Detect lines with hyperlinks in them, and re-write them if necessary and possible.
            # This is the only fall-through block, and we put it at the very beginning.
            rewrite_relative_links(); # in inputline
            # Fall through for further processing.
        if (active_type == "code") and ("```" not in inputline) :
            trace("in codeblock, regular line")
            # what happens in the codeblock, stays in the codeblock
            # Put this case first (after link detection), so we don't have to test it in all the other cases.
            print >>outfile, inputline
            continue
        if (active_type == "code") and ("```" in inputline) :
            trace("in codeblock, end delimiter line")
            # detect end of codeblock
            # This must be the second case.
            if re.search(r'```[\s]*[^\s]', inputline) :
                # If there's text following the end-``` on the same line, error out and fix it in the source file.
                report_error("Text following codeblock end delimiter (```) on same line.")
            if re.search(r'```.*```', inputline) :
                # If there are two sets of triple-ticks on the same line, that's a problem too.
                report_error("Two sets of codeblock delimiters (```) on same line.")
            active_type = "none"
            # Force the indenting of the end-``` to match the beginning. This fixes problem #4.
            inputline = re.sub(r'^[\s]*', BLANKS[0 : 4*indent_stack.logical_indent_level()], inputline)
            print >>outfile, inputline
            continue
        if (active_type != "code") and ("```" in inputline) :
            trace("start codeblock, delimiter line")
            # detect start of codeblock
            if re.search(r'[^\s][\s]*```', inputline) :
                # If there's text preceding the begin-``` on the same line, error out and fix it in the source file.
                report_error("Text preceding codeblock start delimiter (```) on same line.")
            if re.search(r'```.*```', inputline) :
                # If there are two sets of triple-ticks on the same line, that's a problem too.
                report_error("Two sets of codeblock delimiters (```) on same line.")
            if active_type == "text" or active_type == "bullet" :
                print >>outfile, "" # Need preceding blank line before codeblock, in doxia.
            active_type = "code"
            fix_prefix_blanks(active_type) # in inputline
            print >>outfile, inputline
            continue
        if re.search(r'^[\s]*$', inputline) :
            trace("blank line")
            # detect blank lines; a blank line ends the current paragraph
            active_type = "none"
            print >>outfile, inputline # Perhaps this should be print "" instead?
            continue
        if re.search(r'^[\s]*([*+-]|[\d]+\.)[\s]', inputline) :
            trace("bullet line")
            # detect bullet line (numbered or not)
            if (active_type == "text") :
                print >>outfile, "" # Need preceding blank line between text and bullet, in doxia. This fixes problem #1.
            active_type = "bullet"
            fix_prefix_blanks(active_type); # in inputline
            print >>outfile, inputline
            continue
        if inputline.startswith("#") :
            trace("header line")
            # detects header lines, which are self-delimiting, and cannot have indenting
            # Header line resets the indenting as well as current type
            active_type = "none"
            indent_stack.init_indent()
            if re.search(r'^#[^#]', inputline) :
                # First-level headers ("H1") need explicit anchor inserted (Doxia style). This fixes problem #6.
                anchor_name = re.sub(r' ', "_", inputline[1:].strip())
                anchor_name = re.sub(EXCLUDED_CHARS_REGEX_DOX, "", anchor_name)
                anchor_text = '<a name="' + anchor_name + '"></a>'
                if H1_COUNT == 0 :
                    # Treat the first header differently - put the header after instead of before
                    # This is necessary to preserve document metadata titling in generated html.
                    # However, it means the title itself gets hidden above the top of window, when the link is used.
                    H1_COUNT = 1
                    print >>outfile, inputline
                    print >>outfile, anchor_text
                    print >>outfile, "" # Anchors aren't self-delimiting, so insert a blank line after.
                else :
                    print >>outfile, "" # Anchors aren't self-delimiting, so insert a blank line first.
                    print >>outfile, anchor_text
                    print >>outfile, inputline
            else :
                # H2 or deeper level of header, doxia auto-generates anchor.
                print >>outfile, inputline
            continue
        if re.search(r'^[\s]*#', inputline) :
            trace("header line, bad")
            report_error("Header specification character (#) detected with indenting. This is presumed to be an error, since it will render as text. If intentional, put a period or other printable character before it.")
        ## default action -- last case in state machine switch
        trace("text line")
        # Everything else is text-like, and therefore continues active_type, unless none.
        if (active_type == "none") :
            # Start new text paragraph.
            active_type = "text"
            fix_prefix_blanks(active_type); # in inputline
            print >>outfile, inputline
            continue
        else :
            # This is just a continuation of current text or bullet.
            # Indenting is irrelevant.
            print >>outfile, inputline
            continue
    ## end loop on inputlines
    # An unterminated code fence at EOF means the source file is malformed.
    if (active_type == "code") :
        report_error("Unmatched codeblock delimiter (```) detected.")
    infile.close()
    outfile.close()
    os.rename(FILENAME + ".tmp", FILENAME)
## end loop on FILENAMEs
trace("ending trace")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.