index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
995,700 | 9c1c9ab15a30e2eb26c952a349f7329e4e7bea46 | import numpy as np
#------part 1------#
# Advent of Code 2019 day 3: trace two comma-separated wire paths (R/L/U/D
# moves) on a grid and report the crossing closest to the central port.
wires = np.loadtxt('input.txt', dtype = str)
wire1 = wires[0].split(',')
wire2 = wires[1].split(',')
# First pass over both wires: only track the running position (w = x, h = y)
# and the extremes reached, to size the grid later.
w, wmax, wmin, h, hmax, hmin = 0, 0, 0, 0, 0, 0
for order in wire1:
    if order[0] == 'R':
        # print('R-', order)
        w += int(order[1:])
        wmax = max(w, wmax)
        # print('w = ', w, ' wmax = ', wmax)
    elif order[0] == 'L':
        # print('L-', order)
        w -= int(order[1:])
        wmin = min(w, wmin)
        # print('w = ', w, ' wmin = ', wmin)
    elif order[0] == 'U':
        # print('U-', order)
        h += int(order[1:])
        hmax = max(h, hmax)
        # print('h = ', h, ' hmax = ', hmax)
    elif order[0] == 'D':
        # print('D-', order)
        h-= int(order[1:])
        hmin = min(h, hmin)
        # print('h = ', h, ' hmax = ', hmin)
print(w, wmax, wmin, h, hmax, hmin)
# Second wire: same bounding-box scan, extremes accumulate across both wires.
w, h = 0, 0
for order in wire2:
    if order[0] == 'R':
        # print('R-', order)
        w += int(order[1:])
        wmax = max(w, wmax)
        # print('w = ', w, ' wmax = ', wmax)
    elif order[0] == 'L':
        # print('L-', order)
        w -= int(order[1:])
        wmin = min(w, wmin)
        # print('w = ', w, ' wmin = ', wmin)
    elif order[0] == 'U':
        # print('U-', order)
        h += int(order[1:])
        hmax = max(h, hmax)
        # print('h = ', h, ' hmax = ', hmax)
    elif order[0] == 'D':
        # print('D-', order)
        h-= int(order[1:])
        hmin = min(h, hmin)
        # print('h = ', h, ' hmax = ', hmin)
print(w, wmax, wmin, h, hmax, hmin)
# Grid dimensions covering every cell either wire touches, origin included.
col = wmax - wmin + 1
row = hmax - hmin + 1
print(row, col)
# Place the central port inside the grid; row indices grow downward, so an
# 'U' move below decreases the row index.
c_row = max(hmax, 0)
if wmin < 0:
    c_col = -wmin
else:
    c_col = 0
print(c_row, c_col)
center_r, center_c = c_row, c_col
space = np.zeros((row, col), dtype = int)
print(space.shape)
space[c_row][c_col] = -1
# Second pass: paint every cell of wire1 with 1 (origin cell excluded by the
# +1 offsets on the R/D slices and the exclusive stop on L/U slices).
for order in wire1:
    print(order, c_row, c_col)
    if order[0] == 'R':
        to_col = c_col + int(order[1:])
        print('c_col =', c_col, ' to_col = ', to_col)
        space[c_row, c_col + 1: to_col + 1] = 1
        c_col = to_col
    elif order[0] == 'L':
        to_col = c_col - int(order[1:])
        print('c_col =', c_col, ' to_col = ', to_col)
        space[c_row, to_col: c_col] = 1
        c_col = to_col
    elif order[0] == 'U':
        to_row = c_row - int(order[1:])
        print('c_row =', c_row, ' to_row = ', to_row)
        space[to_row: c_row, c_col] = 1
        c_row = to_row
    elif order[0] == 'D':
        to_row = c_row + int(order[1:])
        print('c_row =', c_row, ' to_row = ', to_row)
        space[c_row + 1: to_row + 1, c_col] = 1
        c_row = to_row
c_row, c_col = center_r, center_c
print(c_row, c_col)
# Third pass: walk wire2 and promote any cell already holding 1 (wire1) to 2,
# i.e. mark intersections, without painting wire2-only cells.
for order in wire2:
    print(order, c_row, c_col)
    if order[0] == 'R':
        to_col = c_col + int(order[1:])
        print('c_col =', c_col, ' to_col = ', to_col)
        tmp = space[c_row, c_col + 1: to_col + 1]
        tmp[tmp == 1] = 2
        space[c_row, c_col + 1: to_col + 1] = tmp
        c_col = to_col
    elif order[0] == 'L':
        to_col = c_col - int(order[1:])
        print('c_col =', c_col, ' to_col = ', to_col)
        tmp = space[c_row, to_col: c_col]
        tmp[tmp == 1] = 2
        space[c_row, to_col: c_col] = tmp
        c_col = to_col
    elif order[0] == 'U':
        to_row = c_row - int(order[1:])
        print('c_row =', c_row, ' to_row = ', to_row)
        tmp = space[to_row: c_row, c_col]
        tmp[tmp == 1] = 2
        space[to_row: c_row, c_col] = tmp
        c_row = to_row
    elif order[0] == 'D':
        to_row = c_row + int(order[1:])
        print('c_row =', c_row, ' to_row = ', to_row)
        tmp = space[c_row + 1: to_row + 1, c_col]
        tmp[tmp == 1] = 2
        space[c_row + 1: to_row + 1, c_col] = tmp
        c_row = to_row
# Answer: minimum Manhattan distance from the origin to any intersection.
# col + row is a safe upper bound for the initial value.
dist = col + row
for i in range(space.shape[0]):
    for j in range(space.shape[1]):
        if space[i, j] == 2:
            dist = min(dist, np.abs(i - center_r) + np.abs(j - center_c))
print(dist) |
995,701 | 8b3929a49383f9c8bb22acbb2b96731918c4ae4c | # ---------------------------------------------------------------------------- #
# Title: mailroom part 1
# Description: A program that holds a list of donors and amounts they donated.
# Prompt the user to choose 3 menu actions; thank you, report, quit.
#
# <05/30/2020>, Created Script
# ---------------------------------------------------------------------------- #
import sys # imports go at the top of the file
# Data ---------------------------------------------------------------------- #
# Task 1: Create data
# Create a data structure that holds a list of your donors and a history of the amounts they have donated.
# Have least five donors, with between 1 and 3 donations each, store in the global namespace.
# Each entry is a (donor_name, [donation_amounts]) pair; the list is mutated
# in place by thank_you() when donations are added.
donor_info = [("Jim Zorn", [3772.32, 1201.17]),
              ("Jermaine Kearse", [877.33]),
              ("Marcus Trufant", [1563.23, 1043.87, 1.32]),
              ("K.J. Wright", [21663.23, 300.87, 100432.0]),
              ("Curt Warner", [663.23, 300.87, 10432.0]),
              ]
# Processing --------------------------------------------------------------- #
# Task 2: Menu choices
# Prompt the user to choose from a menu of 3 actions: “Send a Thank You”, “Create a Report” or “quit”.
###############################################
# "Menu of Choices"
###############################################
def main():
    """
    Interactive menu loop.

    Repeatedly shows the three menu options and dispatches to the matching
    action; unknown input re-prompts with an error message.
    """
    actions = {'1': thank_you, '2': report, '3': exit_program}
    while True:
        print("""
\n**** XYZ Nonprofit Charity ****\n
Menu of Options
1) Send a "Thank You"
2) Create a Report
3) Exit Program
""")
        choice = input("Which option would you like to perform? [1 to 3] - ")
        print()  # blank line for spacing
        action = actions.get(choice.strip())
        if action is not None:
            action()
        else:
            print("\nPlease choose only 1, 2, or 3!")
###############################################
# #1 Send a "Thank You"
###############################################
def thank_you():
    '''
    Thank You: Will append donations to existing donors, or adds new donors with donations.
    User can see list of existing donors by typing 'list'. Once donation is entered script will create
    custom thank you letter/ email.
    :return:
    '''
    while (True):
        donor_name = input("Type 'List' to see all names in Database or\nType First and Last Name: ").title()
        if (donor_name.lower() == 'list'):
            # define format row
            row = "{Name:<18s}".format
            print("\nDONOR REGISTRY:")
            donor_sort = (sorted(donor_info))
            for donor in donor_sort:
                donor_lst = (row(Name=donor[0]))
                print(donor_lst)
            input("Press Enter to Continue")
            print()
            continue  # back to the name prompt
        if (donor_name.lower() != 'list'):
            # NOTE(review): no validation here — a non-numeric amount raises
            # ValueError; confirm whether that is acceptable for this exercise.
            donor_amount = int(input("Enter Amount of Donation: $"))
            # if name is in list append dollar amount to donors bucket
            for i in donor_info:
                if i[0] == donor_name:
                    i[1].append(donor_amount)
                    print(f"'{donor_name}' is in registry added new donation amount!")
                    break
            # if name not in list add new name to donor info list
            else:
                donor_info.append((donor_name, [donor_amount]))
                print(f"New donor name '{donor_name}' added to registry!")
                # print(donor_info) - for testing
                # NOTE(review): layout reconstructed from a whitespace-mangled
                # dump; with this for/else placement the 'break' below exits
                # the menu loop for NEW donors before the letter is printed —
                # confirm against the original formatting.
                break
            input("\nPress Enter for Thank You letter:\n")
            print(f'''Dear {donor_name},
Thank you for your donation of '${donor_amount}' to XYZ Nonprofit for children! It really makes a huge impact for the children
in our community.
Those three hours after the end of the school day can make a crucial difference in kids’ lives.
Thanks to you, kids have a safe place to go after school. Instead of going home alone while their families are at work,
our kids are learning to play sports, create art, and improving their grades at our Homework Help Center. All while
forming friendships with peers and relationships with adult mentors and tutors.
Thank you again {donor_name}, for your ongoing support of our kids!
Sincerely,
XYZ Nonprofit Agency Director
''')
            input("Press Enter to Continue")
            break
###############################################
# #2 Create a Report
###############################################
def report():
    """Print a tabular summary (total, gift count, average gift) per donor."""
    title = ("******************* XYZ Nonprofit Charity ********************\n"
             "________________________________________________________________")
    headings = ("| Donor Name:", "| Totals $:", "| #-of-Gifts:", "| Avg Gift $: ", "|")
    header = (f"{headings[0].upper():<20s} {headings[1].upper():<13s}"
              f" {headings[2].upper():<6s} {headings[3].upper():<14s}|")
    print(title)
    print(header)
    # One fixed-width row per donor, dollar amounts to two decimals.
    row_fmt = "| {Name:<18s} | ${Total:<10.2f} | {NumbGifts:^11d} | ${AverageGifts:<10.2f} |"
    for name, gifts in donor_info:
        total = sum(gifts)
        print(row_fmt.format(Name=name, Total=total, NumbGifts=len(gifts),
                             AverageGifts=total / len(gifts)))
    print("________________________________________________________________")
    input("Press Enter to Continue")
###############################################
# #3 Exit the Program
###############################################
def exit_program():
    """
    Ask the user to confirm exiting.

    On 'y' the process terminates via sys.exit(); on 'n' control returns to
    the caller (the menu loop in main()); any other input re-prompts.
    """
    while True:
        exitchoice = input("Would you like to Exit?\nEnter 'y' or 'n': ")
        if exitchoice.lower() == 'y':
            print("\nExiting!")
            sys.exit()
        elif exitchoice.lower() == 'n':
            # BUG FIX: the original called main() here, re-entering the menu
            # recursively and growing the call stack on every decline.
            # Returning resumes the existing main() loop with identical
            # user-visible behavior.
            return
# Main Body of Script ------------------------------------------------------ #
# Start the interactive menu only when run as a script, not on import.
if __name__ == '__main__':
    main()
995,702 | e942bafe9740285f9a0f651d7e28ca89456bc902 | # -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
"""
Runs data cleaning scripts to turn data from (../raw) into
cleaned data ready to be analyzed (saved in ../interim).
This function severely lacks any type of error checking. It was built
with a specific dataset in mind but with minor tweaks it should be
fairly straightforward to extend these methods to any other dataset.
Code for data cleaning was partly taken from
https://github.com/jdills26/Tanzania-water-table
and
https://rstudio-pubs-static.s3.amazonaws.com/339668_006f4906390e41cea23b3b786cc0230a.html
Steps in this function
- Combine extraction_type, extraction_type_class and extraction_type_group
"""
# Hardcoded data files
DATA_FILEPATH = "./data/raw/water_pump_set.csv"
LABEL_FILEPATH = "./data/raw/water_pump_labels.csv"
OUT_FILEPATH = "./data/interim/data_cleaned.csv"
logger = logging.getLogger(__name__)
logger.info('Combining extraction columns...')
# Read data using pandas as simple as possible
df = pd.read_csv(DATA_FILEPATH)
ydat = pd.read_csv(LABEL_FILEPATH)
# Reformat date and merge with labels for intermediate analyses
df['date_recorded'] = pd.to_datetime(df['date_recorded'])
df = pd.merge(df, ydat, on='id') # Merge by id to guarantee correct labels
# Group low occuring counts together or into related groups
# ignore warnings
import warnings
warnings.filterwarnings("ignore")
# Drop columns AFTER modifying other columns
# Define them here for clarity
# Columns removed before saving; commented-out names were kept deliberately
# as candidates for future experiments.
columns_to_drop = [
    "recorded_by",
    "wpt_name",
    "scheme_name",
    "scheme_management",
    "extraction_type_group",
    "funder",
    "installer",
    # "source_class",
    "source_type", # Maybe we could try source_type instead.
    "payment",
    "waterpoint_type_group",
    "management_group",
    "permit",
    "public_meeting",
    # "lga",
    # "subvillage",
    "amount_tsh",
    #"construction_year",
    "num_private",
    "population"
]
# Combine extraction type by manually merging low-occurring groups.
# BUG FIX: the original used chained indexing (df['col'][mask] = value),
# which triggers SettingWithCopyWarning and is not guaranteed to write back
# into df; .loc performs the same conditional assignment on df itself.
df.loc[df.extraction_type.isin(['india mark ii', 'india mark iii']), 'extraction_type'] = 'india mark'
df.loc[df.extraction_type.isin(['cemo', 'climax']), 'extraction_type'] = 'motorpump'
df.loc[df.extraction_type.isin(['other - mkulima/shinyanga', 'other - play pump', 'walimi']), 'extraction_type'] = 'handpump'
df.loc[df.extraction_type.isin(['other - swn 81', 'swn 80']), 'extraction_type'] = 'swn'
# Logger not working as long as this is a script and not a Python module
# Will fix later!
logger.info('Remove unused features from data...')
#use lga column to create new feature: rural, urban, other
def map_lga(x):
    """Collapse an lga value to 'rural', 'urban' or 'other' by substring."""
    x=str(x)
    if x.find('Rural') >= 0: return 'rural'
    if x.find('Urban') >= 0: return 'urban'
    return 'other'
# Map rural, urban or other from the lga column
df['rural_urban'] = df.lga.apply(map_lga)
#use date time function in pandas
df['date_recorded'] = pd.to_datetime(df['date_recorded']) # Reformat the date_recorded
# 0 is a missing-value marker in construction_year; impute with the median
# of the real (non-zero) years.
median_construction_year = df.construction_year[df.construction_year!=0].median()
df['construction_year'] = df.construction_year.map(lambda x: median_construction_year if x == 0 else x) # Replace 0 with median
df['age'] = df.date_recorded.dt.year - df.construction_year # Track lifetime of waterpump in years
#remove extraction_type_group
df.drop(columns_to_drop, axis=1, inplace=True)
# Not logger.info but print() at least
# NOTE: the f prefix below is redundant — the string is %-formatted, not
# brace-interpolated.
print(f"Saving cleaned feature values to %s..." % OUT_FILEPATH)
df.to_csv(OUT_FILEPATH)
|
995,703 | 5f62ad61e225223abce9bbde84f06bb2f0a4a780 | import io
import os
import os.path
import struct
import socket
import stat
import time
import config
import storage
import database
import proto.nofs_local_pb2 as nofs_local
from proto.nofs_local_pb2 import *
def enum_from_value(enumtype, value):
    """Return the name of the enum member whose number equals *value*.

    Returns None when no member of *enumtype* matches.
    """
    matches = (member.name for member in enumtype.values if member.number == value)
    return next(matches, None)
def read_exact(stream, nbytes):
    """Read exactly *nbytes* from *stream*.

    Returns b"" (not a partial buffer) if the stream ends before *nbytes*
    bytes were collected.
    """
    collected = bytearray()
    while len(collected) < nbytes:
        piece = stream.read(nbytes - len(collected))
        if not piece:
            return b""
        collected += piece
    return bytes(collected)
class Header(object):
    """Fixed-size packet header: (pkt_type, payload_len) as two native uint32."""
    bfmt = "=LL"
    bsize = struct.calcsize(bfmt)

    def __init__(self, pkt_type, payload_len):
        self.pkt_type = pkt_type
        self.payload_len = payload_len

    def to_stream(self, s):
        """Serialize this header onto the binary stream *s*."""
        packed = struct.pack(Header.bfmt, self.pkt_type, self.payload_len)
        s.write(packed)

    @staticmethod
    def from_stream(s):
        """Parse a Header from *s*; return None if too few bytes remain."""
        raw = read_exact(s, Header.bsize)
        if len(raw) < Header.bsize:
            return None
        pkt_type, payload_len = struct.unpack(Header.bfmt, raw)
        return Header(pkt_type, payload_len)

    def __str__(self):
        return "{0} ({1})".format(enum_from_value(nofs_local._MESSAGETYPE, self.pkt_type), self.payload_len)
################################
# Handlers
def do_stat(fp):
    """Build a RespStat protobuf for a storage-layer file object.

    *fp* is a storage file (exposes .stat() returning a dict and a .fid id);
    every entry is reported as a regular file with no permission bits.
    """
    # BUG FIX (hygiene): removed a large block of dead, commented-out os.stat
    # code that duplicated this logic for filesystem paths and no longer
    # matched the storage-backed implementation below.
    stat_res = fp.stat()  # dict with at least 'size_bytes' and 'dt_utc'
    rs = RespStat()
    rs.ftype = stat.S_IFREG  # storage entries are always plain files
    rs.perms = 0
    rs.inode = fp.fid
    rs.size = stat_res['size_bytes']
    rs.ctime_utc = stat_res['dt_utc']
    return rs
def handle_stat(packet, wfile):
    """Serve REQ_STAT: stat packet.filepath and write a RESP_STAT reply.

    Returns ERR_NOENT (for the caller to report) when the file is unknown.
    """
    target = storage.get_file(packet.filepath[1:])  # strip leading '/'
    if target is None:
        return ERR_NOENT
    payload = do_stat(target).SerializeToString()
    Header(RESP_STAT, len(payload)).to_stream(wfile)
    wfile.write(payload)
def handle_listdir(packet, wfile):
    """Serve REQ_LISTDIR: write a RESP_LISTDIR with one entry per stored file.

    Lists every row of the Files table regardless of the requested directory;
    packet.dirpath is only echoed for debugging.
    """
    dp = os.path.join(config.DATA_DIR, packet.dirpath[1:])
    print(dp)  # debug only; dp is not otherwise used
    lr = RespListdir()
    with database.connect() as c:
        rows = c.execute('SELECT * FROM Files').fetchall()
        for f in rows:
            # column 1 is assumed to hold the filename — TODO confirm schema
            fp = storage.get_file(f[1])
            sr = do_stat(fp)
            le = lr.entry.add()
            le.filename = f[1]
            le.stat.CopyFrom(sr)
    ser = lr.SerializeToString()
    Header(RESP_LISTDIR, len(ser)).to_stream(wfile)
    wfile.write(ser)
def handle_read(packet, wfile):
    """Serve REQ_READ: send packet.length bytes at packet.offset of a file.

    Returns ERR_NOENT when the file is unknown.
    """
    target = storage.get_file(packet.filepath[1:])
    if target is None:
        return ERR_NOENT
    payload = target.read(packet.offset, packet.length)
    Header(RESP_READ, len(payload)).to_stream(wfile)
    wfile.write(payload)
def handle_adm_addfile(packet, wfile):
    """Serve REQ_ADM_ADDFILE: import an external file into storage.

    Replies with the new file id; returns ERR_NOENT if the source path
    does not exist on the local filesystem.
    """
    src_path = packet.ext_filepath
    dest_dir = packet.destdir
    if not os.path.exists(src_path):
        return ERR_NOENT
    reply = ARespAddFile()
    reply.fid = storage.store_file_ext(src_path, dest_dir)
    payload = reply.SerializeToString()
    Header(RESP_ADM_ADDFILE, len(payload)).to_stream(wfile)
    wfile.write(payload)
# Dispatch table: request packet type -> (protobuf request class, handler).
pkt_type_to_type_handler = {
    REQ_STAT: (ReqStat, handle_stat),
    REQ_LISTDIR: (ReqListdir, handle_listdir),
    REQ_READ: (ReqRead, handle_read),
    REQ_ADM_ADDFILE: (AReqAddFile, handle_adm_addfile),
}
def handle(header, data, wfile):
    """Dispatch one request packet to its handler and report failures.

    Handlers return an error code on failure (None on success); any error is
    sent back as a RESP_ERROR packet carrying a 4-byte signed code.
    """
    print(header)
    entry = pkt_type_to_type_handler.get(header.pkt_type)
    if entry is None:
        print("Unrecognized packet type: {}".format(header.pkt_type))
        error = ERR_BADPACKET
    else:
        req_cls, handler = entry
        request = req_cls()
        request.ParseFromString(data)
        error = handler(request, wfile)
    if error is not None:
        Header(RESP_ERROR, 4).to_stream(wfile)
        wfile.write(struct.pack("=l", error))
|
995,704 | 9accc4f4a287608c670a1ca6752198f590a52219 | import hashlib
import datetime
import couchdbkit
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPFound
from pyramid.threadlocal import get_current_registry
from pyramid.events import NewRequest
from pyramid.events import subscriber
from beaker.cache import cache_region
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_all_lexers
from paulla.paste.models import Paste
settings = get_current_registry().settings
# Expiry menu value -> paste lifetime; None means the paste never expires.
expireChoice = {"never": None,
                "1day": datetime.timedelta(days=1),
                "1week": datetime.timedelta(days=7),
                "1month": datetime.timedelta(days=31)
                }
# couchdb connection
server = couchdbkit.Server(settings['couchdb.url'])
db = server.get_or_create_db(settings['couchdb.db'])
Paste.set_db(db)
# Shared pygments formatter: full HTML document with line numbers.
formatter = HtmlFormatter(linenos=True, full=True, cssclass="source")
@view_config(route_name='home', renderer='templates/home.pt')
def home(request):
    """
    Home page.
    Renders the paste form with the list of available syntax lexers.
    """
    context = {'lexers': lexers()}
    return context
def _buildPassword(username, createdTime, password):
"""
Build sha1 of password.
:param username: username field of paste,
:param createdTime : datetime of creation the paste.
:param password: password to encrypt.
_buildPassword also use a salt defined in configuration file.
"""
if not password:
return ''
tmp = ''.join((username, str(createdTime).split('.')[0], password, settings['salt']))
sha1 = hashlib.sha224()
sha1.update(tmp)
return sha1.hexdigest()
@view_config(route_name='addContent', renderer='json')
def add(request):
    """
    Adding a new content.
    if ok return to the oneContent page.
    """
    username = request.POST['username']
    password = ''
    now = datetime.datetime.now()
    expire = request.POST['expire']
    expireDate = None
    # Translate the expiry menu value into an absolute datetime ("never"
    # maps to None in expireChoice, leaving expireDate unset).
    if expire:
        delta = expireChoice[expire]
        if delta:
            expireDate = now + delta
    # Only named pastes get a password; anonymous pastes stay editable-less.
    if username:
        password = _buildPassword(username, now, request.POST['password'])
    paste = Paste(title=request.POST['title'],
                  content=request.POST['content'],
                  created=now,
                  typeContent=request.POST['type'],
                  username=username,
                  password=password,
                  expire=expireDate)
    paste.save()
    request.session.flash(u"Add ok") # TODO translatoion
    return HTTPFound(request.route_path('oneContent', idContent=paste._id))
@view_config(route_name='oneContent', renderer='templates/content.pt')
def content(request):
    """
    Display a content Paste, syntax-highlighted with pygments.
    """
    paste = Paste.get(request.matchdict['idContent'])
    lexer = get_lexer_by_name(paste.typeContent, stripall=True)
    highlighted = highlight(paste['content'], lexer, formatter)
    return {'paste': paste, 'content': highlighted}
@view_config(route_name='oneContentRaw', renderer='string' )
def contentRaw(request):
    """
    Display a raw (unhighlighted) content paste.
    """
    # TODO type/mime
    paste_id = request.matchdict['idContent']
    return Paste.get(paste_id).content
@cache_region('short_term', 'previous')
def previous():
    """
    Return the list of the 10 most recent pastes (cached, 'short_term').

    Falls back to an empty list when the CouchDB view query fails (e.g.
    missing view or unreachable server) so page rendering never breaks.
    """
    try:
        return Paste.view('paste/all', limit=10).all()
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; keep the deliberate best-effort fallback but
        # only for genuine errors.
        return []
@cache_region('long_term', 'lexers')
def lexers():
    """
    Return the sorted list of (display name, primary alias) pygments lexers.
    """
    pairs = [(entry[0], entry[1][0]) for entry in get_all_lexers()]
    return sorted(pairs)
@subscriber(NewRequest)
def previousEvent(event):
    """
    subscriber of newRequest.
    Attaches the cached recent-paste list to every request so templates can
    render the sidebar without each view fetching it.
    """
    event.request.previous = previous()
@view_config(route_name='edit', renderer='templates/edit.pt')
def edit(request):
    """
    Render the edit form for an existing paste.
    """
    paste = Paste.get(request.matchdict['idContent'])
    return {
        'lexers': lexers(),
        'paste': paste,
    }
@view_config(route_name='update')
def update(request):
    """
    Updating a paste.
    return to display if succed.
    return to edit if fail.
    """
    paste = Paste.get(request.matchdict['idContent'])
    # Recompute the salted digest from the stored identity fields; only a
    # matching password may modify the paste.
    password = _buildPassword(paste.username, paste.created, request.POST['password'])
    if password == paste.password:
        paste.title = request.POST['title']
        paste.content = request.POST['content']
        paste.save()
        request.session.flash(u"Updated") # TODO translatoion
        return HTTPFound(request.route_path('oneContent', idContent=paste._id))
    request.session.flash(u"Wrong password") # TODO translatoion
    return HTTPFound(request.route_path('edit', idContent=paste._id))
@view_config(route_name='deleteConfirm', renderer='templates/delete_confirm.pt')
def deleteConfirm(request):
    """
    Ask confirmation on delete.
    """
    paste = Paste.get(request.matchdict['idContent'])
    # Anonymous pastes (no owner/password) cannot be deleted: bounce straight
    # back to the display page.
    if not(paste.username and paste.password):
        return HTTPFound(request.route_path('oneContent', idContent=paste._id))
    lexer = get_lexer_by_name(paste.typeContent, stripall=True)
    result = highlight(paste['content'], lexer, formatter)
    return {'paste': paste,
            'content': result,}
@view_config(route_name='delete')
def delete(request):
    """
    Delete a paste when the supplied password matches.

    Redirects to / on success, back to the confirmation page on a wrong
    password.
    """
    paste = Paste.get(request.matchdict['idContent'])
    candidate = _buildPassword(paste.username,
                               paste.created,
                               request.POST['password'])
    if candidate != paste.password:
        request.session.flash(u"Wrong password") # TODO translatoion
        return HTTPFound(request.route_path('deleteConfirm', idContent=paste._id))
    paste.delete()
    request.session.flash(u"Deleted") # TODO translatoion
    return HTTPFound(request.route_path('home', ))
@view_config(route_name='rss2', renderer='templates/rss2.pt')
def rss2(request):
    """
    RSS 2.0 feed of the most recent pastes.
    """
    recent = previous()
    return {'pastes': recent}
|
995,705 | eacf2e57af3d54eaf300747b70774c7f83dbddfc | # -*- coding: utf-8 -*-
from java.util.logging import Level
from java.io import File
from java.lang import Class
from java.sql import DriverManager
from java.sql import SQLException
import os
import IM_sqlitedb_android
def kate(self, progressBar, kate_files):
    """Extract Kate Mobile (VK) artifacts from Android SQLite databases.

    For each database in *kate_files*: copies it out of the image, queries
    messages, contacts and wall posts over JDBC/SQLite, and posts each row
    as a custom Autopsy blackboard artifact.

    Jython 2 module code: ``"...".decode('UTF-8')`` turns the Cyrillic
    byte-string literals into unicode. NOTE(review): indentation was
    reconstructed from a whitespace-mangled dump — confirm block nesting
    against the original before relying on exact control flow.
    """
    blackboardAttribute = IM_sqlitedb_android.BlackboardAttribute
    imdbIngestModuleFactory = IM_sqlitedb_android.IMDbIngestModuleFactory
    case = IM_sqlitedb_android.Case.getCurrentCase()
    # Create the custom artifact types, or look them up if a previous run
    # already registered them (addArtifactType raises on duplicates).
    try:
        artID_vk_kate = case.getSleuthkitCase().addArtifactType("TSK_CHATS_VK2", "Kate Mobile (ВКонтакте) - сообщения".decode('UTF-8'))
    except:
        artID_vk_kate = case.getSleuthkitCase().getArtifactTypeID("TSK_CHATS_VK2")
    try:
        artID_contact_kate = case.getSleuthkitCase().addArtifactType("TSK_CHATS_CONTACTS_KATE", "Kate Mobile (ВКонтакте) - контакты".decode('UTF-8'))
    except:
        artID_contact_kate = case.getSleuthkitCase().getArtifactTypeID("TSK_CHATS_CONTACTS_KATE")
    try:
        artID_wall = case.getSleuthkitCase().addArtifactType("TSK_CHATS_WALL", "Kate Mobile (ВКонтакте) - стена".decode('UTF-8'))
    except:
        artID_wall = case.getSleuthkitCase().getArtifactTypeID("TSK_CHATS_WALL")
    # Custom attribute types shared by the artifact kinds (same pattern).
    try:
        attID_nr = case.getSleuthkitCase().addArtifactAttributeType("TSK_MESS_ID", blackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING, "Идентификатор сообщения".decode('UTF-8'))
    except:
        attID_nr = case.getSleuthkitCase().getAttributeType("TSK_MESS_ID")
    try:
        attID_sender = case.getSleuthkitCase().addArtifactAttributeType("TSK_MESS_OTPRAV", blackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING, "Пользователь, отправивший сообщение".decode('UTF-8'))
    except:
        attID_sender = case.getSleuthkitCase().getAttributeType("TSK_MESS_OTPRAV")
    try:
        attID_reciever = case.getSleuthkitCase().addArtifactAttributeType("TSK_MESS_RECIEVER", blackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING, "Пользователь, получивший сообщение".decode('UTF-8'))
    except:
        attID_reciever = case.getSleuthkitCase().getAttributeType("TSK_MESS_RECIEVER")
    try:
        attID_status = case.getSleuthkitCase().addArtifactAttributeType("TSK_MESS_STATUS", blackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING, "Дополнительная информация".decode('UTF-8'))
    except:
        attID_status = case.getSleuthkitCase().getAttributeType("TSK_MESS_STATUS")
    try:
        attID_companion = case.getSleuthkitCase().addArtifactAttributeType("TSK_MESS_COMPANION", blackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING, "Собеседник".decode('UTF-8'))
    except:
        attID_companion = case.getSleuthkitCase().getAttributeType("TSK_MESS_COMPANION")
    for file in kate_files:
        self.log(Level.INFO, "Processing file: " + file.getName())
        # Copy the DB out of the image into the case temp dir so JDBC can open it.
        lclDbPath = os.path.join(case.getTempDirectory(), str(file.getId()) + ".db")
        IM_sqlitedb_android.ContentUtils.writeToFile(file, File(lclDbPath))
        try:
            Class.forName("org.sqlite.JDBC").newInstance()
            dbConn = DriverManager.getConnection("jdbc:sqlite:%s" % lclDbPath)
            stmt = dbConn.createStatement()
            stmt2 = dbConn.createStatement()
            stmt3 = dbConn.createStatement()
            stmt4 = dbConn.createStatement()
        except SQLException as e:
            self.log(Level.INFO, "Could not open database file (not SQLite) " + file.getName() + " (" + e.getMessage() + ")")
        # Messages, joined with the users table for sender/receiver names.
        try:
            resultSet = stmt.executeQuery("select message_id, (select users.last_name || ' ' || users.first_name || ' (' || messages.[account_id] || ')' from users where messages.[account_id]=users._id) as [sender_name], (select users.last_name || ' ' || users.first_name || ' (' || users._id || ')' from users where messages.uid=users._id) as [reciever_name], (select users.nickname from users where messages.uid=users._id) as [nickname], (select users.birthdate from users where messages.uid=users._id) as [rec_birthday], messages.title as [name_mess], messages.body as [text], messages.date as [date], case messages.read_state when 1 then 'Прочитано' else 'Не прочитано' end as [status1], messages.is_out as [status2] from messages order by messages.date".decode('UTF-8'))
        except SQLException as e:
            self.log(Level.INFO, "Error querying database for kate table (" + e.getMessage() + ") resultSet")
        # Contacts (users table).
        try:
            resultSet_contacts = stmt2.executeQuery("select users.[last_name] || ' ' || users.[first_name] || ' (' || users.[_id] || ')' as [id], users.[nickname], users.[photo_medium_rec] as [photo], users.[birthdate], users.[mobile_phone], users.[home_phone], users.[status] from users order by users.[_id]")
        except SQLException as e:
            self.log(Level.INFO, "Error querying database for kate table (" + e.getMessage() + ") resultSet_contacts")
        # Wall posts with attachments resolved to a printable description.
        try:
            resultSet_wall = stmt3.executeQuery("select _id, case when from_id > 0 then (select first_name || ' ' || last_name from users where users._id=wall.from_id) else 'Сообщение группы: ' || (select groups.name from groups where groups._id=wall.from_id*-1) end as [user], date, case when text='' then (select attachments.type || ': ' || case attachments.type when 'photo' then (select photos.src_big from photos where attachments.[photo_id]=photos.[photo_id]) when 'video' then (select video.[title] || ' ' || video.[image_big] from video where attachments.[video_id]=video.[video_id]) when 'link' then attachments.link_url when 'audio' then (select audio.[artist] || ' ' || audio.[title] from audio where attachments.[audio_id]=audio.[audio_id]) when 'poll' then (select poll.[question] from poll where attachments.[poll_id]=poll.[poll_id]) when 'page' then attachments.[page_title] || ' (page id:' || attachments.[page_id] || ')' when 'geo' then attachments.[geo_lat] || ', ' || attachments.[geo_lon] when 'doc' then (select docs.title || ' ' || docs.[ext] from attachments, docs where attachments.[doc_id]=docs.[doc_id]) else ' ' end from attachments where attachments.[content_id]=wall._id) else text end as [text] from wall order by date".decode('UTF-8'))
        except SQLException as e:
            self.log(Level.INFO, "Error querying database for kate table (" + e.getMessage() + ") resultSet_wall")
        # NOTE(review): this second wall query immediately rebinds
        # resultSet_wall (only the photos/docs join keys differ), so the
        # stmt3 result above is read only if this one fails — presumably a
        # schema-variant fallback; confirm intent.
        try:
            resultSet_wall = stmt4.executeQuery("select _id, case when from_id > 0 then (select first_name || ' ' || last_name from users where users._id=wall.from_id) else 'Сообщение группы: ' || (select groups.name from groups where groups._id=wall.from_id*-1) end as [user], date, case when text='' then (select attachments.type || ': ' || case attachments.type when 'photo' then (select photos.src_big from photos where attachments.[photo_id]=photos.[_id]) when 'video' then (select video.[title] || ' ' || video.[image_big] from video where attachments.[video_id]=video.[video_id]) when 'link' then attachments.link_url when 'audio' then (select audio.[artist] || ' ' || audio.[title] from audio where attachments.[audio_id]=audio.[audio_id]) when 'poll' then (select poll.[question] from poll where attachments.[poll_id]=poll.[poll_id]) when 'page' then attachments.[page_title] || ' (page id:' || attachments.[page_id] || ')' when 'geo' then attachments.[geo_lat] || ', ' || attachments.[geo_lon] when 'doc' then (select docs.title || ' ' || docs.[ext] from attachments, docs where attachments.[doc_id]=docs.[_id]) else ' ' end from attachments where attachments.[content_id]=wall._id) else text end as [text] from wall order by date".decode('UTF-8'))
        except SQLException as e:
            self.log(Level.INFO, "Error querying database for kate table (" + e.getMessage() + ") resultSet_wall_2")
        # The locals() probes below skip a section when its query failed and
        # the result variable was therefore never bound.
        if 'resultSet' in locals():
            while resultSet.next():
                try:
                    mess_id = resultSet.getString("message_id")
                    date = resultSet.getInt("date")
                    sender = resultSet.getString("sender_name")
                    mess = resultSet.getString("text")
                    nickname = resultSet.getString("nickname")
                    birthday = resultSet.getString("rec_birthday")
                    name_mess = resultSet.getString("name_mess")
                    # Build the receiver description: name plus optional
                    # login and birthday.
                    info_arr = []
                    info_arr.append(resultSet.getString("reciever_name"))
                    if nickname:
                        info_arr.append(" (логин: ".decode('UTF-8'))
                        info_arr.append(nickname)
                        info_arr.append(") ".decode('UTF-8'))
                    if birthday:
                        info_arr.append(", День рождения: ".decode('UTF-8'))
                        info_arr.append(birthday)
                    reciever = ''.join(info_arr)
                    status_arr = []
                    if name_mess:
                        status_arr.append("Название переписки: \"".decode('UTF-8'))
                        status_arr.append(name_mess)
                        status_arr.append("\"; ".decode('UTF-8'))
                    status_arr.append("Статус сообщения: ".decode('UTF-8'))
                    status_arr.append(resultSet.getString("status1"))
                    status = ''.join(status_arr)
                except SQLException as e:
                    self.log(Level.INFO, "Error getting values from kate message table (" + e.getMessage() + ")")
                art = file.newArtifact(artID_vk_kate)
                art.addAttribute(blackboardAttribute(blackboardAttribute.ATTRIBUTE_TYPE.TSK_MESSAGE_TYPE,
                    imdbIngestModuleFactory.moduleName, "Kate Mobile"))
                art.addAttribute(blackboardAttribute(attID_nr, imdbIngestModuleFactory.moduleName, mess_id))
                art.addAttribute(blackboardAttribute(blackboardAttribute.ATTRIBUTE_TYPE.TSK_DATETIME.getTypeID(),
                    imdbIngestModuleFactory.moduleName, date))
                # status2 is messages.is_out; 0 presumably means an incoming
                # message, so sender/receiver are swapped — TODO confirm.
                if resultSet.getInt("status2") == 0:
                    art.addAttribute(blackboardAttribute(attID_sender, imdbIngestModuleFactory.moduleName, reciever))
                    art.addAttribute(blackboardAttribute(attID_reciever, imdbIngestModuleFactory.moduleName, sender))
                else:
                    art.addAttribute(blackboardAttribute(attID_sender, imdbIngestModuleFactory.moduleName, sender))
                    art.addAttribute(blackboardAttribute(attID_reciever, imdbIngestModuleFactory.moduleName, reciever))
                art.addAttribute(blackboardAttribute(blackboardAttribute.ATTRIBUTE_TYPE.TSK_TEXT.getTypeID(),
                    imdbIngestModuleFactory.moduleName, mess))
                art.addAttribute(blackboardAttribute(attID_status, imdbIngestModuleFactory.moduleName, status))
                IM_sqlitedb_android.IngestServices.getInstance().fireModuleDataEvent(
                    IM_sqlitedb_android.ModuleDataEvent(imdbIngestModuleFactory.moduleName,
                        IM_sqlitedb_android.BlackboardArtifact.ARTIFACT_TYPE.TSK_MESSAGE, None))
        if 'resultSet_contacts' in locals():
            while resultSet_contacts.next():
                try:
                    name = resultSet_contacts.getString("id")
                    photo_link = resultSet_contacts.getString("photo")
                    mobile_phone = resultSet_contacts.getString("mobile_phone")
                    home_phone = resultSet_contacts.getString("home_phone")
                    nickname = resultSet_contacts.getString("nickname")
                    birthdate = resultSet_contacts.getString("birthdate")
                    status = resultSet_contacts.getString("status")
                    # Fold nickname/birthday/status into one description.
                    status_arr = []
                    if nickname:
                        status_arr.append("Псевдоним пользователя: \"".decode('UTF-8'))
                        status_arr.append(nickname)
                        status_arr.append("\"; ".decode('UTF-8'))
                    if birthdate:
                        status_arr.append("Указанный пользователем день рождения: ".decode('UTF-8'))
                        status_arr.append(birthdate)
                        status_arr.append("; ".decode('UTF-8'))
                    if status:
                        status_arr.append("Указанный пользователем статус: ".decode('UTF-8'))
                        status_arr.append(status)
                    status = ''.join(status_arr)
                except SQLException as e:
                    self.log(Level.INFO, "Error getting values from kate contacts table (" + e.getMessage() + ")")
                art = file.newArtifact(artID_contact_kate)
                art.addAttribute(blackboardAttribute(blackboardAttribute.ATTRIBUTE_TYPE.TSK_NAME_PERSON.getTypeID(),
                    imdbIngestModuleFactory.moduleName, name))
                art.addAttribute(blackboardAttribute(blackboardAttribute.ATTRIBUTE_TYPE.TSK_URL.getTypeID(),
                    imdbIngestModuleFactory.moduleName, photo_link))
                art.addAttribute(blackboardAttribute(blackboardAttribute.ATTRIBUTE_TYPE.TSK_PHONE_NUMBER_MOBILE.getTypeID(),
                    imdbIngestModuleFactory.moduleName, mobile_phone))
                art.addAttribute(blackboardAttribute(blackboardAttribute.ATTRIBUTE_TYPE.TSK_PHONE_NUMBER_HOME.getTypeID(),
                    imdbIngestModuleFactory.moduleName, home_phone))
                art.addAttribute(blackboardAttribute(attID_status, imdbIngestModuleFactory.moduleName, status))
                IM_sqlitedb_android.IngestServices.getInstance().fireModuleDataEvent(
                    IM_sqlitedb_android.ModuleDataEvent(imdbIngestModuleFactory.moduleName,
                        IM_sqlitedb_android.BlackboardArtifact.ARTIFACT_TYPE.TSK_MESSAGE, None))
        # wall
        if 'resultSet_wall' in locals():
            while resultSet_wall.next():
                try:
                    post_id = resultSet_wall.getString("_id")
                    user = resultSet_wall.getString("user")
                    date = resultSet_wall.getInt("date")
                    text = resultSet_wall.getString("text")
                except SQLException as e:
                    self.log(Level.INFO, "Error getting values from kate wall table (" + e.getMessage() + ")")
                art = file.newArtifact(artID_wall)
                art.addAttribute(blackboardAttribute(attID_nr, imdbIngestModuleFactory.moduleName, post_id))
                art.addAttribute(blackboardAttribute(attID_sender, imdbIngestModuleFactory.moduleName, user))
                art.addAttribute(blackboardAttribute(blackboardAttribute.ATTRIBUTE_TYPE.TSK_DATETIME.getTypeID(),
                    imdbIngestModuleFactory.moduleName, date))
                art.addAttribute(blackboardAttribute(blackboardAttribute.ATTRIBUTE_TYPE.TSK_TEXT.getTypeID(),
                    imdbIngestModuleFactory.moduleName, text))
                IM_sqlitedb_android.IngestServices.getInstance().fireModuleDataEvent(
                    IM_sqlitedb_android.ModuleDataEvent(imdbIngestModuleFactory.moduleName,
                        IM_sqlitedb_android.BlackboardArtifact.ARTIFACT_TYPE.TSK_MESSAGE, None))
        # Advance the shared progress bar by one processed database.
        IM_sqlitedb_android.IMDbIngestModule.set_count(self, 1)
        progressBar.progress(IM_sqlitedb_android.IMDbIngestModule.get_count(self))
        # Post the "databases found" inbox message only once (first file).
        if kate_files.index(file) == 0:
            message = IM_sqlitedb_android.IngestMessage.createMessage(IM_sqlitedb_android.IngestMessage.MessageType.DATA,
                imdbIngestModuleFactory.moduleName, "Обнаружены базы данных: Kate Mobile (ВКонтакте)".decode('UTF-8'))
            IM_sqlitedb_android.IngestServices.getInstance().postMessage(message)
            IM_sqlitedb_android.IMDbIngestModule.set_social_app_list(self, "Kate Mobile (ВКонтакте)")
        # Close whatever result sets were actually opened, then the statements
        # and connection, before removing the temp copy of the database.
        try:
            if 'resultSet' in locals():
                resultSet.close()
            if 'resultSet_contacts' in locals():
                resultSet_contacts.close()
            if 'resultSet_wall' in locals():
                resultSet_wall.close()
            stmt.close()
            stmt2.close()
            stmt3.close()
            stmt4.close()
            dbConn.close()
        except Exception as ex:
            self._logger.log(Level.SEVERE, "Error closing database", ex)
            self._logger.log(Level.SEVERE, IM_sqlitedb_android.traceback.format_exc())
        try:
            os.remove(lclDbPath)
        except Exception as ex:
            self._logger.log(Level.SEVERE, "Error delete database from temp folder", ex)
|
995,706 | ddf5009dc0e99a9ed5d72d384a77b481c5611f13 | # variables that contain the user credential to access twitter api
import tweepy
from tweepy import OAuthHandler
import json
ACCESS_TOKEN = ""
ACCESS_TOKEN_SECRET = ""
CONUMER_KEY = ""
CONSUMER_SECRET = ""
#auth = OAuthHandler(CONUMER_KEY, CONSUMER_SECRET)
# auth.set_access_token(ACCESS_TOKEN, #ACCESS_TOKEN_SECRET)
#api = tweepy.API(auth)
# def process_or_store(tweet):
# print(json.dumps(tweet))
'''
for status in tweepy.Cursor(api.home_timeline).items(10):
# Process a single status
process_or_store(status._json)
'''
|
995,707 | 41020552d0b8af16e1ca43eea2eb93721f0845fd | import numpy as np
import gym
import tensorflow as tf
import json, sys, os
import random
import time
from gym import wrappers
log_dir = 'tmp'            # directory containing the trained checkpoint
env_to_use = 'Pendulum-v0'
# game parameters
env = gym.make(env_to_use)
# set seeds for reproducible rollouts
env.seed(10)
np.random.seed(10)
# Print full arrays.  FIX: np.nan is not a valid threshold (and was removed
# from numpy); sys.maxsize is the documented way to disable summarization.
np.set_printoptions(threshold=sys.maxsize)
outdir = 'tmp/ddpg-agent-results'
env = wrappers.Monitor(env, outdir, force=True, video_callable=lambda episode_id: True)

# Restore the trained DDPG graph and look up the tensors needed for inference.
graph = tf.Graph()
sess = tf.Session(graph=graph)
with graph.as_default():
    saver = tf.train.import_meta_graph(os.path.join(log_dir, 'pendulum-model.ckpt-800.meta'))
    saver.restore(sess, tf.train.latest_checkpoint(log_dir))
    state_ph = tf.get_collection("state_ph")[0]
    actions = tf.get_collection("actions")[0]
    is_training_ph = tf.get_collection("is_training_ph")[0]
#####################################################################################################
total_steps = 0
for ep in range(10):
    total_reward = 0
    steps_in_ep = 0
    # Initial state
    observation = env.reset()
    for t in range(1000):
        env.render()
        # choose action based on the deterministic policy
        action_for_state, = sess.run(actions,
            feed_dict = {state_ph: observation[None], is_training_ph: False})
        # take step
        next_observation, reward, done, _info = env.step(action_for_state)
        total_reward += reward
        observation = next_observation
        total_steps += 1
        steps_in_ep += 1
        if done:
            break
    print('Episode %2i,Reward: %7.3f, Steps: %i'%(ep,total_reward,steps_in_ep))
    time.sleep(0.1)
env.close()
sess.close()
|
995,708 | 237d01b1742f28c19a2eced9f34af0a60e2bfbc7 | import torch
import torch.nn.functional as F
from torch import nn as nn
from torch.autograd import Variable
SUPPORTED_METRICS = ['BCEWithLogitsLoss', 'CrossEntropyLoss', 'MSELoss', 'Accuracy']
class NewMetric:
    """Template/stub showing the interface a new evaluation metric must implement."""
    def __init__(self, params, **kwargs):
        """params: metric-specific configuration (unused in this stub)."""
        super(NewMetric, self).__init__()
        pass
    def __call__(self, inputs, targets):
        """Compute the metric for a batch of (inputs, targets); not implemented."""
        pass
class Accuracy:
    """Top-1 classification accuracy for (N, C) score tensors and (N,) integer targets."""

    def __init__(self, **kwargs):
        # Extra kwargs accepted (and ignored) so all metrics share one constructor shape.
        pass

    def __call__(self, inputs, targets):
        """Return the fraction of samples whose arg-max class equals the target.

        :param inputs: (N, C) tensor of raw scores/logits
        :param targets: (N,) tensor of integer class labels
        :return: 0-dim CPU tensor with the accuracy in [0, 1]
        """
        # FIX: softmax requires an explicit dim (implicit dim is deprecated and
        # ambiguous).  Softmax is monotonic per row, so argmax is unchanged.
        probs = F.softmax(inputs, dim=1)
        labels = torch.argmax(probs, dim=1)
        accuracy = torch.mean((labels == targets).to(torch.double)).detach().cpu()
        return accuracy
def get_evaluation_metric(config):
    """
    Returns the evaluation metric function based on provided configuration

    :param config: (dict) a top level configuration object containing the
        'eval_metric' key; config['eval_metric'] must hold a 'name' entry,
        which is popped here (the caller's dict is mutated)
    :return: an instance of the evaluation metric
    :raises RuntimeError: if the name is not one of SUPPORTED_METRICS
    """
    # FIX: corrected "evalvalutation" typo in the assertion message
    assert 'eval_metric' in config, 'Could not find evaluation metric configuration'
    eval_config = config['eval_metric']
    name = eval_config.pop('name')
    if name == 'BCEWithLogitsLoss':
        return nn.BCEWithLogitsLoss()
    elif name == 'CrossEntropyLoss':
        return nn.CrossEntropyLoss()
    elif name == 'MSELoss':
        return nn.MSELoss()
    elif name == 'Accuracy':
        return Accuracy()
    else:
        raise RuntimeError(f"Unsupported metric function: '{name}'. Supported losses: {SUPPORTED_METRICS}")
995,709 | 33b677a40cadadbae0a9a1c8ed5cceeddc863472 | import re
import numpy as np
import pandas as pd
import nltk
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
stopwords_english = stopwords.words('english')
# TODO: consider filtering by a frequency threshold plus a stop-word list; ref: http://www.cnblogs.com/amiza/p/10407801.html
class SAOMR:
def __init__(self, path='data/train.tsv', classes=5, shuffle=True):
self.path = path
self.classes = classes
self.shuffle = shuffle
self.pre_process()
def shuffle_data(self):
data = np.array([self.X_data, self.Y_data]).transpose(1, 0)
np.random.shuffle(data)
data = data.transpose(1, 0)
self.X_data = data[0]
self.Y_data = data[1]
def data_split(self, radio=None):
if radio is None:
radio = [0.8, 0.2]
data_size = self.X_data.shape[0]
train_size = int(data_size * radio[0])
# validate_size = int(data_size * radio[1])
self.X_train = self.X_data[:train_size]
self.Y_train = self.Y_one_hot[:, :train_size]
self.X_validate = self.X_data[train_size:]
self.Y_validate = self.Y_data[train_size:]
# self.X_validate = self.X_data[train_size:train_size + validate_size]
# self.Y_validate = self.Y_data[train_size:train_size + validate_size]
# self.X_test = self.X_data[train_size + validate_size:]
# self.Y_test = self.Y_data[train_size + validate_size:]
def sen_to_bag_of_words(self, sen):
res = np.zeros(self.vocab_size)
for word in sen:
if self.word2index.__contains__(word):
res[self.word2index[word]] += 1
return res
def sen_to_ngram(self, sen):
res = np.zeros(self.ngram_size)
res[:self.vocab_size] = self.sen_to_bag_of_words(sen)
if len(sen) >= 3:
for j in range(len(sen) - 2):
temp = ' '.join(sen[j:j + 3])
if self.ngram2index.__contains__(temp):
res[self.ngram2index[temp]] += 1
elif len(sen) >= 2:
for j in range(len(sen) - 1):
temp = ' '.join(sen[j:j + 2])
if self.ngram2index.__contains__(temp):
res[self.ngram2index[temp]] += 1
return res
def convert_to_onehot(self):
Y = np.eye(self.classes)[list(self.Y_data)].T
return Y
def pre_process(self):
df_train = pd.read_csv(self.path, sep='\t')
# clean, tokenize and lemmatize
df_train['Phrase'] = df_train['Phrase'].str.lower()
df_train['Phrase'] = df_train['Phrase'].apply((lambda x: re.sub('[^a-zA-Z]', ' ', x)))
lemmatizer = WordNetLemmatizer()
words_list = []
for sen in df_train.Phrase:
words = word_tokenize(sen.lower())
lemma_words = [lemmatizer.lemmatize(i) for i in words]
words = []
for i in lemma_words:
if i not in stopwords_english: # delete stopwords
words.append(i)
words_list.append(words)
self.X_data = np.array(words_list)
self.Y_data = np.array(df_train.Sentiment)
self.shuffle_data()
self.Y_one_hot = self.convert_to_onehot()
self.data_split()
self.vocab = set()
for i in self.X_data:
for j in i:
self.vocab.add(j)
self.vocab_size = len(self.vocab)
self.word2index = {}
for index, value in enumerate(self.vocab):
self.word2index[value] = index
ngram_2 = dict()
ngram_3 = dict()
for tmp in self.X_data:
if len(tmp) >= 3:
for j in range(len(tmp) - 2):
trigram = ' '.join(tmp[j:j + 3])
if ngram_3.__contains__(trigram):
ngram_3[trigram] += 1
else:
ngram_3[trigram] = 1
if len(tmp) >= 2:
for j in range(len(tmp) - 1):
bigram = ' '.join(tmp[j:j + 2])
if ngram_2.__contains__(bigram):
ngram_2[bigram] += 1
else:
ngram_2[bigram] = 1
keys = set(ngram_2.keys())
for key in keys:
if ngram_2[key] < 30:
ngram_2.pop(key)
keys = set(ngram_3.keys())
for key in keys:
if ngram_3[key] < 30:
ngram_3.pop(key)
self.ngram = np.concatenate(
(np.array(list(self.vocab)), np.array(list(ngram_2.keys())), np.array(list(ngram_3.keys()))),
axis=0) # use vocab as unigram
self.ngram_size = len(self.ngram)
self.ngram2index = {}
for index, value in enumerate(self.ngram):
self.ngram2index[value] = index
def get_bag_of_words(self, data):
batch_size = len(data)
res = np.empty([batch_size, self.vocab_size])
for i in range(batch_size):
res[i] = self.sen_to_bag_of_words(data[i])
return res
def get_n_gram(self, data):
batch_size = len(data)
res = np.empty([batch_size, self.ngram_size])
for i in range(batch_size):
res[i] = self.sen_to_ngram(data[i])
return res
|
995,710 | bbecfe66355e140492732160df209f7b65399a37 | n,m = map(int, input().split())
# Greedy: after adding all offered (count, value) batches, the answer is the
# sum of the n largest values.  (n and m were read on the previous line.)
ans = 0
a = list(map(int, input().split()))  # the n starting values
for _ in range(m):
    # b copies of value c are offered
    b, c = map(int, input().split())
    a.extend([c] * b)
a.sort(reverse=True)
ans = sum(a[:n])
print(ans)
995,711 | 4d5d6f8884c349c617163f85e6a09f5309c17f89 | #!/usr/bin/env python
"""Define hooks to be run before project generation."""
import sys
from slugify import slugify
PROJECT_SLUG = "{{ cookiecutter.project_slug }}"
PROJECT_DIRNAME = "{{ cookiecutter.project_dirname }}"
def check_identifiers():
    """Abort generation unless both the slug and the dirname are valid Python identifiers."""
    if not PROJECT_SLUG.isidentifier():
        sys.exit(f"project_slug='{PROJECT_SLUG}' is not a valid Python identifier.")
    if not PROJECT_DIRNAME.isidentifier():
        sys.exit(f"project_dirname='{PROJECT_DIRNAME}' is not a valid Python identifier.")
def check_slugs():
    """Abort generation unless project_slug and project_dirname are valid slugs.

    A value is considered valid when slugifying it (with no separator) leaves
    it unchanged; the expected form is included in the error message.
    """
    _project_slug = slugify(PROJECT_SLUG, separator="")
    if PROJECT_SLUG != _project_slug:
        sys.exit(
            f"project_slug='{PROJECT_SLUG}' is not a valid slug (e.g. {_project_slug})."
        )
    _project_dirname = slugify(PROJECT_DIRNAME, separator="")
    if PROJECT_DIRNAME != _project_dirname:
        sys.exit(
            # FIX: the continuation string was missing its f-prefix, so the
            # suggested value was printed literally as "{_project_dirname}"
            f"project_dirname='{PROJECT_DIRNAME}' is not a valid slug "
            f"(e.g. {_project_dirname})."
        )
def main():
    """Execute initialization checks before project generation."""
    check_slugs()
    check_identifiers()
if __name__ == "__main__":
main()
|
995,712 | b01cd8d40699a81c62b461685a4b4efaeff4b12c |
class BonusFact:
    """Flat, CSV-serialisable view of a player's bonus transaction."""

    def __init__(self, player, bonus):
        """Capture the fields of *player* and *bonus* needed for export."""
        self.player_id = player.PlayerID
        self.description = bonus.Identifier
        self.product_id = bonus.ProductID
        self.currency = bonus.Currency
        self.value = bonus.Value
        self.activity_time = bonus.TransactionDate

    def to_csv(self):
        """Row with the player id first."""
        return [
            self.player_id,
            self.description,
            self.product_id,
            self.currency,
            self.value,
            self.activity_time,
        ]

    def to_csv_with_mappings(self):
        """Row ordered to match get_headers() (player id last)."""
        return self.to_csv()[1:] + [self.player_id]

    @staticmethod
    def get_headers():
        """Column names matching the to_csv_with_mappings() ordering."""
        return ["description", "product_id", "currency", "value", "activity_time", "player_id"]
class FreeSpinFact:
    """Flat, CSV-serialisable view of a player's free-spin transaction."""

    def __init__(self, player, free_spin):
        """Capture the fields of *player* and *free_spin* needed for export."""
        self.player_id = player.PlayerID
        self.description = free_spin.Identifier
        self.number_of_free_spin = free_spin.Value
        self.activity_date = free_spin.TransactionDate

    def to_csv_with_mappings(self):
        """Row ordered to match get_headers() (player id last)."""
        return self.to_csv()[1:] + [self.player_id]

    def to_csv(self):
        """Row with the player id first."""
        return [self.player_id, self.description, self.number_of_free_spin, self.activity_date]

    @staticmethod
    def get_headers():
        """Column names matching the to_csv_with_mappings() ordering."""
        return ["description", "free_spin_number", "activity_time", "player_id"]
|
995,713 | 5533b09a2be4e6c22649000a166028efbb4e8659 | #!/usr/bin/env python3
import sys
import os
sys.path.append('../modules')
import numpy as np
import matplotlib.pyplot as plt
import raytracing as rt
import visualize as vis
import ray_utilities
if __name__ == '__main__':
# Constants
image_plane = -1e6 # Image plane from first lens
fs = 100 # System focal length
aperture = 25.4 # Diameter of each mirror
npoints = 3 # Number of scene points
ymax = 1e5 # Size of imaging area
ymin = -1e5
nrays = 10 # Number of rays per scene point
lmb = 500 # Design wavelength
# Create a scene
scene = np.zeros((2, npoints))
scene[0, :] = image_plane
scene[1, :] = np.linspace(ymin, ymax, npoints)
components = []
# Add a concave mirror
components.append(rt.SphericalMirror(f=fs,
aperture=aperture,
pos=[0, 0],
theta=0))
# Place a detector just on the focal plane of the mirror
components.append(rt.Sensor(aperture=aperture,
pos=[-fs, 0],
theta=np.pi))
# Get initial rays
[rays, ptdict, colors] = ray_utilities.initial_rays(scene,
components[0],
nrays)
# Color code the rays emanating from each scene point
colors = ['b']*nrays + ['g']*nrays + ['r']*nrays
# Create a new canvas
canvas = vis.Canvas(xlim=[-2*fs, 5],
ylim=[-aperture, aperture],
figsize=[12, 12])
ray_bundles = rt.propagate_rays(components, rays, lmb)
canvas.draw_rays(ray_bundles, colors)
# Draw the components
canvas.draw_components(components)
# Show the canvas
canvas.show()
|
995,714 | 2a310859ce9c1897580fce000e068cd243a88f7f | #!/usr/bin/python
import subprocess
import sys
def run_command(command):
    """Run *command* in a shell, echoing its combined stdout/stderr, then wait.

    NOTE(review): all output is read before wait(), and the exit status
    (retval) is computed but discarded -- failures are not reported.
    """
    p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    for line in p.stdout.readlines():
        print line
    retval = p.wait()
def get_backup_name(file):
    """Return the backup identifier recorded in *file*.

    The identifier is the third whitespace-separated token of the file's
    last line (skipping one trailing whitespace-only line if present).
    """
    handle = open(file, "r")
    lines = handle.readlines()
    handle.close()
    last = len(lines) - 1
    if lines[last].isspace():
        # ignore a trailing blank line
        last -= 1
    return lines[last].split()[2]
# Sanity check: the pgBackRest config must already route all backups to
# /data/bahmni-backup/ before running the full backup.
# NOTE(review): the success branch prints an instruction-sounding message --
# presumably intended as a confirmation; verify the wording/intent.
if '/data/bahmni-backup/' in open('/etc/bahmni-backrest.conf').read():
    print "bahmni-backrest.conf must point all backups to /data/bahmni-backup/"
else:
    print "/etc/bahmni-backrest.conf is not updated to point the backups to /data/bahmni-backup/"
    sys.exit(1)
print "Taking backup, please wait..."
# Run the full backup (OpenMRS, reports and Postgres) via the bahmni CLI.
run_command("bahmni -i local backup --backup_type=all --options=all")
# Each backup_info.txt records the generated backup name on its last line.
openmrs_backup = get_backup_name("/data/bahmni-backup/openmrs/backup_info.txt")
reports_backup = get_backup_name("/data/bahmni-backup/reports/backup_info.txt")
postgres_backup = get_backup_name("/data/bahmni-backup/postgres/backup_info.txt")
print "The openmrs backup name is: ", openmrs_backup
print "The reports backup name is: ", reports_backup
print "The postgres backup name is: ", postgres_backup
# Bundle everything into a single tarball named after the OpenMRS backup.
backupfile = "/data/%s-bahmni-backup.tar.gz" % openmrs_backup
tar_cmd = "cd /data;tar -czf %s bahmni-backup/*" % backupfile
run_command(tar_cmd)
print "Created backup file", backupfile
|
995,715 | aa20e342f87ee6e8f0a3751af1a504302a38fad7 | #!/usr/bin/env python
# coding=utf-8
import numpy as np
import os
import shutil
import sys
from cv_bridge import CvBridge, CvBridgeError
import cv2
import rospy
from sensor_msgs.msg import Image
from std_msgs.msg import String
Nshot = 50
image_path = "/home/nizar/Images/tpROS/"
def mse(imageA, imageB):
    """'Mean Squared Error' between two images of identical dimensions.

    The sum of squared per-pixel differences is normalised by the number of
    pixels (rows * cols); the lower the value, the more similar the images.
    """
    diff = imageA.astype("float") - imageB.astype("float")
    total = np.sum(diff ** 2)
    return total / float(imageA.shape[0] * imageA.shape[1])
def init():
    """Reset the reference-image directory to an empty state.

    Any snapshots from a previous run are discarded so the node starts fresh.
    """
    # FIX: the old code passed mode 777 (decimal == 0o1411) to os.mkdir on the
    # first run; the default mode (0o777, masked by umask) is what was intended.
    # The unused Nshot x Nshot list-of-lists scaffolding (dead code) was removed.
    if os.path.isdir(image_path):
        shutil.rmtree(image_path)
    os.mkdir(image_path)
def compare_images(imageA, imageB, title):
    """Return the mean squared error between the two images.

    *title* is accepted for API compatibility but unused.
    """
    # lower MSE means more similar images
    return mse(imageA, imageB)
class image_converter:
    """ROS helper: matches incoming camera frames against stored snapshot images.

    Keeps a directory of reference images (one per visited location); for each
    new frame it publishes the name of the best-matching snapshot, or stores
    the frame as a new location when nothing matches well enough.
    """
    def __init__(self):
        self.i = 0  # number of snapshots stored so far
        self.x = int(Nshot / 2)  # grid coordinates used to name the first snapshot
        self.y = int(Nshot / 2)
        self.bridge = CvBridge()
        # self.image_sub = rospy.Subscriber("ardrone/front/image_raw",Image,self.callback)
        self.image_sub = rospy.Subscriber("/usb_cam/image_raw", Image, self.callback)
        self.pub_pos = rospy.Publisher("position", String, queue_size=10)
    def whereiam(self, current_image):
        """Compare *current_image* against every stored snapshot.

        Returns the filename of the closest snapshot, or False when even the
        best match exceeds the MSE threshold (12000 -- presumably empirical;
        TODO confirm).
        """
        score = {}
        for (dirpath, dirnames, filenames) in os.walk(image_path):
            for file in filenames:
                img_tmp = os.path.join(dirpath, file)
                image_tmp = cv2.imread(img_tmp)
                score[file] = compare_images(current_image, image_tmp, "compares les")
        res = min(score.values())
        if res > 12000:
            return False
        else:
            coord = score.get  # NOTE(review): unused; looks like leftover code
            for name, pourcentage in score.iteritems():
                if pourcentage == res:
                    return name
    def callback(self, data):
        """Handle one camera frame: locate it, or register it as a new place."""
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
        cv2.imshow("cam de merde", cv_image)
        if self.i == 0:
            # very first frame: store it as the origin snapshot
            cv2.imwrite(image_path + str(self.x) + "_" + str(self.y) + ".jpg", cv_image)
            self.i += 1
        # returns the best-matching stored image name, or False
        res = self.whereiam(cv_image)
        # unknown image: register it as a new location (up to Nshot snapshots)
        if not res and self.i < Nshot:
            self.i += 1
            # TODO: derive the real coordinates from the IMU
            res = "/new_image" + str(self.i) + ".jpg"
            print "inconnue " + str(res)
            cv2.imwrite(image_path + res, cv_image)
        # publish the name of the image/location we are currently in
        self.pub_pos.publish(str(res))
        #
        print res
        k = cv2.waitKey(20)
def main(args):
    """Initialise the snapshot directory, start the ROS node and spin until interrupted.

    *args* is accepted for the conventional entry-point signature but unused.
    """
    init()
    ic = image_converter()
    rospy.init_node('image_converter', anonymous=True)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
    cv2.destroyAllWindows()
if __name__ == '__main__':
main(sys.argv)
|
995,716 | f2bf2c6e2549deb323e5ed753ccce4a39f8e97ae | from bson.objectid import ObjectId
class Clients:
    """Thin data-access wrapper around a MongoDB collection of client documents."""

    def __init__(self, db):
        # db: a pymongo collection-like object
        self.db = db
        self.clients = []  # kept for backward compatibility; not used internally

    def list(self):
        """Return a cursor over every client document."""
        return self.db.find()

    def find_by_criteria(self, criteria):
        """Return a cursor over documents matching the raw *criteria* mapping."""
        return self.db.find(criteria)

    def find_by_id(self, client_id):
        """Return a cursor for the document whose _id matches *client_id* (hex string)."""
        return self.db.find({'_id': ObjectId(client_id)})

    def find_by_name(self, name):
        """Return a cursor over documents whose 'lastname' equals *name*."""
        return self.db.find({'lastname': name})

    def find_by_company(self, company):
        """Return a cursor over documents whose 'company' equals *company*."""
        return self.db.find({'company': company})

    def find_by_state(self, state):
        """Return a cursor over documents whose 'state' equals *state*."""
        return self.db.find({'state': state})

    def add(self, client):
        """Insert *client*; return True when the insert produced an id."""
        return bool(self.db.insert_one(client).inserted_id)

    def update(self, client_id, client_updated):
        """Apply *client_updated* as a $set on the matching document; True when modified."""
        # NOTE(review): collection.update is deprecated in modern pymongo --
        # consider update_one; kept as-is to preserve behaviour.
        res = self.db.update({'_id': ObjectId(client_id)}, {"$set": client_updated}, upsert=False)
        return res['nModified'] > 0

    def delete(self, client_id):
        """Delete the matching document; True when exactly one was removed."""
        return self.db.delete_one({'_id': ObjectId(client_id)}).deleted_count == 1

    def delete_all(self):
        """Remove every document from the collection."""
        self.db.remove({})
|
995,717 | b8038d21ba2c48ec2fd61eca7f33bf95c9f15f4d | from cpp_parameters import *
# Testing correct and incorrect parameter counts being passed (kwargs and non-kwargs)
# Note that the implementation depends a lot on whether zero, one, two or more args are being wrapped
def is_python_fastproxy():
    """Return True if SWIG is generating Python code using -fastproxy."""
    # The _swig_new_instance_method helper is only emitted by -fastproxy builds.
    import cpp_parameters as _mod
    return hasattr(_mod, "_swig_new_instance_method")
def _expect_type_error(func, *args, **kwargs):
    """Call *func* and verify the wrapper rejects the call with TypeError.

    Every mis-parameterised call in this test must raise TypeError; anything
    else (including success) is a test failure, signalled via RuntimeError
    which propagates and aborts the run -- exactly as the original
    copy-pasted try/except blocks did.
    """
    try:
        func(*args, **kwargs)
        raise RuntimeError("Missed throw")
    except TypeError:
        pass

# Zero parameters expected
x = Zero()
_expect_type_error(Zero, z=0)
_expect_type_error(Zero, 0)
_expect_type_error(x.zero, z=0)
_expect_type_error(x.zero, 0)
_expect_type_error(Zero.stat_zero, z=0)
_expect_type_error(Zero.stat_zero, 0)
_expect_type_error(global_zero, z=0)
_expect_type_error(global_zero, 0)

# One mandatory parameter expected
x = One(1)
_expect_type_error(One, a=1, z=0)
_expect_type_error(One, 1, 0)
_expect_type_error(x.one, a=1, z=0)
_expect_type_error(x.one, 1, 0)
_expect_type_error(One.stat_one, a=1, z=0)
_expect_type_error(One.stat_one, 1, 0)
_expect_type_error(global_one, a=1, z=0)
_expect_type_error(global_one, 1, 0)

# Two mandatory parameters expected
x = Two(1, 2)
_expect_type_error(Two, a=1, b=2, z=0)
_expect_type_error(Two, 1, 2, 0)
_expect_type_error(x.two, a=1, b=2, z=0)
_expect_type_error(x.two, 1, 2, 0)
_expect_type_error(Two.stat_two, a=1, b=2, z=0)
_expect_type_error(Two.stat_two, 1, 2, 0)
_expect_type_error(global_two, a=1, b=2, z=0)
_expect_type_error(global_two, 1, 2, 0)

# Single optional parameter expected
x = Single(1)
_expect_type_error(Single, a=1, z=0)
_expect_type_error(Single, 1, 0)
_expect_type_error(x.single, a=1, z=0)
_expect_type_error(x.single, 1, 0)
_expect_type_error(Single.stat_single, a=1, z=0)
_expect_type_error(Single.stat_single, 1, 0)
_expect_type_error(global_single, a=1, z=0)
_expect_type_error(global_single, 1, 0)

# Test that -builtin option throws TypeError if kwargs are used even when they look like they should work, kwargs are not supported unless using -keyword.
# Also same for -fastproxy option except that kwargs are supported by default for constructors. TODO: Fix inconsistency.
if is_python_builtin() or is_python_fastproxy():
    # One mandatory parameter in API
    x = One(1)
    if is_python_fastproxy():
        x = One(a=1)
    else:
        _expect_type_error(One, a=1)
    _expect_type_error(x.one, a=1)
    _expect_type_error(One.stat_one, a=1)
    _expect_type_error(global_one, a=1)
    # Two mandatory parameters in API
    x = Two(1, 2)
    if is_python_fastproxy():
        x = Two(a=1, b=2)
    else:
        _expect_type_error(Two, a=1, b=2)
    _expect_type_error(x.two, a=1, b=2)
    _expect_type_error(Two.stat_two, a=1, b=2)
    _expect_type_error(global_two, a=1, b=2)
    # Single optional parameter in API
    x = Single(1)
    if is_python_fastproxy():
        x = Single(a=1)
    else:
        _expect_type_error(Single, a=1)
    _expect_type_error(x.single, a=1)
    _expect_type_error(Single.stat_single, a=1)
    _expect_type_error(global_single, a=1)
else:
    # Non-builtin should work as expected
    # One mandatory parameter in API
    x = One(a=1)
    x.one(a=1)
    One.stat_one(a=1)
    global_one(a=1)
    # Two mandatory parameters in API
    x = Two(a=1, b=2)
    x.two(a=1, b=2)
    Two.stat_two(a=1, b=2)
    global_two(a=1, b=2)
    # Single optional parameter in API
    x = Single(a=1)
    x.single(a=1)
    Single.stat_single(a=1)
    global_single(a=1)
|
995,718 | 82109f7a4ad6d7a665692e5ce44b6586ece0b2d6 | import random
class Letters(object):
    """Deal random lowercase letters to a player and re-deal on demand."""

    def __init__(self, owning_letters):
        # The letters the player currently holds.
        self.owning_letters = owning_letters
        # Alphabet to draw from ('a'..'z').  FIX: this used to be created
        # lazily inside having_letters(), so calling skip() first raised
        # AttributeError on self.letter.
        self.letter = [chr(c) for c in range(97, 123)]

    def having_letters(self):
        """Deal a fresh hand of 7 distinct random letters.

        :returns: list of the 7 distinct letters now owned
        """
        # random.sample draws without replacement -- equivalent to the old
        # rejection loop, without the retries.
        self.owning_letters = random.sample(self.letter, 7)
        return self.owning_letters

    def skip(self):
        """Exchange the current hand for 7 new random letters.

        Duplicates are allowed here, matching the original behaviour.

        :returns: list of the new 7 letters
        """
        self.owning_letters = [random.choice(self.letter) for _ in range(7)]
        return self.owning_letters
|
995,719 | a90330b71ffb41dbdd90aad1abef51318983bcde | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 29 04:56:49 2019
@author: Ayush
"""
#Libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#Dataset
dataset= pd.read_csv('Ads_CTR_Optimisation.csv')
#Implement thompsons
import random
# Thompson Sampling over d=10 ads for N=10000 simulated rounds.
N=10000
d=10
ads_selected=[]
# Per-ad Beta-posterior counts: observed successes (reward 1) and failures (reward 0).
number_of_rewards_1=[0]*d
number_of_rewards_0=[0]*d
total_rewards=0
for n in range(0,N):
    ad=0
    max_random=0
    # Sample each ad's click-through rate from its Beta(successes+1, failures+1)
    # posterior and select the ad with the highest draw.
    for i in range(0,d):
        random_beta=random.betavariate(number_of_rewards_1[i]+1,number_of_rewards_0[i]+1)
        if random_beta>max_random:
            max_random=random_beta
            ad=i
    ads_selected.append(ad)
    # Simulated feedback: row n of the dataset records whether this ad is clicked.
    reward=dataset.values[n,ad]
    if reward ==1:
        number_of_rewards_1[ad]=number_of_rewards_1[ad]+1
    else:
        number_of_rewards_0[ad]=number_of_rewards_0[ad]+1
    total_rewards=total_rewards+reward
#Visualise
plt.hist(ads_selected)
plt.show()
|
995,720 | 5d86361c8bb4ab72f6bec3af61dc066525c2eb32 | #!/usr/bin/env python
from __future__ import division, absolute_import, print_function
"""
This is the unittest for the Efficient/Sequential Elementary Effects module.
python -m unittest -v test_eee.py
python -m pytest --cov pyeee --cov-report term-missing -v tests/
"""
import unittest
# --------------------------------------------------------------------
# eee.py
# Missing coverage:
# 181-184: ImportError MPI
# 204: crank!=0 <- MPI
# 230-250: mask
# 276-279: weight
# 339-371: plotfile
# 383-385: logfile
# 391: return after step4
# 415-418: weight
# 445: return after step6
# 459: logfile
# 470-473: weight
# 483-489: logfile
# 494-509: no more parameters after screening
# 515: mask
# 524-526: logfile
class TestEee(unittest.TestCase):
def setUp(self):
import numpy as np
# seed for reproducible results
seed = 1234
np.random.seed(seed=seed)
self.ntfirst = 10
self.ntlast = 5
self.nsteps = 6
self.verbose = 1
# G function
def test_eee_g(self):
from functools import partial
import numpy as np
from pyeee import eee
from pyeee.utils import func_wrapper
from pyeee.functions import G
# Function and parameters
func = G
npars = 6
params = [78., 12., 0.5, 2., 97., 33.] # G
# Partialise function with fixed parameters
arg = [params]
kwarg = {}
obj = partial(func_wrapper, func, arg, kwarg)
# Screening
lb = np.zeros(npars)
ub = np.ones(npars)
out = eee(obj, lb, ub, mask=None,
ntfirst=self.ntfirst, ntlast=self.ntlast, nsteps=self.nsteps,
processes=1)
# Check
self.assertEqual(list(np.where(out)[0]+1), [2, 3, 4, 6])
# Gstar function with different interactions
def test_see_gstar(self):
from functools import partial
import numpy as np
from pyeee import eee, see
from pyeee.utils import func_wrapper
from pyeee.functions import Gstar
# Function and parameters
func = Gstar
npars = 10
params = [[[1]*npars, np.random.random(npars), [0., 0., 9., 9., 9., 9., 9., 9., 9., 9.]], # G*
[np.ones(npars), np.random.random(npars), [0., 0.1, 0.2, 0.3, 0.4, 0.8, 1., 2., 3., 4.]],
[np.ones(npars)*0.5, np.random.random(npars), [0., 0., 9., 9., 9., 9., 9., 9., 9., 9.]],
[np.ones(npars)*0.5, np.random.random(npars), [0., 0.1, 0.2, 0.3, 0.4, 0.8, 1., 2., 3., 4.]],
[np.ones(npars)*2.0, np.random.random(npars), [0., 0., 9., 9., 9., 9., 9., 9., 9., 9.]],
[np.ones(npars)*2.0, np.random.random(npars), [0., 0.1, 0.2, 0.3, 0.4, 0.8, 1., 2., 3., 4.]]
]
iiout = [[1, 2, 3, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 7, 10],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
[1, 2, 3, 4, 5, 7]
]
lb = np.zeros(npars)
ub = np.ones(npars)
for ii in range(len(params)):
# Partialise function with fixed parameters
arg = params[ii]
kwarg = {}
obj = partial(func_wrapper, func, arg, kwarg)
out = see(obj, lb, ub, mask=None,
ntfirst=self.ntfirst, ntlast=self.ntlast, nsteps=self.nsteps,
processes=1, verbose=1) #, plotfile='gstar'+str(ii)+'.png')
# Check
self.assertEqual(list(np.where(out)[0]+1), iiout[ii])
# Bratley / K function
def test_eee_k(self):
from functools import partial
import os
import numpy as np
import schwimmbad
from pyeee import eee
from pyeee.utils import func_wrapper
from pyeee.functions import bratley
# Function and parameters
func = bratley
npars = 10
params = [] # k
# Screening
lb = np.zeros(npars)
ub = np.ones(npars)
nprocs = 4
ipool = schwimmbad.choose_pool(mpi=False, processes=nprocs)
out = eee(func, lb, ub, mask=None,
ntfirst=self.ntfirst, ntlast=self.ntlast, nsteps=self.nsteps,
processes=nprocs, pool=ipool, logfile='tlog.txt')
ipool.close()
# Check
self.assertEqual(list(np.where(out)[0]+1), [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
self.assertTrue(os.path.exists('tlog.txt'))
# Clean
if os.path.exists('tlog.txt'): os.remove('tlog.txt')
# Morris function
def test_eee_fmorris(self):
from functools import partial
import os
import numpy as np
from pyeee import eee
from pyeee.utils import func_wrapper
from pyeee.functions import fmorris
# Function and parameters
func = fmorris
npars = 20
beta0 = 0.
beta1 = np.random.standard_normal(npars)
beta1[:10] = 20.
beta2 = np.random.standard_normal((npars,npars))
beta2[:6,:6] = -15.
beta3 = np.zeros((npars,npars,npars))
beta3[:5,:5,:5] = -10.
beta4 = np.zeros((npars,npars,npars,npars))
beta4[:4,:4,:4,:4] = 5.
# Partialise Morris function with fixed parameters beta0-4
arg = [beta0, beta1, beta2, beta3, beta4]
kwarg = {}
obj = partial(func_wrapper, func, arg, kwarg)
# Screening
lb = np.zeros(npars)
ub = np.ones(npars)
# Check
ff = open('tlog.txt', 'w')
out = eee(obj, lb, ub, mask=None,
ntfirst=self.ntfirst, ntlast=self.ntlast, nsteps=self.nsteps,
processes=4, logfile=ff, verbose=1)
ff.close()
self.assertEqual(list(np.where(out)[0]+1), [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 14, 15, 20])
self.assertTrue(os.path.exists('tlog.txt'))
# Clean
if os.path.exists('tlog.txt'): os.remove('tlog.txt')
if __name__ == "__main__":
unittest.main()
|
995,721 | 1d4336f387c2030ead17b7dff957b4660cb963ec | # Generated by Django 3.2.8 on 2021-10-31 12:47
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    # Auto-generated: makes Domain.domain_key a UUID primary key (default uuid4).
    dependencies = [
        ('domain_user', '0005_auto_20211031_1810'),
    ]
    operations = [
        migrations.AlterField(
            model_name='domain',
            name='domain_key',
            field=models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False, unique=True),
        ),
    ]
|
995,722 | 109933b184cd6c89dba07b3483202ff6aec4c360 | '''
Print distance distributions of each trip segment
'''
from sys import argv
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from statsmodels.distributions.empirical_distribution import ECDF
matches_path = argv[1]
result_path = argv[2]
# read and transform
df_matches = pd.read_csv(matches_path)
df_matches['taxi_arrival_time_transit_stop'] = pd.to_datetime(df_matches['taxi_arrival_time_transit_stop'])
df_matches['transit_destination_time'] = pd.to_datetime(df_matches['transit_destination_time'])
df_matches['taxi_destination_time'] = pd.to_datetime(df_matches['taxi_destination_time'])
# get distances
list_integration_distance = []
list_shared_distance = []
# list_taxi_private = []
# list_transit_private = []
list_destinations_distance = []
for index, match in df_matches.iterrows():
total_distance = match['integration_distance'] + match['shared_distance'] + match['destinations_distance']
list_integration_distance.append(match['integration_distance']/total_distance)
list_shared_distance.append(match['shared_distance']/total_distance)
list_destinations_distance.append(match['destinations_distance']/total_distance)
# if match['transit_destination_time'] > match['taxi_destination_time']:
# list_transit_private.append(match['destinations_distance']/total_distance)
# list_taxi_private.append(0)
# else:
# list_transit_private.append(0)
# list_taxi_private.append(match['destinations_distance']/total_distance)
# plot
list_integration_distance.sort()
list_shared_distance.sort()
# list_taxi_private.sort()
# list_transit_private.sort()
list_destinations_distance.sort()
ecdf_integration_distance = ECDF(list_integration_distance)
ecdf_shared_distance = ECDF(list_shared_distance)
# ecdf_taxi_private = ECDF(list_taxi_private)
# ecdf_transit_private = ECDF(list_transit_private)
ecdf_destinations_distance = ECDF(list_destinations_distance)
fig, ax = plt.subplots()
plt.plot(ecdf_integration_distance.x, ecdf_integration_distance.y, label='integration distance')
plt.plot(ecdf_shared_distance.x, ecdf_shared_distance.y, label='shared distance')
# plt.plot(ecdf_taxi_private.x, ecdf_taxi_private.y, label='taxi private')
# plt.plot(ecdf_transit_private.x, ecdf_transit_private.y, label='transit private')
plt.plot(ecdf_destinations_distance.x, ecdf_destinations_distance.y, label='destinations distance')
# ax.xaxis.set_major_locator(ticker.MultipleLocator(20)) # set x sticks interal
plt.grid()
plt.legend()
# ax.set_title('saturday')
ax.set_xlabel('segment distance / total shared distance')
ax.set_ylabel('ECDF')
plt.tight_layout()
fig.savefig(result_path)
|
995,723 | 7ed9fc4f962cfc46881a0d4e07f2366cee6697b6 | import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
colors = plt.cm.cool
def error_sum(pred, y_test):
    """Return the sum of squared prediction errors, sum_i (y_test[i] - pred[i])**2.

    :param pred: predicted values, array-like of shape (n,)
    :param y_test: true target values, array-like of shape (n,)
    :return: numpy scalar; 0.0 for empty inputs
    """
    # Vectorized replacement for the original element-by-element Python loop.
    pred = np.asarray(pred)
    y_test = np.asarray(y_test)
    return ((y_test - pred) ** 2).sum()
# Load the pre-split 1-D regression dataset.
X_train = np.load("regression/regression_Xtrain.npy")
y_train = np.load("regression/regression_ytrain.npy")
X_test = np.load("regression/regression_Xtest.npy")
y_test = np.load("regression/regression_ytest.npy")
# Baseline: ordinary least-squares on the raw feature.
Linear_regression = LinearRegression()
Linear_regression.fit(X_train.reshape(-1, 1), y_train)
prediction = Linear_regression.predict(X_test.reshape(-1, 1))
plt.title("Data Visualization")
plt.scatter(X_train, y_train, label="Train Data", c="red", cmap=colors)
plt.scatter(X_test, y_test, label="Test Data", c="blue", cmap=colors)
plt.legend()
plt.savefig("../plot/00.png")
plt.show()
print("MSE : " + str(mean_squared_error(y_test, prediction)))
plt.title("Linear regression")
plt.plot(X_test, prediction, label="Model Prediction")
plt.scatter(X_test, y_test, label="Training Data")
plt.legend()
# NOTE(review): path '../plot/plot/0.png' differs from the other '../plot/...'
# paths — looks like a typo; confirm the intended directory.
plt.savefig("../plot/plot/0.png")
plt.show()
mean_square_error = np.zeros((9, 1))
x_range = np.linspace(-1, 5.5, 50).reshape(-1, 1)
# NOTE(review): mean_square_error[0] (linear baseline) is overwritten below by
# the degree-1 polynomial (j - 1 == 0) — confirm this is intended.
mean_square_error[0] = mean_squared_error(y_test, prediction)
list_square_prediction = []
plt.figure(figsize=(16, 12))
# Fit polynomial regressions of degree 1..9, one subplot each.
for j in range(1, 10):
    poly = PolynomialFeatures(degree=j, include_bias=False)
    polynomial_X_train = poly.fit_transform(X_train.reshape(-1, 1))
    Polynomial_Regression = LinearRegression()
    Polynomial_Regression.fit(polynomial_X_train, y_train)
    polynomial_test = poly.fit_transform(X_test.reshape(-1, 1))
    prediction_poly = Polynomial_Regression.predict(polynomial_test)
    plt.subplot(4, 3, j)
    plt.title("Polynomial degree " + str(j) + " ")
    plt.plot(X_test, prediction_poly, label="Model prediction")
    plt.scatter(X_test, y_test, c='r', label="Training Data")
    mean_square_error[j - 1] = mean_squared_error(y_test, prediction_poly)
    #print("Squared prediction errors for all n data points : "+str(error_sum(prediction_poly,y_test)))
    list_square_prediction.append(error_sum(prediction_poly, y_test))
    plt.legend()
# NOTE(review): loop nesting reconstructed (source indentation was lost);
# savefig/show assumed to run once after the degree loop.
plt.savefig("../plot/2.png")
plt.show()
plt.title("Mean Square error over polynomial degree")
plt.plot(np.linspace(1, 9, 9).reshape(-1, 1), mean_square_error)
plt.savefig("../plot/3.png")
plt.show()
# for i in range(len(list_square_prediction)):
#     print("Polynomial degree "+str(i+1)+" - Squared prediction error : "+str(list_square_prediction[i]))
for i in range(len(mean_square_error)):
    print("Polynomial degree " + str(i + 1) +
          " - Mean Squared prediction error : " + str(mean_square_error[i]))
|
995,724 | e96c34904df19459b317eef2a3f4095e6997924e | from django.urls import path
from django.conf.urls import url, include
from . import views
from . import utils
# Routes for the factoid app; route names are used by reverse() and templates.
urlpatterns = [
    path('add/', views.add_factoid, name='add_factoid'),
    path('random/', utils.random_factoid, name='random_factoid'),
    path('user/', views.factoids_list, name='factoids_list'),
    path('<int:id>/', views.factoid_detail, name='factoid_detail'),
    path('<int:id>/delete/', views.delete_factoid, name='delete_factoid')
]
995,725 | 010a0f6ee066ea8bd101251dedc164b2ab7557fa | from django.shortcuts import render
# Create your views here.
def home_view(request):
    """Render the static home page."""
    return render(request,'testapp/home.html')
def educ_view(request):
    """Render the static education page."""
    return render(request,'testapp/edu.html')
def poli_view(request):
    """Render the static politics page."""
    return render(request,'testapp/polit.html')
def sprt_view(request):
    """Render the static sports page."""
    return render(request,'testapp/sport.html')
|
995,726 | 421cf573786b2c6924a695914f979e0935875829 | from typing import List
class Solution:
    """Three in-place solutions to LeetCode 189 (rotate array right by k)."""

    def rotate(self, nums: List[int], k: int) -> None:
        """Method 1: build the rotated list in a new array, then copy it back.

        O(n) extra space. Fix: guard empty input, which previously raised
        ZeroDivisionError from ``k %= len(nums)``.
        """
        if not nums:
            return
        k %= len(nums)
        if k == 0: return
        new_nums = nums[-k:] + nums[:-k]
        for i in range(len(nums)):
            nums[i] = new_nums[i]

    def rotate2(self, nums: List[int], k: int) -> None:
        """Method 2: brute force — shift right by one, k times (O(n*k))."""
        if not nums:  # fix: avoid k % 0 on empty input
            return
        k %= len(nums)
        if k == 0: return
        for i in range(k):
            fill = nums[len(nums) - 1]
            for j in range(len(nums)):
                fill, nums[j] = nums[j], fill

    def rotate3(self, nums: List[int], k: int) -> None:
        """Method 3: triple reversal — whole list, first k, remainder (O(1) space)."""
        if not nums:  # fix: avoid k % 0 on empty input
            return
        k %= len(nums)
        if k == 0: return
        self.rev(nums, 0, len(nums) - 1)
        self.rev(nums, 0, k - 1)
        self.rev(nums, k, len(nums) - 1)

    def rev(self, nums: List[int], start: int, end: int) -> None:
        """Reverse nums[start:end + 1] in place."""
        while start < end:
            nums[start], nums[end], start, end = nums[end], nums[start], start + 1, end - 1
|
995,727 | f3c72f955293fd4ca0c48b1a7bf40c9d08e7e37c | from ray import Ray
from vector import Vector
class Camera:
    """Fixed axis-aligned pinhole camera: eye at the origin, viewport at z = -1."""

    def __init__(self):
        # Viewport spans x in [-2, 2] and y in [-1, 1] (4:2 aspect ratio).
        self.lowerLeftCorner = Vector(-2.0, -1.0, -1.0)
        self.horizontal = Vector(4.0, 0.0, 0.0)
        self.vertical = Vector(0.0, 2.0, 0.0)
        self.origin = Vector(0.0, 0.0, 0.0)

    def getRay(self, u, v):
        """Return the ray from the eye through viewport coordinates (u, v) in [0, 1]^2."""
        offset = self.horizontal.multiply_scalar(u) + self.vertical.multiply_scalar(v)
        return Ray(self.origin, self.lowerLeftCorner + offset - self.origin)
|
995,728 | e61d3486265d7520a6beebf120b8a0ab16962768 | import numpy
import math
import cmath
def detect_coefficient(X,RATE,FREQ):
    """Goertzel power estimate of signal X at FREQ Hz.

    X holds 16-bit PCM samples (scaled by 1/32768 internally), RATE is the
    sampling rate in Hz. Returns the (unnormalized) squared magnitude of the
    Goertzel filter state after consuming all samples.
    """
    coeff = math.cos(2 * math.pi * FREQ * 1. / RATE)
    s1, s2 = 0, 0  # filter states: previous and second-previous outputs
    for sample in X:
        # Goertzel recurrence: s[n] = x[n] + 2*cos(w)*s[n-1] - s[n-2]
        s1, s2 = sample / 32768. + 2 * coeff * s1 - s2, s1
    return s2 ** 2 + s1 ** 2 - 2 * coeff * s1 * s2
def approx_coeff(X,RATE,FREQ):
    """Best Goertzel power over a small band around FREQ.

    Probes FREQ - 4, FREQ - 2, FREQ and FREQ + 2 Hz (tolerates slight
    detuning of the tone) and returns the maximum power found.
    """
    return max(detect_coefficient(X, RATE, FREQ + 2 * offset) for offset in range(-2, 2))
def DTMF(X,RATE):
    """Decode a single DTMF digit from sample buffer X, or return None.

    :param X: 16-bit PCM samples
    :param RATE: sampling rate in Hz
    :return: one of '0'-'9', 'A'-'D', '*', '#', or None when no clean
             two-tone DTMF signature is present

    Cleanup: removed the dead ``if True:`` wrapper, the duplicated
    identical threshold branches, and unused variable initializations —
    behavior is unchanged.
    """
    freqs=[697,778,852,941,1209,1336,1477,1633]
    r=[approx_coeff(X,RATE,freqs[i]) for i in range(len(freqs))]
    row_col_ascii_codes = [["1","2","3","A"],["4","5","6","B"],["7","8","9","C"],["*","0","#","D"]]
    maxval=0.0
    row=0
    col=0
    # Strongest row tone (bins 0-3).
    for i in range(4):
        if r[i]>maxval:
            maxval=r[i]
            row=i
    # Strongest column tone (bins 4-7).
    # NOTE(review): maxval deliberately carries over from the row scan (as in
    # the original) — col stays 0 when no column tone beats the row peak.
    for i in range(4,8):
        if r[i]>maxval:
            maxval=r[i]
            col = i
    # Reject when the weaker tone is far below the stronger one: a valid
    # DTMF digit has comparable row and column energy ("twist" check).
    see_digit=True
    if r[col]>r[row]:
        max_index = col
        if r[row]< (r[col]*0.016):
            see_digit=False
    else:
        max_index=row
        if r[col]< (r[row]*0.016):
            see_digit=False
    # Reject when more than two bins exceed the relative threshold, i.e. the
    # signal is not a clean two-tone mix. (The original computed the same
    # threshold in both branches of a magnitude test; collapsed here.)
    t = r[max_index] * 0.063
    peak_count=0
    for i in range(8):
        if r[i]>t:
            peak_count+=1
    if peak_count>2:
        see_digit=False
    if see_digit: return row_col_ascii_codes[row][col-4]
    return None
|
995,729 | 8be89f62e69b3c768ebf590c2cc454cc52730b95 | from collections import deque
def rotate(magnetic, direction):
    """Turn gear `magnetic` one tooth: 1 = clockwise, -1 = counter-clockwise.

    Any other direction value (e.g. 0) leaves the gear untouched.
    """
    if direction == 1:
        # deque.rotate(1) moves the tail element to the head: clockwise turn.
        gears[magnetic].rotate(1)
    elif direction == -1:
        # deque.rotate(-1) moves the head element to the tail: counter-clockwise.
        gears[magnetic].rotate(-1)
def cal(curmag, exmag, flow):
    """Propagate rotation from gear `exmag` onto its neighbor `curmag`.

    flow == 1 means curmag sits to the left of exmag (compare curmag's
    3 o'clock tooth, index 2, with exmag's 9 o'clock tooth, index 6);
    otherwise curmag is to the right and the teeth are swapped.

    Returns 1 when the touching teeth have equal poles (propagation stops),
    0 when they differ (curmag spins opposite to exmag and propagation
    continues).
    """
    if flow == 1:
        curmagcheck, exmagcheck = 2, 6
    else:
        curmagcheck, exmagcheck = 6, 2
    if gears[curmag][curmagcheck] == gears[exmag][exmagcheck]:
        return 1
    magdirection[curmag] = -1 if magdirection[exmag] == 1 else 1
    return 0
T = int(input())
for tc in range(1, T+1):
    K = int(input())
    # Four gears of 8 teeth each, listed clockwise from 12 o'clock (pole 0/1).
    gears = [deque(list(map(int, input().split()))) for _ in range(4)]
    result = 0
    for _ in range(K):
        mag, direction = map(int, input().split())
        mag -= 1  # switch to 0-based gear index
        magdirection = [0] * 4
        magdirection[mag] = direction
        # Propagate the turn leftwards from the driven gear...
        for curmagidx in range(mag-1, -1, -1):
            exmagidx = curmagidx + 1
            if cal(curmagidx, exmagidx, 1):
                break
        # ...and rightwards; cal() returning 1 stops the chain.
        for curmagidx in range(mag+1, 4):
            exmagidx = curmagidx - 1
            if cal(curmagidx, exmagidx, 0):
                break
        # Apply all accumulated rotations simultaneously.
        for ro in range(4):
            rotate(ro, magdirection[ro])
    # Score: gear i contributes 2**i when its 12 o'clock tooth is pole 1.
    for idx, mag in enumerate(gears):
        if mag[0] == 1:
            result += 2 ** idx
    print('#{} {}'.format(tc, result))
995,730 | 1f21e0ebd16d9a90a3b49abd9b2bdc67fa8662a0 | from src import Props as props
import random as rng
import numpy as np
def AddProps(system):
    """Populate `system` with all static props for the scene.

    All coordinates are hand-tuned world positions [x, y, z] and yaw angles;
    NOTE(review): these magic numbers were tuned against the level geometry —
    do not change any of them in isolation.
    """
    # Yaw of the slanted wall, derived from its slope (211/1380, nudged by 0.05).
    wall_angle = 90 + np.rad2deg(-np.arctan(211/1380-0.05))
    # Doors along the walls (position, optional yaw).
    props.MIT_door(system, [-5, 3.3, 8.22])
    props.MIT_door(system, [13.49,3.3, 1.2], wall_angle)
    props.MIT_door(system, [14.48, 3.3, -8.5], wall_angle)
    props.MIT_door(system, [13.04, 6.6, 5.5], wall_angle)
    props.MIT_door(system, [14.48, 6.6, -8.5], wall_angle)
    props.MIT_door(system, [7.25, 3.3, 12.22])
    props.MIT_door(system, [7.25, 6.6, 12.22])
    props.MIT_door(system, [13.6, 3.3, 12.22])
    props.MIT_door(system, [17.6, 3.3, 12.22])
    # Marks global origin
    # props.sodacan(system, [0,0,0], 'schrodbull.png')
    # Wall decorations: paintings/boards (position, texture, yaw, half-extents).
    props.painting(system, [13.8,2.0,-2], 'DemoBengan.png', wall_angle)
    props.painting(system, [13.94,1.8,-3.35], 'bungeebengan_notes.png', wall_angle, [0.2,0.27])
    props.painting(system, [6.46,5.0,10], 'infoboard.jpg', -90, [0.85/2, 1.46/2])
    props.painting(system, [13.34,2.0,2.2], 'floorinfo2.jpg', wall_angle, [0.6,0.66])
    props.painting(system, [12.84,5.1,7.1], 'corkboard.jpg', wall_angle, [1.0,0.5])
    props.painting(system, [13.047,5.1,5.1], 'corkboard.jpg', wall_angle, [1.0,0.5])
    props.painting(system, [13.77,5.1,-1.9], 'corkboard.jpg', wall_angle, [1.0,0.5])
    props.painting(system, [13.977,5.1,-3.9], 'corkboard.jpg', wall_angle, [1.0,0.5])
    # props.painting(system, [-3.5,2.1,5], 'walkplanck.png', 0, [0.8,0.6])
    # Small table-top props.
    props.pokeball(system, [1.75,0.85,-7.15], 0)
    props.sodacan(system, [1.25,0.85,-7.05], 'schrodbull.png', 180)
    props.sodacan(system, [4.25,0.85,-7.25], 'joultcola.png')
    # props.coin(system, [1.65,0.85,-7.25])
    s = 1.15 # scale due to high roof
    props.painting(system, [1.0, 5.0, 8.18], 'black_painting_1.png', 0, [s*1.17/2, s*0.91/2])
    props.painting(system, [3.1, 5.0, 8.18], 'black_painting_2.png', 0, [s*1.17/2, s*0.91/2])
    props.painting(system, [5.2, 5.0, 8.18], 'black_painting_3.png', 0, [s*1.17/2, s*0.91/2])
    # East wall floor vents
    props.floorvent(system, [-0.55, 0.0, -8.85])
    props.floorvent(system, [ 3.85, 0.0, -8.85])
    # South wall floor vents
    props.floorvent(system, [-5.3, 0.0, -4.15])
    props.floorvent(system, [-5.3, 0.0, 0.3])
    # In the staircase
    # props.pokeball(system, [7.35,4.85,2.5], 0)
    # props.pokeball(system, [7.355,5.05,2.55], 0)
    # On the dartboard
    # props.pokeball(system, [1.95,1.501,-3.4], -45)
    # props.sodacan(system, [2.02,1.501,-3.5], 'schrodbull.png')
    # props.sodacan(system, [1.7,1.501,-3.3], 'joultcola.png')
    # Pink dinosaur
    # props.dino(system, [5.75,0.85,-9.75], 210, .15)
    # Stack of UN boxes: rows of 5, 4 and 2 along `direction`, raised 0.4/row.
    nr_unboxes = [5,4,2]
    base_pos = np.array([ 2, 0, 6.5])
    direction = np.array([0,0,1])
    # base_pos = np.array([ 11, 0, -5])
    # direction = np.array([1,0,-0.1])
    base_pos = np.array([ 8, 0, 4.2])
    direction = np.array([1,0,-0.4])
    direction = direction/np.linalg.norm(direction)
    goals= [3,16,12,1,15,4,11,9,2,6,10,8,5,17,14,16,13]
    i=0
    for row in range(len(nr_unboxes)):
        for nr in range(nr_unboxes[row]):
            # Random yaw jitter (+/-10 deg) plus a random quarter turn, on top
            # of the base orientation aligned with `direction`.
            skew = 10*(2*rng.random()-1) + 90*rng.randint(0,3) + 180/np.pi*np.arccos(np.dot(direction, np.array([1,0,0])))
            pos = base_pos + (nr_unboxes[row]/4 - 0.5*nr)*direction + np.array([0,0.4*row,0])
            # goal_nr = rng.randint(0,17)
            goal_nr = goals[i]
            props.UNbox(system, pos, goal_nr, skew)
            i=i+1
    # grid_x = [-5.3, -5.4+4.8+4.5+4.5-0.2]
    # grid_z = [-8.8, -6+4.8+4.5+4.5-0.2]
    # dx = grid_x[1]-grid_x[0]
    # dz = grid_x[1]-grid_x[0]
    # for ball in range(400):
    #     props.eyeball(system, [grid_x[0] + rng.random()*dx, 50+rng.random(), grid_z[0] + rng.random()*dz], radius=0.15)
    # props.sponsorFlag(system, [0,0,-5], 'zert1.png')
    # props.sponsorFlag(system, [0,0,-7], 'zert2.png')
    props.measureBox(system, [-4,0.3,-6], [0.4,0.4,0.4])
    # ------- SPONSORS ---------
    # Algoryx
    props.painting(system, [-1.5,2.1,5], 'sponsorer/spons_algoryx.png', 0, [2,0.6])
    props.sponsorFlag(system, [-4.8,0,-4.4], 'sponsorer/algoryx-flagga.png', 0.8)
    # RS Components
    props.sponsorFlag(system, [-4.8,0,3.5], 'sponsorer/rs logo.png', 0.8)
    props.sponsorFlag(system, [-4.8,0,-3.4], 'sponsorer/rs logo.png', -0.8)
    # ProAnt
    props.sponsorFlag(system, [-4,0,4.5], 'sponsorer/proant-logo.png', -0.8)
    props.sponsorFlag(system, [-1,0,-8], 'sponsorer/proant-logo.png', -0.8)
    # PODIUM
    # props.podium(system, [-3.6,0.3,-7.25], 1)
995,731 | 269e127f12d5ede1600ee2e470e409c7290a62ed | from google.appengine.ext import ndb
class Note(ndb.Model):
    """Datastore entity for a text note, keyed informally by owner/subject."""
    date_created = ndb.DateTimeProperty(auto_now_add=True)  # set once at insert
    text = ndb.StringProperty()
    owner = ndb.StringProperty()  # presumably a user id string — see ModelWithUser
    subject = ndb.StringProperty()
    title = ndb.StringProperty()
class Song(ndb.Model):
    """Datastore entity for a song reference (external link) tied to a subject."""
    title = ndb.StringProperty()
    artist = ndb.StringProperty()
    owner = ndb.StringProperty()  # presumably a user id string — see ModelWithUser
    subject = ndb.StringProperty()
    link = ndb.StringProperty()  # URL to the song
class Subject(ndb.Model):
    """Datastore entity naming a subject owned by a user."""
    name = ndb.StringProperty()
    owner = ndb.StringProperty()
class ModelWithUser(ndb.Model):
    """Datastore entity tracking a user account and its activity timestamps."""
    nickname = ndb.StringProperty()
    user_id = ndb.StringProperty()
    joined_on = ndb.DateTimeProperty(auto_now_add=True) #changes when it is first created
    updated_on = ndb.DateTimeProperty(auto_now=True) #changes whenever its active
    first_name = ndb.StringProperty()
    last_name = ndb.StringProperty()
    @classmethod
    def get_by_user(cls, user):
        """Return the first entity whose user_id matches `user.user_id()`, or None."""
        return cls.query().filter(cls.user_id == user.user_id()).get()
|
995,732 | 0b7cc39e8150b2c13b86fc1f211569702bf7d50c | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def findDuplicateSubtrees(self, root: Optional[TreeNode]) -> List[Optional[TreeNode]]:
        """Return one representative root per duplicate subtree structure."""
        self.ans = []
        self.visited = {}

        def serialize(node):
            # Pre-order serialization with '.'-joined fields; '' marks an
            # absent child, so distinct shapes map to distinct strings.
            if not node:
                return ''
            key = '.'.join([str(node.val), serialize(node.left), serialize(node.right)])
            count = self.visited.get(key, 0) + 1
            self.visited[key] = count
            if count == 2:
                # Exactly the second sighting: report each shape once.
                self.ans.append(node)
            return key

        serialize(root)
        return self.ans
#Runtime: 56 ms, faster than 98.42% of Python3 online submissions for Find Duplicate Subtrees.
#Memory Usage: 22.1 MB, less than 71.51% of Python3 online submissions for Find Duplicate Subtrees.
#Fu-Ti, Hsu
#shifty049@gmail.com |
995,733 | 35e8a302212eb2ffca9c834bc4b1f29bc533e27d | # Unit Testing
def is_equal(a, b):
    """Return True when a equals b, False otherwise.

    Fix: removed the leftover debug print('sai') that fired on the *equal*
    branch ('sai' = 'wrong'), which was both noisy and misleading.
    """
    return bool(a == b)
995,734 | c752d8e4f6a07713e94dd84b981365caa273462c | from aksdp.data import RawData, DataFrameData
from aksdp.repository import S3FileRepository, LocalFileRepository
import unittest
from pathlib import Path
import os
class TestS3FileRepository(unittest.TestCase):
    """Round-trip tests for S3FileRepository (requires live AWS credentials)."""

    def setUp(self):
        # TODO: running this test requires an AWS access key and a test bucket
        self.access_key_id = os.getenv("aws_access_key_id")
        self.secret_access_key = os.getenv("aws_secret_access_key")
        self.s3file_url = os.getenv("s3file_url")

    def test_raw(self):
        # Read a local file and store it to S3 as raw bytes.
        repo = LocalFileRepository(Path(os.path.dirname(__file__)) / Path("titanic.csv"))
        data = RawData.load(repo)
        self.assertIsNotNone(data)
        repo_s3 = S3FileRepository(self.access_key_id, self.secret_access_key, self.s3file_url)
        data.repository = repo_s3
        data.save()

    def test_dataframe(self):
        # Read a local file and store it to S3 as a DataFrame.
        repo = LocalFileRepository(Path(os.path.dirname(__file__)) / Path("titanic.csv"))
        data = DataFrameData.load(repo)
        repo_s3 = S3FileRepository(self.access_key_id, self.secret_access_key, self.s3file_url)
        data.repository = repo_s3
        data.save()
        # Load the file back from S3 and check it is non-empty.
        data2 = DataFrameData.load(repo_s3)
        self.assertTrue(len(data2.content) > 0)
|
995,735 | 3082a426e64091c7c67d7d21a41d3325ab25cf90 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 9 13:11:44 2021
@author: prcohen
"""
import copy
import numpy as np
import numpy.ma as ma
from numpy.random import default_rng
rng = default_rng(1108) # stuff for random sampling; fix random seed
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib as mpl
import ternary
#______________________ Useful Vectorized Functions _________________________
def ps (x):
    '''Print every row of the 2-D array x with 3-decimal precision, then a blank line.'''
    for i in range(len(x)):
        row = x[i,:]
        print(*(f"{val:.3f}\t" for val in row))
    print()
def row_sums (a):
    '''Sum of each row of the 2-D array a.'''
    sums = np.sum(a, axis=1)
    return sums
def col_sums (a):
    '''Sum of each column of the 2-D array a (result has length a.shape[1]).

    BUG FIX: this was `axis = 1` — a copy-paste of row_sums, returning row
    sums instead of column sums. Column reduction is axis=0, consistent
    with col_margins below.
    '''
    return np.sum(a, axis=0)
def row_margins (a):
    '''Each row's sum divided by the number of columns (i.e. the row mean).'''
    n_cols = a.shape[1]
    return np.sum(a, axis=1) / n_cols
def col_margins (a):
    '''Each column's sum divided by the number of rows (i.e. the column mean).'''
    n_rows = a.shape[0]
    return np.sum(a, axis=0) / n_rows
def row_renorm (a):
    '''Rescale every row of a so that it sums to 1 (rows as distributions).'''
    scale = row_sums(a)[:, np.newaxis]
    return a / scale
def index2onehot (index, shape):
    '''One-hot encode column choices.

    `index` is a length-r np.array of column indices in [0, c). Returns an
    array of the given (r, c) `shape` with a 1 in each row's chosen column;
    e.g. for c = 3 and index = np.array([0,2,1]),
    index2onehot(index) -> [[1. 0. 0.],[0. 0. 1.],[0. 1. 0.]]
    '''
    onehot = np.zeros(shape)
    rows = np.indices(index.shape)[0]  # 0..r-1, one entry per row of `index`
    onehot[rows, index] = 1
    return onehot
def onehot2index (x):
    '''Column index of the maximum in each row (inverse of index2onehot).'''
    indices = np.argmax(x, axis=1)
    return indices
def row_min_onehot (scores):
    '''One-hot encoding of the column holding each row's minimum score.
    Ties resolve to the first (lowest-index) column.'''
    argmins = np.argmin(scores, axis=1)
    return index2onehot(argmins, scores.shape)
def row_max_onehot (scores):
    '''One-hot encoding of the column holding each row's maximum score.
    Ties resolve to the first (lowest-index) column.'''
    argmaxes = np.argmax(scores, axis=1)
    return index2onehot(argmaxes, scores.shape)
def row_sample (probs):
    ''' probs is a 2D array in which each row is a multinomial distribution. This
    returns a one-hot encoding of the colum selected by sampling from each row.
    For machine learning purposes, this choice must run fast. Parts of the solution are
    https://bit.ly/3AXSWJV, https://bit.ly/3peWVzv, https://bit.ly/3G3aSq3 '''
    # Inverse-CDF sampling: one uniform draw per row (module-level `rng`),
    # then the first column whose cumulative probability exceeds the draw.
    chosen_col = (probs.cumsum(1) > rng.random(probs.shape[0])[:,None]).argmax(1)
    return index2onehot(chosen_col,probs.shape)
def ged (p0, p1):
    ''' Generalized euclidean distance. p0 and p1 must both have the same number
    of columns, c, and p0 must be broadcastable to p1. Each row is treated as a
    point in c-dimensional space. Returns an array of per-row distances.
    np.linalg.norm is used so other norms could be substituted; it equals
    the familiar np.sum((p0-p1)**2, axis=1)**.5 form. '''
    diffs = p0 - p1
    return np.linalg.norm(diffs, axis = 1)
def move_away (probs, index):
    """Zero out row `index` of a copy of `probs`, redistributing its mass
    evenly over the remaining entries via broadcasting.

    NOTE(review): `x[index]` selects a ROW while `n` counts columns-1, and
    the broadcast of shape (c, 1) onto (r, c) only works when the array is
    square or the shapes otherwise align — confirm intended input shape.
    """
    n = probs.shape[1] - 1
    x = copy.copy(probs)  # copy so the caller's array is not mutated
    redistribute = (x[index][:,np.newaxis])/n
    x += redistribute
    x[index]=0
    return x
#%%
# Global matplotlib defaults: high-dpi, small figures, small font.
mpl.rcParams['figure.dpi'] = 200
mpl.rcParams['figure.figsize'] = (1.5,1.5)
plt.rcParams.update({'font.size':5})
def hist (x,bins=25,ticks=8):
    """Draw a small histogram of x with `bins` bins and about `ticks` x-ticks."""
    plt.locator_params(axis='x', nbins=ticks)
    plt.rcParams.update({'font.size':5})
    plt.hist(x,bins=bins)
#%%
def plot_ternary (vertex_labels, points, special_points = None, color_by=None,
                  color_by_label=None, bounds = None, figsize = (4,4)):
    ''' wraps Marc Harper's python-ternary package https://github.com/marcharper/python-ternary

    vertex_labels : three corner labels (left, top, right)
    points        : points on the simplex to scatter
    special_points: optional ([points], [colors]) pair drawn as squares
    color_by      : optional per-point values mapped through RdYlGn
    bounds        : optional color-bin boundaries for the colormap
    '''
    mpl.rcParams['figure.dpi'] = 200
    mpl.rcParams['figure.figsize'] = figsize
    ### Scatter Plot
    scale = 1.0
    fontsize = 8
    offset = 0.03
    figure, tax = ternary.figure(scale=scale)
    #tax.set_title("Decision Space", fontsize=12)
    tax.boundary(linewidth= .5)
    tax.left_corner_label(vertex_labels[0], fontsize=12)
    tax.top_corner_label(vertex_labels[1], fontsize=12)
    tax.right_corner_label(vertex_labels[2], fontsize=12)
    tax.get_axes().axis('off')
    tax.gridlines(multiple=0.2, color="black")
    tax.clear_matplotlib_ticks()
    tax.ticks(axis='lbr', linewidth=1, multiple=0.1, fontsize=fontsize, offset=offset, tick_formats="%.1f")
    if color_by is not None:
        # Color points by the supplied values, with either binned or
        # min/max-normalized mapping, plus a horizontal colorbar.
        cmap = plt.cm.RdYlGn
        if bounds is not None:
            norm = mpl.colors.BoundaryNorm(bounds,cmap.N)
        else:
            norm = mpl.colors.Normalize(vmin=np.min(color_by), vmax=np.max(color_by))
        color = cmap(norm(list(color_by)))
        figure.colorbar(
            mpl.cm.ScalarMappable(norm=norm, cmap=cmap),
            orientation='horizontal', label=color_by_label)
        tax.scatter(points, marker='o', color=color, s = 3, cmap=cmap)
    else:
        tax.scatter(points, marker='o', color='black', s = 3)
    if special_points is not None:
        # Highlighted points drawn as larger squares with explicit colors.
        for p,c in zip(special_points[0],special_points[1]):
            tax.scatter([p], marker='s', color = [c], s = 10)
    tax.gridlines(multiple=5, color="blue")
    #tax.legend(loc = 'upper right',cmap=cmap)
    #tax.ticks(axis='lbr', linewidth=1, multiple=5)
    tax.show()
    ternary.plt.show()
|
995,736 | cbf3cb73451a5869dd073e6990268bcef092d5f7 | def main():
cadena = input()
probar = input()
inversa = cadena[-1::-1]
if (probar==inversa):
print('YES')
else:
print('NO')
main() |
995,737 | 5515b3232f82fbfcea727891052b532f07a2a03a | import itertools
import logging
from typing import List, Dict, Tuple, Optional
import numpy as np
from checkmate.core.dfgraph import DFGraph
from checkmate.core.schedule import OperatorEvaluation, AllocateRegister, DeallocateRegister, Schedule, SchedulerAuxData
from checkmate.core.utils.definitions import active_env_var_flags
from checkmate.core.utils.timer import Timer
class InfeasibleScheduleError(ValueError):
    """Raised when a schedule evaluates an op whose inputs are not resident."""
    pass
class ScheduleBuilder:
    """Incrementally builds a Schedule while tracking register residency,
    RAM high-water mark and total CPU cost."""

    def __init__(self, g, verbosity: int = 2):
        self.max_ram = 0      # peak activation RAM observed so far
        self.current_ram = 0  # RAM held by currently live registers
        self.total_cpu = 0    # accumulated compute cost of all evaluations
        self.g = g            # dependency graph supplying cost_ram / cost_cpu
        self.schedule = []  # type: Schedule
        self.live_registers = {}  # type: Dict[int, int]  # op id -> register id
        self.next_free_register_id = 0
        self.verbosity = verbosity
        self.ram_timeline = []  # type: List[int]  # RAM sampled at each event

    def is_op_cached(self, op_id: int):
        # True when op_id's output currently lives in a register.
        return op_id in self.live_registers.keys()

    def allocate_register(self, op_id: int):
        """
        Schedule a register allocation
        :param op_id: ID for operation whose output will be stored in this register,
        :return: the newly allocated register ID
        """
        if op_id in self.live_registers.keys():
            # Duplicate allocation: reuse the existing register.
            if self.verbosity >= 2:
                logging.error("Double alloc register for op #{}, reusing reg #{}".format(op_id, self.live_registers[op_id]))
            return self.live_registers[op_id]
        reg = AllocateRegister(self.next_free_register_id, op_id, self.g.cost_ram[op_id])
        self.live_registers[op_id] = reg.register_id
        self.schedule.append(reg)
        self.next_free_register_id += 1
        # NOTE(review): max_ram / timeline are sampled BEFORE adding this
        # register's cost, so the most recent allocation is not reflected in
        # max_ram until the next event — confirm this is intended.
        self.max_ram = max(self.max_ram, self.current_ram)
        self.ram_timeline.append(self.current_ram)
        self.current_ram += self.g.cost_ram[op_id]
        return reg.register_id

    def run_operator(self, op_id: int, update_aux_vars: bool):
        """Schedule evaluation of op_id; all predecessors must be resident,
        otherwise InfeasibleScheduleError is raised."""
        if not all([pred in self.live_registers.keys() for pred in self.g.predecessors(op_id)]):
            raise InfeasibleScheduleError(
                "Dependency not fulfilled for op #{}, ops in ram now are {} but I need {}".format(
                    op_id, set(self.live_registers.keys()), self.g.predecessors(op_id)
                )
            )
        out_reg = self.allocate_register(op_id)
        in_regs = {pred_id: self.live_registers[pred_id] for pred_id in self.g.predecessors(op_id)}
        eval_op = OperatorEvaluation(
            op_id,
            in_regs,
            out_reg,
            self.g.cost_cpu[op_id],
            update_aux_vars=update_aux_vars,
            is_backwards=op_id not in self.g.vfwd,  # ops outside vfwd are backward ops
        )
        self.schedule.append(eval_op)
        self.total_cpu += self.g.cost_cpu[op_id]
        self.ram_timeline.append(self.current_ram)

    def deallocate_register(self, op_id: int):
        """
        Schedule a register deallocation
        :param op_id: ID for operation whose output will be stored in this register
        """
        if op_id not in self.live_registers.keys():
            # Popping below would raise KeyError after this warning.
            print("WARNING! Double free output register for op #{}".format(op_id))
        reg_id = self.live_registers.pop(op_id)
        self.schedule.append(DeallocateRegister(op_id, reg_id))
        self.current_ram -= self.g.cost_ram[op_id]
        self.ram_timeline.append(self.current_ram)
def schedule_from_rs(g: DFGraph, r: np.ndarray, s: np.ndarray) -> Tuple[Optional[Schedule], Optional[SchedulerAuxData]]:
    """Materialize a concrete Schedule from an (R, S) scheduling matrix pair.

    :param g: dependency graph
    :param r: recomputation matrix; r[t, i] == 1 when op i runs in stage t
    :param s: checkpoint matrix; s[t, i] == 1 when op i's output is retained into stage t
    :return: (schedule, aux data), or (None, None) when r/s are infeasible
    """
    debug_collect_ram_usage = "DEBUG_SCHEDULER_RAM" in active_env_var_flags
    if r is None or s is None:
        return None, None  # infeasible
    T = g.size

    def _used_after(t_, u_, i_):
        """Returns True if v_u is used after v_i in stage t"""
        is_retained_snapshot = t_ < T - 1 and s[t_ + 1, u_] == 1
        is_used_by_successor = not all([r[t_, v] == 0 or v <= i_ for v in g.successors(u_)])
        return is_retained_snapshot or is_used_by_successor

    with Timer("schedule_rs_matrix") as schedule_timer:
        # compute last usage to determine whether to update auxiliary variables
        # last_used = {i: max([t for t in range(T) if r[t, i] == 1]) for i in range(T)}
        # FIX: dtype was np.int, which is deprecated since NumPy 1.20 and
        # removed in 1.24; the builtin int is the documented replacement.
        mem_usage = np.zeros((T, T), dtype=int)
        sb = ScheduleBuilder(g, verbosity=1)
        for t in range(T):
            # Free unused checkpoints
            if debug_collect_ram_usage:
                for i in filter(lambda x: sb.is_op_cached(x), range(T)):
                    if not _used_after(t, i, i):
                        sb.deallocate_register(i)
            for i in range(T):
                if r[t, i] == 1:
                    # sb.run_operator(i, last_used[i] == t)
                    sb.run_operator(i, False)  # todo(paras) prune away last_used in favor of recompute blacklist
                    if debug_collect_ram_usage:
                        mem_usage[t, i] = sb.current_ram + g.cost_ram_fixed
                    # Free memory
                    if debug_collect_ram_usage:
                        for u in filter(lambda x: sb.is_op_cached(x), itertools.chain(g.predecessors(i), [i])):
                            if not _used_after(t, u, i):
                                sb.deallocate_register(u)
        total_ram = sb.max_ram + g.cost_ram_fixed
        ram_timeline = [mem + g.cost_ram_fixed for mem in sb.ram_timeline]
    return (
        sb.schedule,
        SchedulerAuxData(
            R=r,
            S=s,
            cpu=sb.total_cpu,
            peak_ram=total_ram,
            activation_ram=sb.max_ram,
            mem_grid=mem_usage,
            mem_timeline=ram_timeline,
            schedule_time_s=schedule_timer.elapsed,
        ),
    )
|
995,738 | 50d20ae0993ead6d3a5400227513a7eae0d7651d | #!/usr/bin/env python
import sys
import math
import json
import tf_conversions
import tf2_geometry_msgs
import rospy
import tf2_ros
import tf.transformations as tf_trans
# from tf.transformations import quaternion_from_euler, euler_from_quaternion, concatenate_matrices, translation_matrix, quaternion_matrix
from geometry_msgs.msg import TransformStamped, Vector3, PoseStamped, Transform, Quaternion
from aruco_msgs.msg import MarkerArray
from crazyflie_gazebo.msg import Position
def marker_callback(msg):
    """Update the map -> odom estimate from each detected ArUco marker."""
    for marker in msg.markers:
        broadcast_marker_transform(marker)
def broadcast_marker_transform(m):
    """Estimate the map -> cf1/odom transform from one observed ArUco marker.

    Chain: marker pose in camera frame -> odom frame -> inverted to get odom
    in the marker's frame -> transformed to map via the known static marker
    pose. Only x, y and yaw are kept (z, roll, pitch zeroed) before updating
    the module-level `trans`, which main() broadcasts continuously.
    """
    global trans
    odom_map = PoseStamped()
    odom_aruco = PoseStamped()
    t = TransformStamped()
    m.header.frame_id = 'cf1/camera_link'
    if not tf_buf.can_transform(frame_id, m.header.frame_id, m.header.stamp, rospy.Duration(tf_timeout)):
        rospy.logwarn_throttle(5.0, 'No transform from %s to %s', m.header.frame_id, frame_id)
        return
    # get the aruco's pose in odom frame
    marker = tf_buf.transform(PoseStamped(header=m.header, pose=m.pose.pose), frame_id)
    # inverse the transform, get the odom's pose in aruco frame
    trsl =[marker.pose.position.x, marker.pose.position.y, marker.pose.position.z]
    rot = [marker.pose.orientation.x, marker.pose.orientation.y, marker.pose.orientation.z, marker.pose.orientation.w]
    transform = tf_trans.concatenate_matrices(tf_trans.translation_matrix(trsl), tf_trans.quaternion_matrix(rot))
    inversed_transform = tf_trans.inverse_matrix(transform)
    odom_aruco.header = marker.header
    odom_aruco.header.frame_id = 'aruco/marker' + str(m.id)
    odom_aruco.pose.position = Vector3(*tf_trans.translation_from_matrix(inversed_transform))
    odom_aruco.pose.orientation = Quaternion(*tf_trans.quaternion_from_matrix(inversed_transform))
    # print(odom_aruco)
    # get the odom's pose in map frame
    if not tf_buf.can_transform(odom_aruco.header.frame_id, 'map', odom_aruco.header.stamp,rospy.Duration(tf_timeout)):
        rospy.logwarn_throttle(5.0, 'No transform from %s to map' % odom_aruco.header.frame_id)
        return
    # Transform from aruco to map
    odom_map = tf_buf.transform(odom_aruco, 'map')
    # print(odom_map)
    # only give ywa, x, y to odom/map transform
    t = TransformStamped()
    t.transform.translation = odom_map.pose.position
    t.transform.translation.z = 0
    roll, pitch, yaw = tf_trans.euler_from_quaternion((odom_map.pose.orientation.x,
                                                       odom_map.pose.orientation.y,
                                                       odom_map.pose.orientation.z,
                                                       odom_map.pose.orientation.w))
    (t.transform.rotation.x,
     t.transform.rotation.y,
     t.transform.rotation.z,
     t.transform.rotation.w) = tf_trans.quaternion_from_euler(0,
                                                              0,
                                                              yaw)
    trans.transform = t.transform
    rospy.sleep(rospy.Duration(1)) # this is the rate at which we update the odom from measurements
# Node setup: subscribe to ArUco detections and prepare the tf machinery.
rospy.init_node('ArucoLocalization')
sub_marker = rospy.Subscriber('/aruco/markers', MarkerArray, marker_callback, queue_size=1)
tf_buf = tf2_ros.Buffer()
tf_lstn = tf2_ros.TransformListener(tf_buf)
br = tf2_ros.TransformBroadcaster()
tf_timeout = 2  # seconds to wait for a tf lookup
frame_id = 'cf1/odom'
# Shared map -> odom estimate; starts as identity (w = 1) and is updated
# by broadcast_marker_transform() on each marker observation.
trans = TransformStamped()
trans.transform.rotation.w = 1
trans.header.frame_id = 'map'
trans.child_frame_id = 'cf1/odom'
def main():
    """Continuously re-broadcast the latest map -> cf1/odom transform."""
    global trans
    rate = rospy.Rate(40) # a too fast rate will cause a lot of wobbling in the drone between aruco measurements
    while not rospy.is_shutdown():
        trans.header.stamp = rospy.Time.now()
        br.sendTransform(trans)
        rate.sleep()


if __name__ == '__main__':
    main()
|
995,739 | 0610dc5dbcbf1f512f6285bdb1beff7f96cdd032 | """This class performs database queries for the notification_spool table"""
import datetime
__license__ = "GPLv3"
class Notification:
    """Inserts rows into the notification_spool table for a given process."""

    def __init__(self, db, verbose, notification_type, notification_origin, process_id):
        """
        Constructor method for the Notification class.

        :param db                 : Database class object
         :type db                 : object
        :param verbose            : whether to be verbose
         :type verbose            : bool
        :param notification_type  : notification type to use for the notification_spool table
         :type notification_type  : str
        :param notification_origin: notification origin to use for the notification_spool table
         :type notification_origin: str
        :param process_id         : process ID to use for the notification_spool table
         :type process_id         : str
        """
        self.db = db
        self.verbose = verbose
        self.notification_type = notification_type
        self.notification_origin = notification_origin
        self.process_id = process_id

    def write_to_notification_spool(self, message, is_error, is_verbose, center_id=None):
        """
        Insert a row in the notification_spool table.

        :param message   : message to be inserted in the notification_spool table
         :type message   : str
        :param is_error  : whether the notification is an error or not ('Y' or 'N')
         :type is_error  : str
        :param is_verbose: whether the notification is verbose or not ('Y' or 'N')
         :type is_verbose: str
        :param center_id : the CenterID associated with the notification when applicable
         :type center_id : int

        :return:
        """
        # Resolve (or create) the NotificationTypeID for this type.
        type_id = self.db.grep_id_from_lookup_table(
            id_field_name='NotificationTypeID',
            table_name='notification_types',
            where_field_name='Type',
            where_value=self.notification_type,
            insert_if_not_found=True
        )
        col_names = (
            'NotificationTypeID', 'TimeSpooled', 'Message', 'Origin',
            'ProcessID', 'Error', 'Verbose'
        )
        values = (
            type_id, datetime.datetime.now(), message, self.notification_origin,
            self.process_id, is_error, is_verbose
        )
        # CenterID column is only included when a center is supplied.
        if center_id:
            col_names = col_names + ('CenterID',)
            values = values + (center_id,)
        self.db.insert(
            table_name = 'notification_spool',
            column_names = col_names,
            values = values
        )
995,740 | 0b6b362c8d56b4399304d42a9b9ca1d71d3ba473 | from torch import nn
from manopth import rodrigues_layer
from meshreg.datasets.queries import BaseQueries, TransQueries
from meshreg.models import project
from libyana.camutils import project as camproject
class ObjBranch(nn.Module):
    """Decodes predicted scale/translation/rotation into posed object vertices
    and their 2D projections under the sample's camera intrinsics."""

    def __init__(self, trans_factor=1, scale_factor=1):
        """
        Args:
            trans_factor: Scaling parameter to insure translation and scale
                are updated similarly during training (if one is updated
                much more than the other, training is slowed down, because
                for instance only the variation of translation or scale
                significantly influences the final loss variation)
            scale_factor: Scaling parameter to insure translation and scale
                are updated similarly during training
        """
        super(ObjBranch, self).__init__()
        self.trans_factor = trans_factor
        self.scale_factor = scale_factor
        self.inp_res = [256, 256]  # NOTE(review): not referenced below — confirm it is used elsewhere

    def forward(self, sample, scaletrans=None, scale=None, trans=None, rotaxisang=None):
        """
        Args:
            scaletrans: torch.Tensor of shape [batch_size, channels] with channels == 6
                with in first position the predicted scale values and in 2,3 the
                predicted translation values, and global rotation encoded as axis-angles
                in channel positions 4,5,6

        Either pass `scaletrans` packed, or the individual `scale`, `trans`
        and `rotaxisang` tensors. Returns a dict of posed 3D vertices/corners,
        their 2D projections, and the raw/scaled pose parameters.
        """
        if scaletrans is None:
            batch_size = scale.shape[0]
        else:
            batch_size = scaletrans.shape[0]
        # Unpack channels when the packed form was given.
        if scale is None:
            scale = scaletrans[:, :1]
        if trans is None:
            trans = scaletrans[:, 1:3]
        if rotaxisang is None:
            rotaxisang = scaletrans[:, 3:]
        # Get rotation matrixes from axis-angles
        rotmat = rodrigues_layer.batch_rodrigues(rotaxisang).view(rotaxisang.shape[0], 3, 3)
        canobjverts = sample[BaseQueries.OBJCANVERTS].cuda()
        # Rotate canonical vertices into the predicted orientation.
        rotobjverts = rotmat.bmm(canobjverts.float().transpose(1, 2)).transpose(1, 2)
        # Rescale raw network outputs into metric translation/scale.
        final_trans = trans.unsqueeze(1) * self.trans_factor
        final_scale = scale.view(batch_size, 1, 1) * self.scale_factor
        height, width = tuple(sample[TransQueries.IMAGE].shape[2:])
        camintr = sample[TransQueries.CAMINTR].cuda()
        objverts3d, center3d = project.recover_3d_proj(
            rotobjverts, camintr, final_scale, final_trans, input_res=(width, height)
        )
        # Recover 2D positions given camera intrinsic parameters and object vertex
        # coordinates in camera coordinate reference
        pred_objverts2d = camproject.batch_proj2d(objverts3d, camintr)
        if BaseQueries.OBJCORNERS3D in sample:
            # Same pose applied to the canonical bounding-box corners.
            canobjcorners = sample[BaseQueries.OBJCANCORNERS].cuda()
            rotobjcorners = rotmat.bmm(canobjcorners.float().transpose(1, 2)).transpose(1, 2)
            recov_objcorners3d = rotobjcorners + center3d
            pred_objcorners2d = camproject.batch_proj2d(rotobjcorners + center3d, camintr)
        else:
            pred_objcorners2d = None
            recov_objcorners3d = None
            rotobjcorners = None
        return {
            "obj_verts2d": pred_objverts2d,
            "obj_verts3d": rotobjverts,
            "recov_objverts3d": objverts3d,
            "recov_objcorners3d": recov_objcorners3d,
            "obj_scale": final_scale,
            "obj_prescale": scale,
            "obj_prerot": rotaxisang,
            "obj_trans": final_trans,
            "obj_pretrans": trans,
            "obj_corners2d": pred_objcorners2d,
            "obj_corners3d": rotobjcorners,
        }
|
995,741 | 1ead0856c2cb362a6211a787352f0299502c622e | # -*- coding: utf-8 -*-
import os
from datetime import date
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from texting.models import ClientLogo, Client
from texting.forms import ClientForm, OfferForm, ContestForm
class TextingClientTestCase(TestCase):
    """Tests for ClientForm validation and the client-creation view."""

    def test_client_form(self):
        """ClientForm requires both ``name`` and ``description``."""
        bad_data = [
            ({'name': 'another name', 'description': ''},
             {'description': ['This field is required.']}),
            ({'name': '', 'description': '1234567891'},
             {'name': ['This field is required.']}),
        ]
        for (data, field_errors) in bad_data:
            form = ClientForm(data)
            self.assertFalse(form.is_valid())
            self.assertEqual(len(form.errors), len(field_errors))
            for (field, errors) in field_errors.items():
                form_errors = form.errors[field]
                self.assertEqual(len(form_errors), len(errors))
                self.assertTrue(all(e in form_errors for e in errors))
        form = ClientForm({'name': 'valid name', 'description': 'valid description'})
        self.assertTrue(form.is_valid())

    def test_client_create_view(self):
        """Staff users can create a Client; invalid posts re-render the form."""
        u = User.objects.create_user('user', 'mail@example.com', 'password')
        u.is_staff = True
        u.save()
        self.client.login(username='user', password='password')
        # invalid form submission
        r = self.client.post(reverse('texting_client_create'), {'name': 'a name', 'description': ''})
        self.assertEqual(r.status_code, 200)
        self.assertFormError(r, 'client_form', 'description', 'This field is required.')
        self.assertFalse(Client.objects.all())
        # valid form submission
        r = self.client.post(reverse('texting_client_create'), {'name': 'a name', 'description': 'a description'})
        self.assertEqual(r.status_code, 302)
        c = Client.objects.get()
        self.assertEqual(c.name, 'a name')
        self.assertEqual(c.description, 'a description')

    def atest_client_create_with_logo(self):
        """Disabled test (``atest`` prefix hides it from the runner).

        Fixed while here: the GIF must be opened in *binary* mode for a file
        upload (text mode corrupts the bytes), and ``with`` guarantees the
        handle is closed even if the POST raises.
        """
        logo_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'test_logo.gif')
        with open(logo_path, 'rb') as logo_file:
            r = self.client.post(
                reverse('texting_client_create'),
                {
                    'name': 'another name',
                    'description': 'another description',
                    'new_logo_image': logo_file
                }
            )
        l = ClientLogo.objects.get()
        c = Client.objects.get()
        self.assertEqual(c.logo, l)
class TextingPromotionTestCase(TestCase):
    """Validation tests for OfferForm and ContestForm."""

    def setUp(self):
        # Stored as ``texting_client`` so we do not shadow Django's built-in
        # ``self.client`` HTTP test client (the original assignment did,
        # which silently breaks any ``self.client.get/post`` in this class).
        self.texting_client = Client.objects.create(name='client name',
                                                    description='client description')
        self.site = Site.objects.create(domain='example.com', name='test site')

    def _offer_data(self, **overrides):
        """Return a valid OfferForm payload with ``overrides`` applied."""
        data = {
            'client': self.texting_client.id,
            'name': 'promo name',
            'code': 'codecode',
            'sites': [self.site.id],
            'startdate': '2011-06-13',
            'enddate': '2011-06-20',
            'texts_per_week': 3,
            'vibes_id': 'vibesid',
        }
        data.update(overrides)
        return data

    def _contest_data(self, **overrides):
        """Return a valid ContestForm payload with ``overrides`` applied."""
        data = {
            'client': self.texting_client.id,
            'name': 'promo name',
            'code': 'codecode',
            'sites': [self.site.id],
            'startdate': '2011-06-13',
            'enddate': '2011-06-20',
        }
        data.update(overrides)
        return data

    def _assert_form_errors(self, form_class, bad_data):
        """Each (data, field_errors) pair must fail with exactly those errors."""
        for (data, field_errors) in bad_data:
            form = form_class(data)
            self.assertFalse(form.is_valid())
            self.assertEqual(len(form.errors), len(field_errors))
            for (field, errors) in field_errors.items():
                if field:
                    form_errors = form.errors[field]
                else:
                    # A ``None`` key means form-wide (non-field) errors.
                    form_errors = form.non_field_errors()
                self.assertEqual(len(form_errors), len(errors))
                self.assertTrue(all(e in form_errors for e in errors))

    def test_offer_form(self):
        required = 'This field is required.'
        bad_data = [
            (self._offer_data(client=''), {'client': [required]}),
            (self._offer_data(name=''), {'name': [required]}),
            (self._offer_data(code=''), {'code': [required]}),
            (self._offer_data(sites=[]), {'sites': [required]}),
            (self._offer_data(startdate=''), {'startdate': [required]}),
            (self._offer_data(enddate=''), {'enddate': [required]}),
            # start == end must be rejected with a non-field error
            (self._offer_data(enddate='2011-06-13'),
             {None: ['Start date must be before end date.']}),
            (self._offer_data(texts_per_week=None),
             {'texts_per_week': [required]}),
            (self._offer_data(vibes_id=''), {'vibes_id': [required]}),
        ]
        self._assert_form_errors(OfferForm, bad_data)
        self.assertTrue(OfferForm(self._offer_data()).is_valid())

    def test_contest_form(self):
        required = 'This field is required.'
        bad_data = [
            (self._contest_data(name=''), {'name': [required]}),
            (self._contest_data(code=''), {'code': [required]}),
            (self._contest_data(sites=[]), {'sites': [required]}),
            (self._contest_data(startdate=''), {'startdate': [required]}),
            (self._contest_data(enddate=''), {'enddate': [required]}),
            (self._contest_data(enddate='2011-06-13'),
             {None: ['Start date must be before end date.']}),
        ]
        self._assert_form_errors(ContestForm, bad_data)
        self.assertTrue(ContestForm(self._contest_data()).is_valid())
class TextingViewsTestCase(TestCase):
    """Smoke tests for the public texting views."""

    def test_home(self):
        self.assertEqual(self.client.get(reverse('texting_home')).status_code, 200)

    def test_client_list(self):
        self.assertEqual(self.client.get(reverse('texting_client_list')).status_code, 200)

    def test_client_detail(self):
        # Unknown id -> 404.
        response = self.client.get(reverse('texting_client_detail', kwargs={'client_id': 1}))
        self.assertEqual(response.status_code, 404)
        client_obj = Client.objects.create(name='client name', description='description')
        response = self.client.get(reverse('texting_client_detail',
                                           kwargs={'client_id': client_obj.id}))
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.context['client'])
        # Bare lookups: raise KeyError if the view forgot to supply them.
        response.context['offers']
        response.context['contests']

    def test_offers_list(self):
        self.assertEqual(self.client.get(reverse('texting_offers_list')).status_code, 200)

    def test_offers_manage(self):
        # Anonymous users are redirected away from the manage page.
        response = self.client.get(reverse('texting_offers_manage'))
        self.assertEqual(response.status_code, 302)
        # NOTE(review): the empty password here relies on Django hashing '' as
        # a usable password -- confirm this still authenticates on upgrade.
        staff = User.objects.create_user('user', 'mail@example.com', '')
        staff.is_staff = True
        staff.save()
        self.client.login(username='user', password='')
        response = self.client.get(reverse('texting_offers_manage'))
        self.assertEqual(response.status_code, 200)
|
995,742 | 2ed8e2ee99e70d4fb88d5df1c1c6b74a3e7671d3 | # coding: utf-8
import os
import sys
import time
class Solution:
    """LeetCode 479: largest palindrome product of two n-digit numbers, mod 1337."""

    def largestPalindrome_cheat(self, n):
        """Precomputed answers (mod 1337) for 1 <= n <= 8; None otherwise.

        :type n: int
        :rtype: int
        """
        answers = {
            1: 9,
            2: 987,
            3: 123,   # 913 * 993
            4: 597,   # 9901 * 9999
            5: 677,   # 99681 * 99979
            6: 1218,  # 999001 * 999999
            7: 877,   # 9997647 * 9998017
            8: 475,   # 99990001 * 99999999
        }
        return answers.get(n)

    def largestPalindrome(self, n):
        """Compute the answer by scanning candidate palindromes.

        A candidate palindrome is upper*10**n + reverse(upper) with
        upper = 10**n - a.  It factors into two n-digit numbers iff
        t**2 - a*t + reverse(upper) = 0 has integer roots, i.e. the
        discriminant a**2 - 4*reverse(upper) is a perfect square.

        :type n: int
        :rtype: int
        """
        if n == 1:
            return 9
        if n == 2:
            return 987
        for a in range(2, 9 * 10 ** (n - 1)):
            upper = 10 ** n - a
            lower = int(str(upper)[::-1])
            disc = a * a - 4 * lower
            if disc < 0:
                continue
            root = disc ** .5
            if root == int(root):
                return (lower + 10 ** n * (10 ** n - a)) % 1337
def main():
    """Run the solver once per non-blank line of the given test-data file.

    Usage: ``python <script> <testdata.txt>``.  Exits with status 0 after
    printing usage / file-not-found messages, mirroring the original CLI.
    """
    argv = sys.argv
    if len(argv) < 2:
        print("Usage: python {0} <testdata.txt>".format(argv[0]))
        sys.exit(0)
    if not os.path.exists(argv[1]):
        print("{0} not found...".format(argv[1]))
        sys.exit(0)
    # ``with`` closes the data file (the original leaked the handle), and
    # sys.exit is used above instead of the interactive-only ``exit()``.
    with open(argv[1], "r") as test_data_file:
        lines = test_data_file.readlines()
    for line in lines:
        line = line.strip()
        if line == "":
            continue  # skip blank lines between test cases
        print("args = {0}".format(line))
        loop_main(line)
def loop_main(temp):
    """Parse one argument line (e.g. ``[3]``), solve it, and print timing."""
    n = int(temp.replace("[", "").replace("]", "").rstrip())
    solver = Solution()
    started = time.time()
    result = solver.largestPalindrome(n)
    print("result = {0}".format(result))
    finished = time.time()
    print("Execute time ... : {0:f}[s]\n".format(finished - started))
if __name__ == "__main__":
    # Run the CLI entry point only when executed as a script, not on import.
    main()
|
995,743 | 4a65745b1eeb2c861614426e234903664809beb4 |
import random
import string
def random_string_generator(size=10, chars=string.ascii_lowercase):
    """Return a random string of ``size`` characters drawn from ``chars``.

    NOTE: uses the ``random`` module, so it is NOT suitable for secrets.
    """
    letters = []
    for _ in range(size):
        letters.append(random.choice(chars))
    return ''.join(letters)
|
995,744 | f14143967b93928db51a0faea40936ff200f7a47 | import telebot
from telebot import types
from pycoingecko import CoinGeckoAPI
from py_currency_converter import convert
import time
import stockquotes
############## PYPI
#### CRYPTOCURRENCY: pip install pycoingecko
#### FIAT: pip install py-currency-converter
#### STOCKS: pip install stockquotes
# NOTE(review): hard-coded bot token is a leaked credential -- it should be
# revoked and loaded from an environment variable instead.
bot = telebot.TeleBot('1944273740:AAEfH-Jxe0XVz141nX5LSSUZ1dTbVwr9XYI')
# CoinGecko API client used for the crypto price lookups below.
cg = CoinGeckoAPI()
# Module-level flag intended to gate the periodic broadcast loop.
SICK = True
@bot.message_handler(commands=['start'])
def start_bot(message):
    """Greet the user and attach the main reply keyboard."""
    keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
    # Button order matches the original layout: home, crypto, fiat, stocks, info.
    labels = ('Главная 🌍', 'Курс крипты ₿', 'Курс фиата 💲',
              'Курс акций 📈', 'Информация 📜')
    keyboard.add(*(types.KeyboardButton(label) for label in labels))
    bot.send_message(message.chat.id, 'Привет, {0.first_name}. '
                     'Я рад тебя видеть! :)'.format(message.from_user),
                     reply_markup=keyboard)
'''BUTTONS CRYPTOCURRENCY, FIAT AND etc'''
# FROM COINGECKO
# NOTE(review): prices are fetched ONCE at import time, so every reply shows
# the rates from bot start-up, not live data.
crypto_course = cg.get_price(ids='bitcoin,ethereum,cardano,binancecoin,solana,ripple,dogecoin,'
                                 'polkadot,terra-luna,uniswap,chainlink,litecoin,'
                                 'bitcoin-cash,algorand', vs_currencies='usd')
# Pre-rendered reply text for the crypto-course button.
crypto = "Курс криптовалют:\n" + f"\n· Bitcoin: {crypto_course['bitcoin']['usd']} $" \
         + f"\n· Ethereum: {crypto_course['ethereum']['usd']} $" \
         + f"\n· Cardano: {crypto_course['cardano']['usd']} $" \
         + f"\n· Binance Coin: {crypto_course['binancecoin']['usd']} $" \
         + f"\n· Solana: {crypto_course['solana']['usd']} $" \
         + f"\n· XRP: {crypto_course['ripple']['usd']} $" \
         + f"\n· Dogecoin: {crypto_course['dogecoin']['usd']} $" \
         + f"\n· Polkadot: {crypto_course['polkadot']['usd']} $" \
         + f"\n· Terra: {crypto_course['terra-luna']['usd']} $" \
         + f"\n· Uniswap: {crypto_course['uniswap']['usd']} $" \
         + f"\n· ChainLink: {crypto_course['chainlink']['usd']} $" \
         + f"\n· Litecoin: {crypto_course['litecoin']['usd']} $" \
         + f"\n· Bitcoin Cash: {crypto_course['bitcoin-cash']['usd']} $" \
         + f"\n· Algorand: {crypto_course['algorand']['usd']} $" \
         + "\n\nУзнать больше /information"
#FROM CONVERT
# NOTE(review): fiat rates are also resolved once at import time.
fiat_course = convert(amount=1, to=['RUB', 'EUR', 'UAH', 'AUD', 'BGN', 'BRL', 'GBP', 'ISK',
                                    'KZT', 'MXN', 'NOK', 'CZK', 'JPY', 'PHP', 'CNY', 'RON', 'INR'])
# Pre-rendered reply text for the fiat-course button.  The last literal joins
# by implicit string concatenation (no ``+``) -- deliberate, if unusual.
fiat = "Курс фиата:\n" + f"\n•🇷🇺 1 USD в RUB {fiat_course['RUB']}"\
       + f"\n•🇪🇺 1 USD в EUR {fiat_course['EUR']}"\
       + f"\n•🇺🇦 1 USD в UAH {fiat_course['UAH']}"\
       + f"\n•🇦🇺 1 USD в AUD {fiat_course['AUD']}"\
       + f"\n•🇧🇬 1 USD в BGN {fiat_course['BGN']}"\
       + f"\n•🇧🇷 1 USD в BRL {fiat_course['BRL']}"\
       + f"\n•🇬🇧 1 USD в GBP {fiat_course['GBP']}"\
       + f"\n•🇮🇸 1 USD в ISK {fiat_course['ISK']}"\
       + f"\n•🇰🇿 1 USD в KZT {fiat_course['KZT']}"\
       + f"\n•🇲🇽 1 USD в MXN {fiat_course['MXN']}"\
       + f"\n•🇳🇴 1 USD в NOK {fiat_course['NOK']}"\
       + f"\n•🇨🇿 1 USD в CZK {fiat_course['CZK']}"\
       + f"\n•🇯🇵 1 USD в JPY {fiat_course['JPY']}"\
       + f"\n•🇵🇭 1 USD в PHP {fiat_course['PHP']}"\
       + f"\n•🇨🇳 1 USD в CNY {fiat_course['CNY']}"\
       + f"\n•🇷🇴 1 USD в RON {fiat_course['RON']}"\
       + f"\n•🇮🇳 1 USD в INR {fiat_course['INR']}" \
       f"\n\nУзнать больше /information"
# INFORMATION
# Static help/disclaimer text served for the info button and /information.
# The trailing backslash after the last literal splices the next comment line
# into this statement -- harmless, but do not insert code between them.
information = 'Здесь представлена вся информация о боте.\n\n' \
              '• Команды:\n' \
              '/start - Запустить бота.\n' \
              '/15cmin - Отправлять курс криптовалют каждые 15 минут.\n' \
              '/60cmin - Отправлять курс криптовалют каждые 60 минут.\n' \
              '/24chours - Отправлять курс криптовалют каждые 24 часа.\n' \
              '/stop_crypto - Отключить автоматизацию по криптовалюте.\n' \
              '/15fmin - Отправлять курс фиата каждые 15 минут.\n' \
              '/60fmin - Отправлять курс фиата каждые 60 минут.\n' \
              '/24fhourse - Отправлять курс фиата каждые 24 часа.\n' \
              '/stop_fiat - Отключить автоматизацию по фиату.\n\n' \
              '• Отказ от ответственности:' \
              '\nВсё, что предоставляет бот, предназначено только для информационных целей.' \
              ' Мы не рекомендуем владеть, продавать или покупать тот или инной актив.' \
# STONCKS
# Tickers to quote; one synchronous lookup per ticker at import time.
stocks = 'AAPL', 'MSFT', 'GOOGL', 'AMZN', 'FB', 'TSLA', 'NVDA', 'V', 'JNJ', 'PYPL'
apple, microsoft, google, amazon, facebook, tesla,\
    nvidia, visa, johnson, paypal = stockquotes.Stock(stocks[0]), stockquotes.Stock(stocks[1]), \
    stockquotes.Stock(stocks[2]), stockquotes.Stock(stocks[3]),\
    stockquotes.Stock(stocks[4]), stockquotes.Stock(stocks[5]), \
    stockquotes.Stock(stocks[6]), stockquotes.Stock(stocks[7]), \
    stockquotes.Stock(stocks[8]), stockquotes.Stock(stocks[9])
# Snapshot of each ticker's price at start-up.
AAPL, MSFT, GOOGL, AMZN, FB, TSLA, NVDA, V, JNJ,\
    PYPL = apple.current_price, microsoft.current_price, google.current_price,\
    amazon.current_price, facebook.current_price, tesla.current_price,\
    nvidia.current_price, visa.current_price, johnson.current_price, \
    paypal.current_price
# Pre-rendered reply text for the stock-course button.
course = f'· Apple Inc: {AAPL}$\n· Microsoft: {MSFT}$\n· Alphabet Inc (Google): {GOOGL}$' \
         f'\n· Amazon Inc: {AMZN}$\n· Facebook Inc: {FB}$' \
         f'\n· Tesla Inc: {TSLA}$\n· NVIDIA: {NVDA}$\n· Visa Inc: {V}$' \
         f'\n· Johnson & Johnson: {JNJ}$\n· PayPal Inc: {PYPL}$' \
         f'\n\nУзнать больше /information'
# MAIN
# Greeting text for the "Главная" button; embeds the start-up price snapshot.
main = 'Следить за курсом криптовалют, фиата и акциями никогда не было так просто!\n' \
       '\nВедь сейчас Bitcoin стоит ' + f'{crypto_course["bitcoin"]["usd"]} $, 1 доллар равняется ' + \
       f'{fiat_course["RUB"]} ₽, а акция Apple Inc {AAPL}$.\n' + \
       '\nС помощью бота, ты можешь следить за курсом криптовалют, фиата и акциями, а также настраивать,' \
       ' чтобы тебе курс приходил автоматически, подробнее: /information'
'''BUTTONS CRYPTOCURRENCY, FIAT AND etc'''
# TIME SETTINGS
@bot.message_handler(commands=['15cmin'])
def settings(message):
    """Start broadcasting the crypto course periodically until stopped.

    BUG FIX: the original loop tested ``message.text == "/stop_crypto"`` --
    but ``message`` never changes inside the loop, so the stop branch was
    unreachable and ``SICK`` was a shadowing local.  The loop now watches
    the module-level SICK flag instead.

    NOTE(review): with default single-threaded polling this loop blocks the
    dispatcher, so /stop_crypto may never be processed -- confirm the bot
    runs with threaded polling before relying on the stop command.
    """
    global SICK
    SICK = True
    if message.text == "/15cmin":
        bot.send_message(message.chat.id, "Отлично! Теперь вы будете получать"
                                          " курс токенов каждые 15 минут.")
    time.sleep(5)
    while SICK:
        bot.send_message(message.chat.id, crypto + '\nПриостановить автоматизацию: /stop_crypto')
        time.sleep(3)
@bot.message_handler(commands=['stop_crypto'])
def crypto_stop(message):
    """Stop the automated crypto broadcast.

    BUG FIX: the original handler only sent a confirmation and never touched
    the SICK flag, so nothing was actually stopped.  Resetting the
    module-level flag lets any loop that watches it exit.
    """
    global SICK
    SICK = False
    if message.text == "/stop_crypto":
        bot.send_message(message.chat.id, 'Приостановлено')
@bot.message_handler(content_types=['text'])
def bot_send_message(message):
    """Dispatch the keyboard-button texts to their pre-rendered replies.

    BUG FIX: the original last branch was
    ``elif message.text == 'Информация 📜' or '/information':`` -- the bare
    string '/information' is always truthy, so the branch matched EVERY
    message and sent the info text for any unrecognized input.  A membership
    test matches only the two intended triggers.
    """
    if message.chat.type == 'private':
        if message.text == "Курс крипты ₿":
            bot.send_message(message.chat.id, crypto)
        elif message.text == 'Курс фиата 💲':
            bot.send_message(message.chat.id, fiat)
        elif message.text == 'Главная 🌍':
            bot.send_message(message.chat.id, main)
        elif message.text == 'Курс акций 📈':
            bot.send_message(message.chat.id, course)
        elif message.text in ('Информация 📜', '/information'):
            bot.send_message(message.chat.id, information)
bot.polling() |
995,745 | 39f7e59f584dc4fd50c424039ec3e768d79faec6 | # This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2014-2015, Michigan State University.
# Copyright (C) 2015-2016, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Michigan State University nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: khmer-project@idyll.org
# pylint: disable=C0111,C0103,E1103,W0612
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import gzip
import os
import pytest
from . import khmer_tst_utils as utils
import khmer
import khmer.kfile
import khmer.utils
import screed
def test_interleave_read_stdout():
    """Without -o, interleave-reads.py writes the interleaved reads to stdout."""
    infile1 = utils.get_test_data('paired-slash1.fq.1')
    infile2 = utils.get_test_data('paired-slash1.fq.2')
    expected = utils.get_test_data('paired-slash1.fq')
    outfile = utils.get_temp_filename('out.fq')

    _, out, _ = utils.runscript('interleave-reads.py', [infile1, infile2])

    # Persist captured stdout so screed can parse it like a regular file.
    with open(outfile, 'w') as handle:
        handle.write(out)

    pairs = list(zip(screed.open(expected), screed.open(outfile)))
    assert pairs
    for want, got in pairs:
        assert want.name == got.name
        assert want.sequence == got.sequence
def test_interleave_read_seq1_fq():
    """Interleaving /1 /2-suffixed FASTQ files matches the reference output."""
    infile1 = utils.get_test_data('paired-slash1.fq.1')
    infile2 = utils.get_test_data('paired-slash1.fq.2')
    expected = utils.get_test_data('paired-slash1.fq')
    outfile = utils.get_temp_filename('out.fq')

    utils.runscript('interleave-reads.py', [infile1, infile2, '-o', outfile])

    pairs = list(zip(screed.open(expected), screed.open(outfile)))
    assert pairs
    for want, got in pairs:
        assert want.name == got.name
        assert want.sequence == got.sequence
def test_interleave_read_badleft_badright():
    """Malformed left/right inputs still interleave into the expected output."""
    infile1 = utils.get_test_data('paired-broken.fq.badleft')
    infile2 = utils.get_test_data('paired-broken.fq.badright')
    expected = utils.get_test_data('paired-broken.fq.paired_bad')
    outfile = utils.get_temp_filename('out.fq')

    utils.runscript('interleave-reads.py', [infile1, infile2, '-o', outfile])

    pairs = list(zip(screed.open(expected), screed.open(outfile)))
    assert pairs
    for want, got in pairs:
        assert want.name == got.name
        assert want.sequence == got.sequence
def test_interleave_reads_1_fq():
    """interleave-reads.py output must match the reference byte-for-byte."""
    infile1 = utils.get_test_data('paired.fq.1')
    infile2 = utils.get_test_data('paired.fq.2')
    ex_outfile = utils.get_test_data('paired.fq')
    outfile = utils.get_temp_filename('out.fq')

    utils.runscript('interleave-reads.py', [infile1, infile2, '-o', outfile])

    # Context managers close the handles (the original leaked both).
    with open(ex_outfile) as expected_handle:
        r = expected_handle.read()
    with open(outfile) as actual_handle:
        q = actual_handle.read()
    assert r == q, (r, q)
def test_interleave_reads_no_reformat():
    """--no-reformat preserves malformed read names verbatim."""
    infile1 = utils.get_test_data('paired.fq.1')
    infile2 = utils.get_test_data('paired.malformat.fq.2')
    ex_outfile = utils.get_test_data('paired.malformat.fq')
    outfile = utils.get_temp_filename('out.fq')

    utils.runscript('interleave-reads.py',
                    [infile1, infile2, '--no-reformat', '-o', outfile])

    # Context managers close the handles (the original leaked both).
    with open(ex_outfile) as expected_handle:
        r = expected_handle.read()
    with open(outfile) as actual_handle:
        q = actual_handle.read()
    assert r == q, (r, q)
def test_interleave_reads_broken_fq():
    """Inputs with different record counts must be rejected."""
    args = [utils.get_test_data('paired-broken.fq.1'),
            utils.get_test_data('paired-broken.fq.2'),
            '-o', utils.get_temp_filename('out.fq')]
    status, out, err = utils.runscript('interleave-reads.py', args, fail_ok=True)
    assert status == 1
    assert 'ERROR: Input files contain different number of records.' in err
def test_interleave_reads_broken_fq_2():
    """Mismatched read pairs must be rejected."""
    args = [utils.get_test_data('paired-broken2.fq.1'),
            utils.get_test_data('paired-broken2.fq.2'),
            '-o', utils.get_temp_filename('out.fq')]
    status, out, err = utils.runscript('interleave-reads.py', args, fail_ok=True)
    assert status == 1
    assert "ERROR: This doesn't look like paired data!" in err
def test_interleave_reads_broken_fq_3():
    """Another mismatched-pair variant must be rejected."""
    args = [utils.get_test_data('paired-broken3.fq.1'),
            utils.get_test_data('paired-broken3.fq.2'),
            '-o', utils.get_temp_filename('out.fq')]
    status, out, err = utils.runscript('interleave-reads.py', args, fail_ok=True)
    assert status == 1
    assert "ERROR: This doesn't look like paired data!" in err
def test_interleave_reads_broken_fq_5():
    """Yet another mismatched-pair variant must be rejected."""
    args = [utils.get_test_data('paired-broken4.fq.1'),
            utils.get_test_data('paired-broken4.fq.2'),
            '-o', utils.get_temp_filename('out.fq')]
    status, out, err = utils.runscript('interleave-reads.py', args, fail_ok=True)
    assert status == 1
    assert "ERROR: This doesn't look like paired data!" in err
def test_interleave_reads_2_fa():
    """FASTA inputs interleave to the expected reference output."""
    infile1 = utils.get_test_data('paired.fa.1')
    infile2 = utils.get_test_data('paired.fa.2')
    expected = utils.get_test_data('paired.fa')
    outfile = utils.get_temp_filename('out.fa')

    utils.runscript('interleave-reads.py', [infile1, infile2, '-o', outfile])

    pairs = list(zip(screed.open(expected), screed.open(outfile)))
    assert pairs
    for want, got in pairs:
        assert want.name == got.name
        assert want.sequence == got.sequence
def test_split_paired_reads_1_fa():
    """split-paired-reads.py splits a FASTA file into .1/.2 next to the input."""
    infile = utils.get_test_data('paired.fa')
    outfile1 = utils.get_temp_filename('paired.fa.1')
    in_dir = os.path.dirname(outfile1)
    outfile2 = utils.get_temp_filename('paired.fa.2', in_dir)

    utils.runscript('split-paired-reads.py', [infile], in_dir)

    assert os.path.exists(outfile1), outfile1
    assert os.path.exists(outfile2), outfile2

    for expected, actual in ((utils.get_test_data('paired.fa.1'), outfile1),
                             (utils.get_test_data('paired.fa.2'), outfile2)):
        pairs = list(zip(screed.open(expected), screed.open(actual)))
        assert pairs
        for want, got in pairs:
            assert want.name == got.name
            assert want.sequence == got.sequence
def test_split_paired_reads_2_fq():
    """FASTQ splitting preserves names, sequences and qualities."""
    infile = utils.get_test_data('paired.fq')
    outfile1 = utils.get_temp_filename('paired.fq.1')
    in_dir = os.path.dirname(outfile1)
    outfile2 = utils.get_temp_filename('paired.fq.2', in_dir)

    utils.runscript('split-paired-reads.py', [infile], in_dir)

    assert os.path.exists(outfile1), outfile1
    assert os.path.exists(outfile2), outfile2

    for expected, actual in ((utils.get_test_data('paired.fq.1'), outfile1),
                             (utils.get_test_data('paired.fq.2'), outfile2)):
        pairs = list(zip(screed.open(expected), screed.open(actual)))
        assert pairs
        for want, got in pairs:
            assert want.name == got.name
            assert want.sequence == got.sequence
            assert want.quality == got.quality
def test_split_paired_reads_2_mixed_fq_require_pair():
    """Mixed paired/unpaired input without an orphan file must fail."""
    infile = utils.copy_test_data('paired-mixed.fq')
    in_dir = os.path.dirname(infile)
    status, out, err = utils.runscript('split-paired-reads.py', [infile],
                                       in_dir, fail_ok=True)
    assert status == 1, status
    assert "Unpaired reads found" in err
def test_split_paired_reads_2_stdin_no_out():
    """Reading from stdin without explicit output filenames must fail."""
    status, out, err = utils.runscript('split-paired-reads.py', ['-'],
                                       fail_ok=True)
    assert status == 1
    assert "Accepting input from stdin; output filenames must " in err
def test_split_paired_reads_2_mixed_fq():
    """Mixed input succeeds when orphans are routed to /dev/null via -0."""
    infile = utils.copy_test_data('paired-mixed-2.fq')
    in_dir = os.path.dirname(infile)
    status, out, err = utils.runscript('split-paired-reads.py',
                                       ['-0', '/dev/null', infile], in_dir)
    assert status == 0
    assert "split 6 sequences (3 left, 3 right, 5 orphans)" in err, err
def test_split_paired_reads_2_mixed_fq_orphans_to_file():
    """Orphans go to the -0 file; all three outputs stay uncompressed."""
    infile = utils.copy_test_data('paired-mixed-2.fq')
    in_dir = os.path.dirname(infile)
    outfile = utils.get_temp_filename('out.fq')

    status, out, err = utils.runscript('split-paired-reads.py',
                                       ['-0', outfile, infile], in_dir)
    assert status == 0
    assert "split 6 sequences (3 left, 3 right, 5 orphans)" in err, err

    n_orphans = len([1 for record in screed.open(outfile)])
    assert n_orphans == 5
    n_left = len([1 for record in screed.open(infile + '.1')])
    assert n_left == 3
    n_right = len([1 for record in screed.open(infile + '.2')])
    assert n_right == 3

    # Each output must NOT be gzip-compressed.  BUG FIX: the original
    # silently passed when gzip.read() *succeeded* (i.e. when the file was
    # wrongly compressed); now that case fails explicitly.
    for filename in [outfile, infile + '.1', infile + '.2']:
        with gzip.open(filename) as fp:
            try:
                fp.read()
            except IOError as e:
                assert "Not a gzipped file" in str(e), str(e)
            else:
                assert False, filename + " is unexpectedly gzip-compressed"
def test_split_paired_reads_2_mixed_fq_gzfile():
    """--gzip produces gzip-compressed left/right/orphan outputs."""
    infile = utils.copy_test_data('paired-mixed-2.fq')
    in_dir = os.path.dirname(infile)
    outfile = utils.get_temp_filename('out.fq')

    status, out, err = utils.runscript('split-paired-reads.py',
                                       ['-0', outfile, '--gzip', infile], in_dir)
    assert status == 0
    assert "split 6 sequences (3 left, 3 right, 5 orphans)" in err, err

    counts = [sum(1 for _ in screed.open(path))
              for path in (outfile, infile + '.1', infile + '.2')]
    assert counts == [5, 3, 3]

    # Reading through gzip raises if any output is not actually compressed.
    for filename in (outfile, infile + '.1', infile + '.2'):
        fp = gzip.open(filename)
        fp.read()  # this will fail if not gzip file.
        fp.close()
def test_split_paired_reads_2_mixed_fq_broken_pairing_format():
    """Broken pairing metadata must abort with the offending read named."""
    infile = utils.copy_test_data('paired-mixed-broken.fq')
    in_dir = os.path.dirname(infile)
    status, out, err = utils.runscript('split-paired-reads.py', [infile],
                                       in_dir, fail_ok=True)
    assert status == 1
    assert "Unpaired reads found starting at 895:1:37:17593:9954" in err, err
def test_split_paired_reads_3_output_dir():
    """--output-dir places the split files in the requested directory."""
    infile = utils.get_test_data('paired.fq')
    testdir = utils.get_temp_filename('test')
    output_dir = os.path.join(os.path.dirname(testdir), "out")
    outfile1 = utils.get_temp_filename('paired.fq.1', output_dir)
    outfile2 = utils.get_temp_filename('paired.fq.2', output_dir)

    utils.runscript('split-paired-reads.py', ['--output-dir', output_dir, infile])

    assert os.path.exists(outfile1), outfile1
    assert os.path.exists(outfile2), outfile2

    for expected, actual in ((utils.get_test_data('paired.fq.1'), outfile1),
                             (utils.get_test_data('paired.fq.2'), outfile2)):
        pairs = list(zip(screed.open(expected), screed.open(actual)))
        assert pairs
        for want, got in pairs:
            assert want.name == got.name
            assert want.sequence == got.sequence
            assert want.quality == got.quality
def test_split_paired_reads_3_output_files():
    """-1/-2 route the left/right reads to the named files."""
    infile = utils.get_test_data('paired.fq')
    outfile1 = utils.get_temp_filename('xxx')
    output_dir = os.path.dirname(outfile1)
    outfile2 = utils.get_temp_filename('yyy', output_dir)

    utils.runscript('split-paired-reads.py', ['-1', outfile1, '-2', outfile2, infile])

    assert os.path.exists(outfile1), outfile1
    assert os.path.exists(outfile2), outfile2

    for expected, actual in ((utils.get_test_data('paired.fq.1'), outfile1),
                             (utils.get_test_data('paired.fq.2'), outfile2)):
        pairs = list(zip(screed.open(expected), screed.open(actual)))
        assert pairs
        for want, got in pairs:
            assert want.name == got.name
            assert want.sequence == got.sequence
            assert want.quality == got.quality
def test_split_paired_reads_3_output_files_left():
    """Naming only the left file: the right one defaults into -d's directory."""
    infile = utils.get_test_data('paired.fq')
    outfile1 = utils.get_temp_filename('xxx')
    output_dir = os.path.dirname(outfile1)
    outfile2 = utils.get_temp_filename('paired.fq.2', output_dir)

    utils.runscript('split-paired-reads.py', ['-d', output_dir, '-1', outfile1, infile])

    assert os.path.exists(outfile1), outfile1
    assert os.path.exists(outfile2), outfile2

    for expected, actual in ((utils.get_test_data('paired.fq.1'), outfile1),
                             (utils.get_test_data('paired.fq.2'), outfile2)):
        pairs = list(zip(screed.open(expected), screed.open(actual)))
        assert pairs
        for want, got in pairs:
            assert want.name == got.name
            assert want.sequence == got.sequence
            assert want.quality == got.quality
def test_split_paired_reads_3_output_files_right():
    """Naming only the right file: the left one defaults into -d's directory."""
    infile = utils.get_test_data('paired.fq')
    outfile1 = utils.get_temp_filename('paired.fq.1')
    output_dir = os.path.dirname(outfile1)
    outfile2 = utils.get_temp_filename('yyy', output_dir)

    utils.runscript('split-paired-reads.py', ['-2', outfile2, '-d', output_dir, infile])

    assert os.path.exists(outfile1), outfile1
    assert os.path.exists(outfile2), outfile2

    for expected, actual in ((utils.get_test_data('paired.fq.1'), outfile1),
                             (utils.get_test_data('paired.fq.2'), outfile2)):
        pairs = list(zip(screed.open(expected), screed.open(actual)))
        assert pairs
        for want, got in pairs:
            assert want.name == got.name
            assert want.sequence == got.sequence
            assert want.quality == got.quality
def test_extract_paired_reads_unpaired():
    """extract-paired-reads.py fails loudly on input with no pairs at all."""
    infile = utils.get_test_data('random-20-a.fa')
    outfile1 = utils.get_temp_filename('unpaired.pe.fa')
    in_dir = os.path.dirname(outfile1)
    # Reserve the .se name in the same directory, as the original did.
    _ = utils.get_temp_filename('unpaired.se.fa', in_dir)

    (_, _, err) = utils.runscript('extract-paired-reads.py', [infile],
                                  in_dir, fail_ok=True)
    assert 'no paired reads!? check file formats...' in err, err
def test_extract_paired_reads_1_fa():
    """extract-paired-reads.py splits a mixed FASTA into .pe and .se files."""
    # test input file
    infile = utils.get_test_data('paired-mixed.fa')
    ex_outfile1 = utils.get_test_data('paired-mixed.fa.pe')
    ex_outfile2 = utils.get_test_data('paired-mixed.fa.se')

    # actual output files...
    outfile1 = utils.get_temp_filename('paired-mixed.fa.pe')
    in_dir = os.path.dirname(outfile1)
    outfile2 = utils.get_temp_filename('paired-mixed.fa.se', in_dir)

    script = 'extract-paired-reads.py'
    args = [infile]

    utils.runscript(script, args, in_dir)

    assert os.path.exists(outfile1), outfile1
    assert os.path.exists(outfile2), outfile2

    # compare each output record-by-record against the expected file;
    # n > 0 guards against trivially-passing empty outputs
    n = 0
    for r, q in zip(screed.open(ex_outfile1), screed.open(outfile1)):
        n += 1
        assert r.name == q.name
        assert r.sequence == q.sequence
    assert n > 0

    n = 0
    for r, q in zip(screed.open(ex_outfile2), screed.open(outfile2)):
        n += 1
        assert r.name == q.name
        assert r.sequence == q.sequence
    assert n > 0
def test_extract_paired_reads_2_fq():
    """extract-paired-reads.py on mixed FASTQ: names, sequences and qualities."""
    infile = utils.get_test_data('paired-mixed.fq')
    expected_pe = utils.get_test_data('paired-mixed.fq.pe')
    expected_se = utils.get_test_data('paired-mixed.fq.se')

    outfile1 = utils.get_temp_filename('paired-mixed.fq.pe')
    in_dir = os.path.dirname(outfile1)
    outfile2 = utils.get_temp_filename('paired-mixed.fq.se', in_dir)

    utils.runscript('extract-paired-reads.py', [infile], in_dir)

    assert os.path.exists(outfile1), outfile1
    assert os.path.exists(outfile2), outfile2

    # enumerate(..., 1) does the record counting; n stays 0 for empty files
    n = 0
    for n, (r, q) in enumerate(zip(screed.open(expected_pe),
                                   screed.open(outfile1)), 1):
        assert r.name == q.name, (r.name, q.name, n)
        assert r.sequence == q.sequence
        assert r.quality == q.quality
    assert n > 0

    n = 0
    for n, (r, q) in enumerate(zip(screed.open(expected_se),
                                   screed.open(outfile2)), 1):
        assert r.name == q.name
        assert r.sequence == q.sequence
        assert r.quality == q.quality
    assert n > 0
def test_extract_paired_reads_3_output_dir():
    """extract-paired-reads.py -d writes the .pe/.se outputs into out_dir."""
    # test input file
    infile = utils.get_test_data('paired-mixed.fa')
    ex_outfile1 = utils.get_test_data('paired-mixed.fa.pe')
    ex_outfile2 = utils.get_test_data('paired-mixed.fa.se')

    # output directory
    out_dir = utils.get_temp_filename('output')

    script = 'extract-paired-reads.py'
    args = [infile, '-d', out_dir]

    utils.runscript(script, args)

    # outputs are named after the input file, inside the requested directory
    outfile1 = os.path.join(out_dir, 'paired-mixed.fa.pe')
    outfile2 = os.path.join(out_dir, 'paired-mixed.fa.se')
    assert os.path.exists(outfile1), outfile1
    assert os.path.exists(outfile2), outfile2

    # record-by-record comparison; n > 0 guards against empty outputs
    n = 0
    for r, q in zip(screed.open(ex_outfile1), screed.open(outfile1)):
        n += 1
        assert r.name == q.name
        assert r.sequence == q.sequence
    assert n > 0

    n = 0
    for r, q in zip(screed.open(ex_outfile2), screed.open(outfile2)):
        n += 1
        assert r.name == q.name
        assert r.sequence == q.sequence
    assert n > 0
def test_extract_paired_reads_4_output_files():
    """-p/-s let the caller name the paired and single output files."""
    infile = utils.get_test_data('paired-mixed.fa')
    expected_pe = utils.get_test_data('paired-mixed.fa.pe')
    expected_se = utils.get_test_data('paired-mixed.fa.se')

    outfile1 = utils.get_temp_filename('out_pe')
    outfile2 = utils.get_temp_filename('out_se')

    utils.runscript('extract-paired-reads.py',
                    [infile, '-p', outfile1, '-s', outfile2])

    assert os.path.exists(outfile1), outfile1
    assert os.path.exists(outfile2), outfile2

    # compare both outputs against their expected counterparts;
    # n > 0 guards against trivially-passing empty files
    for expected, actual in ((expected_pe, outfile1), (expected_se, outfile2)):
        n = 0
        for r, q in zip(screed.open(expected), screed.open(actual)):
            n += 1
            assert r.name == q.name
            assert r.sequence == q.sequence
        assert n > 0
def test_extract_paired_reads_5_stdin_error():
    """Reading from stdin without explicit output filenames must abort."""
    status, out, err = utils.runscript('extract-paired-reads.py',
                                       ['-f', '/dev/stdin'], fail_ok=True)
    assert status == 1
    assert "output filenames must be provided." in err
def test_read_bundler():
    """ReadBundle reports counts/lengths and exposes N-cleaned sequences."""
    infile = utils.get_test_data('unclean-reads.fastq')
    records = [r for r in khmer.ReadParser(infile)]
    bundle = khmer.utils.ReadBundle(*records)

    # raw sequences contain runs of N; the cleaned versions below differ
    # only in those positions (NNN -> AAA)
    raw_seqs = (
        'GGTTGACGGGGNNNAGGGGGCGGCTGACTCCGAGAGACAGCAGCCGCAGCTGTCGTCAGGGGATTTCCG'
        'GGGCGGAGGCCGCAGACGCGAGTGGTGGAGG',
        'GGTTGACGGGGCTCAGGGGGCGGCTGACTCCGAGAGACAGCAGCCGCAGCTGTCGTCAGGGGANNNCCG'
        'GGGCGGAGGCCGCAGACGCGAGTGGTGGAGG',
    )
    cleaned_seqs = (
        'GGTTGACGGGGAAAAGGGGGCGGCTGACTCCGAGAGACAGCAGCCGCAGCTGTCGTCAGGGGATTTCCG'
        'GGGCGGAGGCCGCAGACGCGAGTGGTGGAGG',
        'GGTTGACGGGGCTCAGGGGGCGGCTGACTCCGAGAGACAGCAGCCGCAGCTGTCGTCAGGGGAAAACCG'
        'GGGCGGAGGCCGCAGACGCGAGTGGTGGAGG',
    )

    assert bundle.num_reads == 2
    assert bundle.total_length == 200
    for read, raw_seq, clean_seq in zip(bundle.reads, raw_seqs, cleaned_seqs):
        assert read.sequence == raw_seq
        assert read.cleaned_seq == clean_seq
def test_read_bundler_single_read():
    """A one-read bundle: cleaned_seq equals sequence when there are no Ns."""
    infile = utils.get_test_data('single-read.fq')
    bundle = khmer.utils.ReadBundle(*khmer.ReadParser(infile))
    assert bundle.num_reads == 1
    assert bundle.reads[0].sequence == bundle.reads[0].cleaned_seq
def test_read_bundler_empty_file():
    """ReadParser raises OSError when handed an empty file."""
    infile = utils.get_test_data('empty-file')
    with pytest.raises(OSError):
        list(khmer.ReadParser(infile))
def test_read_bundler_empty_list():
    """A bundle constructed with no reads reports zero reads."""
    bundle = khmer.utils.ReadBundle()
    assert bundle.num_reads == 0
|
995,746 | 237c61b029e98b65dc2375ed8446f70cb7ac6478 | l=[1,2,3,4,5,6,7,8,9,10]
# even and odd members of l, via comprehensions (equivalent to
# list(filter(lambda ...)) but idiomatic)
s = [x for x in l if x % 2 == 0]
print(s)
s1 = [x for x in l if x % 2 != 0]
print(s1)
"""
output:
-------
[2, 4, 6, 8, 10]
"""
|
995,747 | 3cd1f9d24feeceddb6a8bb5cd623ceaa18263d24 | import platform
import time
import collections
from twisted.internet import reactor,defer, task
from twisted.internet.protocol import Factory, Protocol
from twisted.internet.endpoints import SSL4ClientEndpoint
from twisted.internet.ssl import CertificateOptions
import MumbleControlProtocol
import MumbleVoiceProtocol
class _ControlFactory(Factory):
    """Twisted protocol factory that binds new control protocols to one client."""
    def __init__(self,client):
        self.mumbleClient=client
    def buildProtocol(self,addr):
        # every connection gets a MumbleControlProtocol wired back to the client
        return MumbleControlProtocol.MumbleControlProtocol(self.mumbleClient)
class MumbleSettings(object):
    """
    Object to hold settings passed to a MumbleClient.

    Settings used by the base client are:

    .host defaults to "localhost". At this time MumbleClient is ipv4 only
    .port defaults to 64738
    .nickname defaults to "MumblePythonBot"
    .password defaults to None
    .SSLOptions By default a new instance of twisted.internet.ssl.CertificateOptions
        You can assign to a custom instance to provide a client certificate
        and/or verify the server certificate. See the twisted documentation for details

    You can pass in implementation-specific settings in this object. They will be ignored by the base client.
    """
    def __init__(self):
        """
        Sets defaults for all required options. These can be altered as required, and implementation-specific
        settings added
        """
        self.host="localhost"
        """
        :annotation = "localhost":
        defaults to "localhost". At this time MumbleClient is ipv4 only
        """
        self.port=64738
        self.nickname="MumblePythonBot"
        # default CertificateOptions performs no server-certificate verification
        self.SSLOptions=CertificateOptions()
        # None means "do not send a password" (see authenticationMessage)
        self.password=None
class User(object):
    """Stores all information known about a user at this time.

    Attributes are assigned dynamically from UserState message fields
    (see MumbleClient._UserStateReceived), so no fields are declared here.
    """
    pass
class _MumbleState(object):
    """Mutable per-connection bookkeeping shared by MumbleClient methods."""
    def __init__(self):
        # count of TCP pings sent so far (reported back in Ping messages)
        self.numTCPPings=0
        # running latency estimate in milliseconds (updated in _PingReceived)
        self.avgTCPPing=0
        # Timestamp (µs) of the most recently *sent* TCP ping.  Initialized to
        # None so _PingReceived cannot hit an unset attribute if the server
        # pings us before our first outgoing ping (previously AttributeError).
        self.lastTCPTimeStamp=None
        # session id -> User, created on demand as UserState messages arrive
        self.users=collections.defaultdict(User)
class Foo(object):
    # NOTE(review): appears unused within this module -- candidate for
    # removal, left in place in case external code imports it.
    pass
class MumbleClient(object):
    """
    An object representing a mumble client which uses twisted as an event and network handler.
    This should be inherited and methods overridden or implemented to create specific clients.

    Client life-cycle:

    - Client will connect to TCP control protocol on the specified host and port.
      .controlConnected is a twisted Deferred that will make a callback when this occurs
    - After a successful connection, client will try to send three control messages
      :meth:`versionMessage`, :meth:`authenticationMessage` and
      :meth:`codecVersionMessage` are called in order and the results sent.
      To alter the contents of these messages (assuming no setting exists), it
      is probably easiest to override the function, call the parent to get the
      "base" message, and alter what you wish
    - The server should then send channel and user information
    - The server will then send a ServerSync message. This triggers the
      .clientConnected callback, and the :meth:`ServerSyncReceived`
      method is called. At this time the .sessionID variable is set
    - Every 5 seconds, the :meth:`pingMessage` method is called, and
      the message returned sent to the server
    - When the client disconnects, the .clientDisconnected Deferred is
      triggered (probably via errback and not callback) and the
      :meth:`connectionLost` method called.

    In general, the client is informed of activity via method calls. Outside the object, the program is
    informed of (some) activity via the 3 Deferred objects, with more detailed functionality being the
    responsibility of the implementer.

    In general, if a message Foo is received by the client, the method FooReceived(self,message) will be called.
    See MumbleControlProtocol for a list of MessageTypes. Some are implemented in this class and can be
    overridden; some are not needed for base functionality but will be called if defined. The exception is
    the UDPTunnel message, which is one of two possible ways voice data can be received. In these cases
    the :meth:`VoiceMessageReceived` function is called whether the voice source was UDP or TCP.

    To tell if a message affects you, compare the message's session (target) or, optionally, actor (source)
    to self.sessionID. Note that you cannot react to events (cannot send arbitrary messages) until
    just before :meth:`ServerSyncReceived` has been called, which sets sessionID.

    (Note that UDP is not currently supported)
    """

    def __init__(self, settings=None):
        # sessionID is set from the server's ServerSync message; None until then
        self.sessionID = None
        if settings is None:
            settings = MumbleSettings()
        self.settings = settings
        self.state = _MumbleState()
        self.clientConnected = defer.Deferred()
        self.clientDisconnected = defer.Deferred()
        # NOTE(review): several methods use self.controlProtocol, which is
        # never assigned in this class -- presumably set by
        # MumbleControlProtocol when the connection is made; verify.

    def connect(self):
        """
        Asks the client to connect the control protocol

        Returns a deferred which will callback when the connection succeeds.
        This can also be accessed via the controlConnected attribute.

        Note that this only indicates a TCP connection, which might, if the
        client supplies incorrect authentication details, be immediately closed.
        The clientConnected attribute is a Deferred which will return when
        the login phase is complete.

        :return: A Deferred
        """
        self.point = SSL4ClientEndpoint(reactor, self.settings.host,
                                        self.settings.port, self.settings.SSLOptions)
        self.controlConnected = self.point.connect(_ControlFactory(self))
        return self.controlConnected

    def _controlMessageReceived(self, type, name, messageObject):
        # Dispatch first to the internal handler (_NameReceived), then to the
        # public, overridable handler (NameReceived), if either is defined.
        try:
            f = getattr(self, "_" + name + "Received")
        except AttributeError:
            f = None
        if callable(f):
            f(messageObject)
        try:
            f = getattr(self, name + "Received")
        except AttributeError:
            f = None
        if callable(f):
            f(messageObject)

    def _PingReceived(self, message):
        now = int(time.time() * 1000000)
        timestamp = message.timestamp
        # Only measure latency when the echoed timestamp matches the ping we
        # most recently sent.  getattr guard: the server may ping us before
        # our first outgoing ping has set lastTCPTimeStamp (previously this
        # raised AttributeError).
        if timestamp == getattr(self.state, "lastTCPTimeStamp", None):
            self.state.avgTCPPing = (now - timestamp) / 1000.0

    def _ServerSyncReceived(self, message):
        # Login is complete: record our session id, then fire clientConnected.
        self.sessionID = message.session
        self.clientConnected.callback(True)

    def _UserStateReceived(self, message):
        # Merge every field of the UserState message into the cached User,
        # except 'actor' (the sender), which is not a property of the user.
        user = self.state.users[message.session]
        for i in message.ListFields():
            name = i[0].name
            value = i[1]
            if name != "actor":
                setattr(user, name, value)

    def _UserRemoveReceived(self, message):
        # pop with default: removal of an unknown session is a no-op
        self.state.users.pop(message.session, None)

    def _TCPVoiceMessageReceived(self, data):
        prefix, session, data = MumbleVoiceProtocol.decodeAudioMessage(data)
        self.VoiceMessageReceived(prefix, session, data, TCP=True)

    def _unknownMessageReceived(self, type, data):
        # unrecognized control messages are deliberately ignored
        pass

    def _connectionMade(self):
        # Handshake: version, authentication and codec info in order, then
        # start the 5-second keep-alive ping loop (first ping after 5 s).
        self.state.initialTime = time.time()
        self.sendMessage(self.versionMessage())
        self.sendMessage(self.authenticationMessage())
        self.sendMessage(self.codecVersionMessage())
        self.state.pingTask = task.LoopingCall(self._pingTask)
        self.state.pingTaskDeferred = self.state.pingTask.start(5.0, now=False)
        self.state.pingTaskDeferred.addErrback(self.errorCallback)

    def _connectionLost(self, reason):
        self.clientDisconnected.callback(reason)
        self.connectionLost(reason)

    def _pingTask(self):
        self.sendMessage(self.pingMessage())

    def ServerSyncReceived(self, message):
        """
        Called upon receipt of a ServerSync message

        By the time this is called, sessionID will be set to the client's
        session and the clientConnected callback will have completed.

        :param message: A ServerSync message object
        """

    def sendVoiceMessage(self, data):
        """
        Send a voice message via the active voice channel

        :param str data: A stream of bytes

        This will send data either via the TCP control channel or the UDP
        voice channel if the latter is active. Data should be a stream of
        bytes in mumble's voice format.
        """
        self.controlProtocol.sendVoiceMessage(data)

    def connectionLost(self, reason):
        """
        Called when a connection is lost to the control protocol

        This is called after the clientDisconnected callback is returned.
        Implementors can override this method or listen for the callback,
        depending on use-case
        """
        pass

    def VoiceMessageReceived(self, prefix, session, data, TCP=False):
        """
        Called when voice data is received

        :param str prefix: The one-byte message header indicating codec type and if this
            was a standard or direct transmission
        :param int session: The session ID of the source of the transmission
        :param str data: The voice data, consisting of a series of voice frames and
            optionally positional audio at the end

        If you simply wish to output the voice data again, you can call
        sendVoiceMessage and pass in prefix + data as the data.
        Implementors should override this method to process voice data.
        """
        pass

    def sendMessage(self, message):
        """
        Send a control message

        If you call this method with :obj:`None`, it will ignore the call. This
        behavior is so implementers can cancel the sending of an automatic
        message (like Ping) by overriding :meth:`pingMessage` and returning
        None

        :param message: Any kind of TCP control message, or :obj:`None`
        """
        if message is not None:
            self.controlProtocol.sendMessage(message)

    def sendTextMessage(self, message):
        """Send a TextMessage to the channel stored in self.channelID.

        NOTE(review): self.channelID is only set by subclasses such as
        AutoChannelJoinClient -- confirm it is set before calling this on
        the base class.
        """
        tm = MumbleControlProtocol.TextMessage()
        tm.actor = self.sessionID
        tm.channel_id.append(self.channelID)
        tm.message = message
        self.sendMessage(tm)

    def versionMessage(self):
        """
        Called by the client to ask what message to send when it should send
        a Version message on initial connect.

        To change the message it's probably easiest to call the superclass
        method and alter the appropriate fields.

        :return: a Version object
        """
        message = MumbleControlProtocol.Version()
        message.release = "1.2.5"
        message.version = 66053
        message.os = platform.system()
        message.os_version = "evebot1.0.2"
        return message

    def authenticationMessage(self):
        """
        Called by the client to ask what message to send when it should send
        an Authenticate message on initial connect.

        To change the message it's probably easiest to call the superclass
        method and alter the appropriate fields.

        :return: an Authenticate object
        """
        message = MumbleControlProtocol.Authenticate()
        message.username = self.settings.nickname
        if self.settings.password is not None:
            message.password = self.settings.password
        # advertise the two CELT codec versions plus Opus support
        message.celt_versions.append(-2147483637)
        message.celt_versions.append(-2147483632)
        message.opus = True
        return message

    def codecVersionMessage(self):
        """
        Called by the client to ask what message to send when it should send
        a CodecVersion message on initial connect.

        To change the message it's probably easiest to call the superclass
        method and alter the appropriate fields.

        :return: a CodecVersion object
        """
        message = MumbleControlProtocol.CodecVersion()
        message.alpha = -2147483637
        message.beta = 0
        message.prefer_alpha = True
        return message

    def pingMessage(self):
        """
        Called by the client to ask what message to send when it should send
        a Ping message every five seconds.

        To change the message it's probably easiest to call the superclass
        method and alter the appropriate fields.

        :return: a Ping object
        """
        message = MumbleControlProtocol.Ping()
        # microsecond timestamp; echoed back by the server (see _PingReceived)
        timestamp = int(time.time() * 1000000)
        message.timestamp = timestamp
        message.good = 0
        message.late = 0
        message.lost = 0
        message.resync = 0
        message.udp_packets = 0
        message.tcp_packets = self.state.numTCPPings
        message.udp_ping_avg = 0
        message.udp_ping_var = 0.0
        message.tcp_ping_avg = self.state.avgTCPPing
        message.tcp_ping_var = 0
        self.state.numTCPPings += 1
        self.state.lastTCPTimeStamp = timestamp
        return message

    def disconnect(self):
        """
        Ask the client to disconnect the control channel

        When successful, the .clientDisconnected Deferred is triggered (probably via errback and not callback)
        """
        self.controlProtocol.disconnect()

    def errorCallback(self, result):
        # print() call form: valid under both Python 2 and 3, and consistent
        # with the rest of this module.  (The original `print result`
        # statement is Python-2-only syntax and breaks the module on py3.)
        print(result)
        return result
class AutoChannelJoinClient(MumbleClient):
    """MumbleClient that joins a named channel as soon as login completes.

    Reads the target channel name from
    ``settings._autojoin_joinChannel`` (an implementation-specific
    setting; see MumbleSettings).
    """
    def ChannelStateReceived(self,message):
        # remember the id of the channel whose name matches the target
        if message.name==self.settings._autojoin_joinChannel:
            self.channelID = message.channel_id
        print(message.name,message.channel_id,message.description)
    def _ServerSyncReceived(self,message):
        # NOTE(review): if no ChannelState matched the configured name,
        # self.channelID is unset here and this raises AttributeError --
        # confirm the server always announces channels before ServerSync.
        super(AutoChannelJoinClient,self)._ServerSyncReceived(message)
        #MumbleClient._ServerSyncReceived(self,message)
        # move ourselves into the remembered channel
        newMessage = MumbleControlProtocol.UserState()
        newMessage.session = self.sessionID
        newMessage.channel_id=self.channelID
        self.sendMessage(newMessage)
if __name__ == '__main__':
    # Smoke-test scaffold: build a client and run the reactor until the
    # connection drops.  connect() is commented out, so as written this
    # only exercises object construction and the reactor loop.
    c = MumbleClient()
    # c.connect()

    def stop(reason):
        reactor.stop()
    # addBoth: stop the reactor on either clean or errored disconnect
    c.clientDisconnected.addBoth(stop)
    reactor.run()
|
995,748 | d1921f642da4fa56bf3879d61ebf5009f854bdd6 | import time
if __name__ == "__main__":
start_indexing_time = time.time()
'''
Insert indexing functionality
'''
end_indexing_time = time.time()
start_retrieval_time = time.time()
'''
Insert retrieval functionality
'''
end_retrieval_time = time.time()
elapsed_index_time = end_indexing_time - start_indexing_time
elapsed_retrieval_time = end_retrieval_time - start_retrieval_time
elapsed_time = elapsed_retrieval_time + elapsed_index_time
print('Total running time: ', elapsed_time)
print('Indexing running time: ', elapsed_index_time)
print('Retrieval running time: ', elapsed_retrieval_time)
|
995,749 | 44e249876f0bd5aca94dc32337cd2e718f6e302a | from django import forms
from django.core.exceptions import ValidationError
from datetime import date
import requests
import pandas as pd
class Ticker(forms.Form):
    """Form for selecting a Taiwan stock ticker and a date range."""

    ticker = forms.CharField(label='股票代碼', initial='0050')
    start_date = forms.DateField(label='開始日期', initial='2020-01-01',
                                 widget=forms.DateInput(attrs={'type':'date'}))
    end_date = forms.DateField(label='結束日期',
                               initial=date.today().strftime("%Y-%m-%d"),
                               widget=forms.DateInput(attrs={'type':'date'}))

    def clean(self):
        """Cross-field validation: date ordering, then ticker existence.

        Raises:
            ValidationError: if the range is inverted, the end date is in
                the future, or the ticker is unknown to FinMind.
        """
        cleaned_data = super().clean()
        # .get(): a field that failed its own validation is absent from
        # cleaned_data; indexing it directly raised KeyError here before.
        start_date = cleaned_data.get("start_date")
        end_date = cleaned_data.get("end_date")
        ticker = cleaned_data.get('ticker')

        # Cheap date checks first, so an invalid range never triggers the
        # remote FinMind API request below.
        if start_date and end_date:
            if end_date < start_date:
                msg = "開始日期需早於結束日期"
                raise ValidationError(msg)
            today_date = date.today()
            if end_date > today_date:
                msg = "結束日期不應大於今天日期"
                raise ValidationError(msg)

        if ticker:
            # Validate the ticker against FinMind's TaiwanStockInfo list.
            url = "https://api.finmindtrade.com/api/v3/data"
            parameter = {
                "dataset": "TaiwanStockInfo",
            }
            resp = requests.get(url, params=parameter)
            data = resp.json()
            stock_id = pd.DataFrame(data["data"])
            if ticker not in stock_id['stock_id'].values:
                msg = "無此股票代碼"
                raise ValidationError(msg)

        return cleaned_data
|
995,750 | b9cf0747c6e106acfd8a31cd3d5dde37effd0820 | import re
# pattern: a 3-digit prefix, then the remaining 3-3 digit groups
brojReg = re.compile(r'(\d{3})-(\d{3}-\d{3})')

recenica = 'Mojot domasen broj e 032-382-941, a mobilniot e 078-357-145'

# findall returns one (prefix, rest) tuple per phone number in the sentence
broevi = brojReg.findall(recenica)
for prefiks, ostatok in broevi:
    print(prefiks)
    print(ostatok)
    print(prefiks + '-' + ostatok)
    print()
|
995,751 | 636ade553eb5d21c1b46ce90c58571844b36a19b | # Copyright (c) 2021 The Regents of the University of Michigan
# All rights reserved.
# This software is licensed under the BSD 3-Clause License.
"""Defines a circle."""
import numpy as np
from .base_classes import Shape2D
class Circle(Shape2D):
    """A circle with the given radius.

    Args:
        radius (float):
            Radius of the circle.
        center (Sequence[float]):
            The coordinates of the centroid of the circle (Default
            value: (0, 0, 0)).

    Example:
        >>> circle = coxeter.shapes.circle.Circle(radius=1.0, center=(1, 1, 1))
        >>> import numpy as np
        >>> assert np.isclose(circle.area, np.pi)
        >>> circle.centroid
        array([1, 1, 1])
        >>> assert np.isclose(circle.circumference, 2 * np.pi)
        >>> circle.eccentricity
        0
        >>> circle.gsd_shape_spec
        {'type': 'Sphere', 'diameter': 2.0}
        >>> circle.iq
        1
        >>> assert np.isclose(circle.perimeter, 2 * np.pi)
        >>> assert np.allclose(
        ...     circle.planar_moments_inertia,
        ...     (5. / 4. * np.pi, 5. / 4. * np.pi, np.pi))
        >>> assert np.isclose(circle.polar_moment_inertia, 5. / 2. * np.pi)
        >>> circle.radius
        1.0
    """

    def __init__(self, radius, center=(0, 0, 0)):
        self.radius = radius  # validated by the radius setter below
        self.centroid = center

    @property
    def gsd_shape_spec(self):
        """dict: Get a :ref:`complete GSD specification <gsd:shapes>`."""  # noqa: D401
        return {"type": "Sphere", "diameter": 2 * self.radius}

    @property
    def centroid(self):
        """:math:`(3, )` :class:`numpy.ndarray` of float: Get or set the centroid of the shape."""  # noqa: E501
        return self._centroid

    @centroid.setter
    def centroid(self, value):
        self._centroid = np.asarray(value)

    @property
    def radius(self):
        """float: Get the radius of the circle."""
        return self._radius

    @radius.setter
    def radius(self, value):
        if value > 0:
            self._radius = value
        else:
            raise ValueError("Radius must be greater than zero.")

    def _rescale(self, scale):
        """Multiply length scale.

        Args:
            scale (float):
                Scale factor.
        """
        self.radius *= scale

    @property
    def area(self):
        """float: Get the area of the circle."""
        return np.pi * self.radius ** 2

    @area.setter
    def area(self, value):
        if value > 0:
            self.radius = np.sqrt(value / np.pi)
        else:
            raise ValueError("Area must be greater than zero.")

    @property
    def eccentricity(self):
        """float: Get the eccentricity of the circle.

        This is 0 by definition for circles.
        """
        return 0

    @property
    def perimeter(self):
        """float: Get the perimeter of the circle."""
        return 2 * np.pi * self.radius

    @perimeter.setter
    def perimeter(self, value):
        if value > 0:
            self.radius = value / (2 * np.pi)
        else:
            raise ValueError("Perimeter must be greater than zero.")

    @property
    def circumference(self):
        """float: Get the circumference, alias for `Circle.perimeter`."""
        return self.perimeter

    @circumference.setter
    def circumference(self, value):
        self.perimeter = value

    @property
    def planar_moments_inertia(self):
        r"""list[float, float, float]: Get the planar and product moments of inertia.

        Moments are computed with respect to the :math:`x` and :math:`y`
        axes. In addition to the two planar moments, this property also
        provides the product of inertia.

        The `planar moments <https://en.wikipedia.org/wiki/Polar_moment_of_inertia>`__
        and the
        `product <https://en.wikipedia.org/wiki/Second_moment_of_area#Product_moment_of_area>`__
        of inertia are defined by the formulas:

        .. math::
            \begin{align}
                I_x &= {\int \int}_A y^2 dA = \frac{\pi}{4} r^4 = \frac{Ar^2}{4} \\
                I_y &= {\int \int}_A x^2 dA = \frac{\pi}{4} r^4 = \frac{Ar^2}{4} \\
                I_{xy} &= {\int \int}_A xy dA = 0 \\
            \end{align}

        These formulas are given
        `here <https://en.wikipedia.org/wiki/List_of_second_moments_of_area>`__. Note
        that the product moment is zero by symmetry.
        """  # noqa: E501
        area = self.area
        i_x = i_y = area / 4 * self.radius ** 2
        i_xy = 0

        # Apply parallel axis theorem from the centroid
        i_x += area * self.centroid[0] ** 2
        i_y += area * self.centroid[1] ** 2
        i_xy += area * self.centroid[0] * self.centroid[1]
        return i_x, i_y, i_xy

    def is_inside(self, points):
        """Determine whether a set of points are contained in this circle.

        .. note::
            Points on the boundary of the shape will return :code:`True`.

        Args:
            points (:math:`(N, 3)` :class:`numpy.ndarray`):
                The points to test.

        Returns:
            :math:`(N, )` :class:`numpy.ndarray`:
                Boolean array indicating which points are contained in the
                circle.

        Example:
            >>> circle = coxeter.shapes.Circle(1.0)
            >>> circle.is_inside([[0, 0, 0], [20, 20, 20]])
            array([ True, False])
        """
        points = np.atleast_2d(points) - self.centroid
        return np.logical_and(
            np.linalg.norm(points, axis=-1) <= self.radius,
            # At present circles are not orientable, so the z position must
            # match exactly.
            np.isclose(points[:, 2], 0),
        )

    @property
    def iq(self):
        """float: The isoperimetric quotient.

        This is 1 by definition for circles.
        """
        return 1

    def distance_to_surface(self, angles):  # noqa: D102
        return np.ones_like(angles) * self.radius

    @property
    def minimal_bounding_circle(self):
        """:class:`~.Circle`: Get the smallest bounding circle."""
        return Circle(self.radius, self.centroid)

    @property
    def minimal_centered_bounding_circle(self):
        """:class:`~.Circle`: Get the smallest bounding concentric circle."""
        return Circle(self.radius, self.centroid)

    @property
    def maximal_bounding_circle(self):
        """:class:`~.Circle`: Get the largest bounded circle."""
        return Circle(self.radius, self.centroid)

    @property
    def maximal_centered_bounded_circle(self):
        """:class:`~.Circle`: Get the largest bounded concentric circle."""
        return Circle(self.radius, self.centroid)

    def __repr__(self):
        return (
            f"coxeter.shapes.Circle(radius={self.radius}, "
            f"center={self.centroid.tolist()})"
        )

    def _plato_primitive(self, backend):
        # Fix: the stored attribute is `centroid`; there is no `center`
        # attribute on this class, so the previous `self.center` always
        # raised AttributeError.
        return backend.Disks(
            positions=np.array([self.centroid[:2]]),
            colors=np.array([[0.5, 0.5, 0.5, 1]]),
            radii=[self.radius],
        )
|
995,752 | e105f56f1db0cc38e78ebf496285d1686dbd50a7 | from .europian_option_fdm import EuropianOptionImplicitFDM |
995,753 | 6e4186a4bb50d42010dee10eacbc1346eb38dbc7 | # Brianna Atayan - 1632743 - batayan@ucsc.edu
# Colin Maher - 1432169 - csmaher@ucsc.edu
# Lily Nguyen - 1596857 - lnguye78@ucsc.edu
import argparse, pandas, sys, string, numpy, sklearn.metrics, performance_metrics, word_category_counter
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import VotingClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from scipy import sparse
#nltk.download() # uncomment this line to automatically install NLTK library
# default file path for training data
TRAINING_DATA_PATH = "train.csv"
# default file path for testing data
TESTING_DATA_PATH = "testset_1.csv"
# default file path for output of predictions
OUTPUT_PATH = "output.csv"
# default file path for output of performance metrics
OUTPUT_PERFORMANCE_PATH = "output_performance.txt"

# declares classifiers that will be trained and used for testing;
# module-level (global) so all functions can access them
naiveBayesModel = MultinomialNB()
linearSVCModel = LinearSVC(penalty = 'l1', dual = False)
logRegModel = LogisticRegression(C = 1.5, solver = 'lbfgs', multi_class = 'multinomial', random_state = 1, max_iter=1000)
def trainClassifiers(features, labels):
    """
    Trains multiple classifiers with training set

    Parameters are the features set matrix and the labels of the instances

    Returns the trained model
    """
    # Hard-voting ensemble over the three module-level classifiers; weights
    # favor LinearSVC (5) and LogisticRegression (3) over Naive Bayes (1).
    classArr = VotingClassifier(estimators = [('NB', naiveBayesModel), ('linSVC', linearSVCModel), ('LR', logRegModel)], \
        voting = 'hard', weights = [1, 5, 3])
    classArr = classArr.fit(features, labels)
    return classArr
def tokenize(phrase_str):
    """
    Performs tokenization and some preprocessing operations on text data.

    Converts a phrase into a list of words, removes punctuation, removes
    non-alphabetic tokens, and lemmatizes the tokens

    Returns the list of tokens
    """
    phrase = phrase_str.split(' ')  # tokenize string by space character
    mapping = str.maketrans('', '', string.punctuation)
    # Hoisted out of the comprehension: previously a new WordNetLemmatizer
    # was constructed for every token, and token.translate() was computed
    # twice per token.
    lemmatizer = WordNetLemmatizer()
    stripped = (token.translate(mapping) for token in phrase)
    # keep only purely alphabetic tokens, lemmatized
    return [lemmatizer.lemmatize(token) for token in stripped if token.isalpha()]
# LIWC (Linguistic Inquiry and Word Count) category names used to build the
# per-phrase score vectors in get_liwc_features; order defines the feature
# vector layout.
# NOTE(review): ' Impersonal Pronouns' has a leading space -- confirm it
# matches the key emitted by word_category_counter before changing it.
liwc_categories = [
    'Total Pronouns', 'Total Function Words', 'Personal Pronouns', 'First Person Singular', 'First Person Plural',
    'Second Person', 'Third Person Singular', 'Third Person Plural', ' Impersonal Pronouns', 'Articles', 'Common Verbs',
    'Auxiliary Verbs', 'Past Tense', 'Present Tense', 'Future Tense', 'Adverbs', 'Prepositions', 'Conjunctions',
    'Negations', 'Quantifiers', 'Number', 'Swear Words', 'Social Processes', 'Family', 'Friends', 'Humans',
    'Affective Processes', 'Positive Emotion', 'Negative Emotion', 'Anxiety', 'Anger', 'Sadness', 'Cognitive Processes',
    'Insight', 'Causation', 'Discrepancy', 'Tentative', 'Certainty', 'Inhibition', 'Inclusive', 'Exclusive',
    'Perceptual Processes', 'See', 'Hear', 'Feel', 'Biological Processes', 'Body', 'Health', 'Sexual', 'Ingestion',
    'Relativity', 'Motion', 'Space', 'Time', 'Work', 'Achievement', 'Leisure', 'Home', 'Money', 'Religion', 'Death',
    'Assent', 'Nonfluencies', 'Fillers', 'Total first person', 'Total third person', 'Positive feelings',
    'Optimism and energy', 'Communication', 'Other references to people', 'Up', 'Down', 'Occupation', 'School',
    'Sports', 'TV','Music','Metaphysical issues', 'Physical states and functions', 'Sleeping', 'Grooming']
def _liwc_feature_vector(phrase):
    """Return the LIWC score vector for one phrase (0 for absent categories)."""
    liwc_scores = word_category_counter.score_text(phrase)
    return [liwc_scores.get(key, 0) for key in liwc_categories]


def get_liwc_features(train_data, test_data):
    """
    Creates a LIWC feature extractor.
    NOTE: this function is currently not being used in this program.

    Parameters are the training and testing phrases (iterables of str).

    Returns the LIWC score matrices for the training and test data as
    sparse CSR matrices.
    """
    print("getting liwc features")
    # The per-phrase scoring loop was duplicated verbatim for train and
    # test data; both now share _liwc_feature_vector.
    train_liwc_matrix = [_liwc_feature_vector(phrase) for phrase in train_data]
    test_liwc_matrix = [_liwc_feature_vector(phrase) for phrase in test_data]
    return sparse.csr_matrix(train_liwc_matrix), sparse.csr_matrix(test_liwc_matrix)
def get_ngram_features(train_data, test_data):
    """Bag-of-words unigram/bigram count features.

    The vectorizer is fit on the training phrases only, then applied to
    both the training and the test phrases.

    Parameters are the training and testing instances (just the text
    phrases) as Series.

    Returns the extracted feature sets of the training and test data,
    as matrices.
    """
    print("getting ngram features")
    vectorizer = CountVectorizer(ngram_range=(1, 2)).fit(train_data)
    return vectorizer.transform(train_data), vectorizer.transform(test_data)
def get_idf_features(train_data, test_data):
    """Tf-idf unigram/bigram features using the custom tokenizer.

    The vectorizer is fit on the training phrases only, then applied to
    both the training and the test phrases.

    Parameters are the training and testing instances (just the text
    phrases) as Series.

    Returns the extracted feature sets of the training and test data,
    as matrices.
    """
    vectorizer = TfidfVectorizer(tokenizer=tokenize, ngram_range=(1, 2))
    vectorizer.fit(train_data)
    return vectorizer.transform(train_data), vectorizer.transform(test_data)
def get_all_features(train_data, test_data):
    """
    Calls all feature extractor methods to obtain the different feature sets.

    Parameters are the training instances and testing instances (just the text phrases)
    as Series.

    Returns the combined extracted feature sets of the training and test data, as a matrix.
    """
    #train_wc_matrix, test_wc_matrix = get_word_count_features(train_data, test_data)
    train_idf_matrix, test_idf_matrix = get_idf_features(train_data, test_data)
    train_ngram_matrix, test_ngram_matrix = get_ngram_features(train_data, test_data)
    # train_liwc_matrix, test_liwc_matrix = get_liwc_features(train_data, test_data)
    # horizontally stack the sparse tf-idf and n-gram blocks per instance
    return sparse.hstack([train_idf_matrix, train_ngram_matrix]), \
        sparse.hstack([test_idf_matrix, test_ngram_matrix])
def main():
    """Command-line entry point.

    Reads training/testing CSVs, extracts features, trains the voting
    ensemble, and writes per-phrase sentiment predictions to a CSV.
    """
    #### read in command-line arguments, if any ####
    parser = argparse.ArgumentParser(description = "program to predict the "
        "sentiment of a given phrase")
    parser.add_argument("--train", dest = "trainFile", \
        default = TRAINING_DATA_PATH, type = str, \
        help = "the path to the .csv file containing the training data")
    parser.add_argument("--test", dest = "testFile", \
        default = TESTING_DATA_PATH, type = str, \
        help = "the path to the .csv file containing the test data")
    parser.add_argument("--out", dest = "outFile", default = OUTPUT_PATH, \
        type = str, help = "the path of the output file")
    parser.add_argument("--perf", dest = "perfFile", default = \
        OUTPUT_PERFORMANCE_PATH, type = str, help = "the path of the performance "
        "output file")
    args = parser.parse_args()

    #### read training and testing data into a pandas dataframe ####
    try:
        train_data_df = pandas.read_csv(args.trainFile)
    except FileNotFoundError:
        print("Error: Training file does not exist. File must be of type csv")
        sys.exit(1)
    except:
        # NOTE(review): bare except hides the real error; consider narrowing
        print("Error: Unknown error occurred trying to read train data file")
        sys.exit(1)
    try:
        test_data_df = pandas.read_csv(args.testFile)
    except FileNotFoundError:
        print("Error: Testing file does not exist. File must be of type csv")
        sys.exit(1)
    except:
        print("Error: Unknown error occurred trying to read test data file")
        sys.exit(1)

    #### preprocessing & feature extraction ####
    train_feature_set, test_feature_set = get_all_features(train_data_df["Phrase"], test_data_df["Phrase"])
    print("finished getting features")

    # training
    model = trainClassifiers(train_feature_set, train_data_df["Sentiment"].tolist())
    print("finished training")

    # test: one predicted sentiment per PhraseId, written as CSV
    predictions_df = pandas.DataFrame(model.predict(test_feature_set))
    predictions_df = pandas.concat([test_data_df["PhraseId"], predictions_df], axis = 1)
    predictions_df.to_csv(path_or_buf = args.outFile, header = ["PhraseId", "Sentiment"], index = False)

    # write performance stats to txt file
    #perf_out_file = open(args.perfFile, "w")
    #performance_metrics.get_performance_train(model, train_feature_set, train_data_df["Sentiment"].tolist(), perf_out_file, True)
    #performance_metrics.get_performance_cv(model, train_feature_set, train_data_df["Sentiment"].tolist(), perf_out_file, 3)
    #perf_out_file.close()

if __name__ == '__main__':
    main()
|
995,754 | 979547a13da14ff3b95f8e90564d25200904e025 | from lxml import etree as ET
# xml.etree.ElementTree as ET does not work,
# as we use components that are only available in lxml
__version__ = '0.1'
# XML namespace URIs used throughout the METS document model.
METS_NS = "http://www.loc.gov/METS/"
XLIN_NS = "http://www.w3.org/1999/xlink"
# Prefix -> URI maps for use when building/serialising elements.
mets_nsmap = {
    'mets': METS_NS,
}
xlin_nsmap = {
    'xlin': XLIN_NS
}
ET.register_namespace('mets', METS_NS)
ET.register_namespace('xlin', XLIN_NS)
# When True, attributes not in an element's whitelist are removed (after a
# warning) by initialise_values().
strict = True
def initialise_values(element, attribs_list):
    """Validate an element's XML attributes against a whitelist.

    Attributes in ``attribs_list`` are kept; the xlink family among them
    (href, arcrole, title, show, actuate, to, from) is moved into the xlink
    namespace.  Attributes outside the whitelist trigger a warning and, when
    the module-level ``strict`` flag is set, are deleted.
    """
    # Iterate over a snapshot of the keys: the loop deletes entries, and
    # mutating a mapping while iterating it raises RuntimeError.
    for key in list(element.attrib):
        if key in attribs_list:
            # BUG FIX: 'actuate' was misspelled 'acutate' and 'from' was
            # listed as 'FROM', so neither was ever moved into the xlink
            # namespace.
            if key in ['href', 'arcrole', 'title', 'show', 'actuate', 'to',
                       'from']:
                element.set("{%s}%s" % (XLIN_NS, key), element.attrib[key])
                del element.attrib[key]
        else:
            if hasattr(element, "TAG"):
                print("WARN: {} not allowed in element {}".format(
                    key, element.TAG))
            elif hasattr(element, "tag"):
                print("WARN: {} not allowed in element {}".format(
                    key, element.tag))
            else:
                print("WARN: problem with {}".format(key))
            if strict:
                del element.attrib[key]
class Mets(ET.ElementBase):
    """Root <mets> element, with convenience serialisation helpers."""
    TAG = '{http://www.loc.gov/METS/}mets'

    def tounicode(self, pretty_print=False):
        # Serialise the whole tree to a unicode string.
        return ET.tounicode(self, pretty_print=pretty_print)

    def tostring(self, pretty_print=False, encoding="unicode"):
        # Serialise with an explicit encoding ("unicode" yields str).
        return ET.tostring(self, pretty_print=pretty_print, encoding=encoding)

    def write(self, filename, pretty_print=False, encoding="unicode"):
        # Text mode for unicode output, binary mode for byte encodings.
        if encoding in ["unicode"]:
            with open(filename, 'w') as f:
                f.write(ET.tostring(self, pretty_print=pretty_print,
                                    encoding=encoding))
        else:
            with open(filename, 'wb') as f:
                f.write(ET.tostring(self, pretty_print=pretty_print,
                                    encoding=encoding))
# Generic parent classes
class MetsHdr(ET.ElementBase):
    """<metsHdr>: the METS header, recording creation/modification metadata.
    Each permitted XML attribute is exposed as a same-named property."""
    TAG = '{http://www.loc.gov/METS/}metsHdr'

    def _init(self):
        initialise_values(self, ['ID', 'ADMID', 'CREATEDATE', 'LASTMODDATE',
                                 'RECORDSTATUS'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        # BUG FIX: was ``self.attib['ID']`` (typo), which raised
        # AttributeError whenever the ID was assigned.
        self.attrib['ID'] = value

    @property
    def ADMID(self):
        return self.attrib['ADMID']

    @ADMID.setter
    def ADMID(self, value):
        self.attrib['ADMID'] = value

    @property
    def CREATEDATE(self):
        return self.attrib['CREATEDATE']

    @CREATEDATE.setter
    def CREATEDATE(self, value):
        self.attrib['CREATEDATE'] = value

    @property
    def LASTMODDATE(self):
        return self.attrib['LASTMODDATE']

    @LASTMODDATE.setter
    def LASTMODDATE(self, value):
        self.attrib['LASTMODDATE'] = value

    @property
    def RECORDSTATUS(self):
        return self.attrib['RECORDSTATUS']

    @RECORDSTATUS.setter
    def RECORDSTATUS(self, value):
        self.attrib['RECORDSTATUS'] = value
class DmdSec(ET.ElementBase):
    """<dmdSec>: descriptive metadata section. Only attribute is ID."""
    TAG = '{http://www.loc.gov/METS/}dmdSec'

    def _init(self):
        initialise_values(self, ['ID'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value


class AmdSec(ET.ElementBase):
    """<amdSec>: administrative metadata section. Only attribute is ID."""
    TAG = '{http://www.loc.gov/METS/}amdSec'

    def _init(self):
        initialise_values(self, ['ID'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value


class BehaviorSec(ET.ElementBase):
    """<behaviorSec>: behavior section. Only attribute is ID."""
    TAG = '{http://www.loc.gov/METS/}behaviorSec'

    def _init(self):
        initialise_values(self, ['ID'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value
# Generic children of MetsHdr Parent
class Agent(ET.ElementBase):
    """<agent>: a subelement of metsHdr identifying a party responsible for
    the METS document (role, type, etc.)."""
    TAG = '{http://www.loc.gov/METS/}agent'

    def _init(self):
        initialise_values(self, ['ID', 'ROLE', 'OTHERROLE', 'TYPE', 'OTHERTYPE'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value

    @property
    def ROLE(self):
        return self.attrib['ROLE']

    @ROLE.setter
    def ROLE(self, value):
        self.attrib['ROLE'] = value

    @property
    def OTHERROLE(self):
        return self.attrib['OTHERROLE']

    @OTHERROLE.setter
    def OTHERROLE(self, value):
        self.attrib['OTHERROLE'] = value

    @property
    def TYPE(self):
        return self.attrib['TYPE']

    @TYPE.setter
    def TYPE(self, value):
        self.attrib['TYPE'] = value

    @property
    def OTHERTYPE(self):
        return self.attrib['OTHERTYPE']

    @OTHERTYPE.setter
    def OTHERTYPE(self, value):
        # BUG FIX: the setter was named ``othertype``, which created a stray
        # lowercase property and left OTHERTYPE read-only.
        self.attrib['OTHERTYPE'] = value
class Name(ET.ElementBase):
    '''A subelement of Agent. No attributes can be given - only a text value
    for the element (set via the element's ``.text``).
    '''
    TAG = '{http://www.loc.gov/METS/}name'


class Note(ET.ElementBase):
    '''A subelement of Agent. No attributes can be given - only a text value
    for the element (set via the element's ``.text``).
    '''
    TAG = '{http://www.loc.gov/METS/}note'
class AltRecordID(ET.ElementBase):
    '''A subelement of metsHdr. Allows one to use alternative record
    identifier values for the digital object represented by the METS document;
    the primary record identifier is stored in the OBJID attribute in the root
    <mets> document. Accepted attributes are ID and TYPE.
    '''
    TAG = '{http://www.loc.gov/METS/}altRecordID'

    def _init(self):
        initialise_values(self, ['ID', 'TYPE'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value

    @property
    def TYPE(self):
        return self.attrib['TYPE']

    @TYPE.setter
    def TYPE(self, value):
        self.attrib['TYPE'] = value


class MetsDocumentId(ET.ElementBase):
    '''A subelement of metsHdr. Accepted attributes are ID and TYPE.'''
    TAG = '{http://www.loc.gov/METS/}metsDocumentID'

    def _init(self):
        initialise_values(self, ['ID', 'TYPE'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value

    @property
    def TYPE(self):
        return self.attrib['TYPE']

    @TYPE.setter
    def TYPE(self, value):
        self.attrib['TYPE'] = value
class MdRef(ET.ElementBase):
    '''The metadata reference element <mdRef> element is a generic element
    used throughout the METS schema to provide a pointer to metadata which
    resides outside the METS document. NB: <mdRef> is an empty element. The
    location of the metadata must be recorded in the xlink:href attribute,
    supplemented by the XPTR attribute as needed.'''
    TAG = '{http://www.loc.gov/METS/}mdRef'

    def _init(self):
        # BUG FIX: this hook was named ``__init__``; lxml ElementBase
        # subclasses must not override __init__/__new__ and instead use the
        # _init() hook (as every other class in this module does), otherwise
        # attribute validation never runs for parsed elements.
        initialise_values(self, ['ID', 'LABEL', 'XPTR', 'LOCTYPE',
                                 'OTHERLOCTYPE', 'MDTYPE', 'OTHERMDTYPE', 'MDTYPEVERSION',
                                 'MIMETYPE', 'SIZE', 'CREATED', 'CHECKSUM', 'CHECKSUMTYPE', 'href'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value

    @property
    def LABEL(self):
        return self.attrib['LABEL']

    @LABEL.setter
    def LABEL(self, value):
        self.attrib['LABEL'] = value

    @property
    def XPTR(self):
        return self.attrib['XPTR']

    @XPTR.setter
    def XPTR(self, value):
        self.attrib['XPTR'] = value

    @property
    def LOCTYPE(self):
        return self.attrib['LOCTYPE']

    @LOCTYPE.setter
    def LOCTYPE(self, value):
        self.attrib['LOCTYPE'] = value

    @property
    def OTHERLOCTYPE(self):
        return self.attrib['OTHERLOCTYPE']

    @OTHERLOCTYPE.setter
    def OTHERLOCTYPE(self, value):
        self.attrib['OTHERLOCTYPE'] = value

    @property
    def MDTYPE(self):
        return self.attrib['MDTYPE']

    @MDTYPE.setter
    def MDTYPE(self, value):
        self.attrib['MDTYPE'] = value

    @property
    def OTHERMDTYPE(self):
        return self.attrib['OTHERMDTYPE']

    @OTHERMDTYPE.setter
    def OTHERMDTYPE(self, value):
        self.attrib['OTHERMDTYPE'] = value

    @property
    def MDTYPEVERSION(self):
        return self.attrib['MDTYPEVERSION']

    @MDTYPEVERSION.setter
    def MDTYPEVERSION(self, value):
        self.attrib['MDTYPEVERSION'] = value

    @property
    def MIMETYPE(self):
        return self.attrib['MIMETYPE']

    @MIMETYPE.setter
    def MIMETYPE(self, value):
        self.attrib['MIMETYPE'] = value

    @property
    def SIZE(self):
        return self.attrib['SIZE']

    @SIZE.setter
    def SIZE(self, value):
        self.attrib['SIZE'] = value

    @property
    def CREATED(self):
        return self.attrib['CREATED']

    @CREATED.setter
    def CREATED(self, value):
        self.attrib['CREATED'] = value

    @property
    def CHECKSUM(self):
        return self.attrib['CHECKSUM']

    @CHECKSUM.setter
    def CHECKSUM(self, value):
        self.attrib['CHECKSUM'] = value

    @property
    def CHECKSUMTYPE(self):
        return self.attrib['CHECKSUMTYPE']

    @CHECKSUMTYPE.setter
    def CHECKSUMTYPE(self, value):
        self.attrib['CHECKSUMTYPE'] = value

    @property
    def href(self):
        # href lives in the xlink namespace (moved there by initialise_values).
        return self.attrib['{http://www.w3.org/1999/xlink}href']

    @href.setter
    def href(self, value):
        self.attrib['{http://www.w3.org/1999/xlink}href'] = value
class MdWrap(ET.ElementBase):
    '''A subelement of dmdSec, techMD, rightsMD, sourceMD and digiProvMd. It
    is used to wrap metadata from other schemas, such as PREMIS.
    '''
    TAG = '{http://www.loc.gov/METS/}mdWrap'

    def _init(self):
        initialise_values(self, ['ID', 'LABEL',
                                 # Metadata attribute group
                                 'MDTYPE', 'OTHERMDTYPE', 'MDTYPEVERSION', 'MIMETYPE',
                                 'SIZE', 'CREATED', 'CHECKSUM', 'CHECKSUMTYPE'])

    # Each permitted XML attribute is exposed as a same-named property that
    # proxies self.attrib; getters raise KeyError when the attribute is absent.
    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value

    @property
    def LABEL(self):
        return self.attrib['LABEL']

    @LABEL.setter
    def LABEL(self, value):
        self.attrib['LABEL'] = value

    @property
    def MDTYPE(self):
        return self.attrib['MDTYPE']

    @MDTYPE.setter
    def MDTYPE(self, value):
        self.attrib['MDTYPE'] = value

    @property
    def OTHERMDTYPE(self):
        return self.attrib['OTHERMDTYPE']

    @OTHERMDTYPE.setter
    def OTHERMDTYPE(self, value):
        self.attrib['OTHERMDTYPE'] = value

    @property
    def MDTYPEVERSION(self):
        return self.attrib['MDTYPEVERSION']

    @MDTYPEVERSION.setter
    def MDTYPEVERSION(self, value):
        self.attrib['MDTYPEVERSION'] = value

    @property
    def MIMETYPE(self):
        return self.attrib['MIMETYPE']

    @MIMETYPE.setter
    def MIMETYPE(self, value):
        self.attrib['MIMETYPE'] = value

    @property
    def SIZE(self):
        return self.attrib['SIZE']

    @SIZE.setter
    def SIZE(self, value):
        self.attrib['SIZE'] = value

    @property
    def CREATED(self):
        return self.attrib['CREATED']

    @CREATED.setter
    def CREATED(self, value):
        self.attrib['CREATED'] = value

    @property
    def CHECKSUM(self):
        return self.attrib['CHECKSUM']

    @CHECKSUM.setter
    def CHECKSUM(self, value):
        self.attrib['CHECKSUM'] = value

    @property
    def CHECKSUMTYPE(self):
        return self.attrib['CHECKSUMTYPE']

    @CHECKSUMTYPE.setter
    def CHECKSUMTYPE(self, value):
        self.attrib['CHECKSUMTYPE'] = value
class XmlData(ET.ElementBase):
    '''The xml data wrapper element <xmlData> is used to contain XML encoded
    metadata. The content of an <xmlData> element can be in any namespace or
    in no namespace. As permitted by the XML Schema Standard, the
    processContents attribute value for the metadata in an <xmlData> is set to
    "lax". Therefore, if the source schema and its location are identified by
    means of an XML schemaLocation attribute, then an XML processor will
    validate the elements for which it can find declarations. If a source schema
    is not identified, or cannot be found at the specified schemaLocation, then
    an XML validator will check for well-formedness, but otherwise skip over
    the elements appearing in the <xmlData> element.
    '''
    TAG = '{http://www.loc.gov/METS/}xmlData'


class BinData(ET.ElementBase):
    '''The binary data wrapper element <binData> is used to contain Base64
    encoded metadata.
    '''
    TAG = '{http://www.loc.gov/METS/}binData'
# Generic children of AMD Parent
class MdExt(ET.ElementBase):
    '''Generic parent class of techMd, rightsMD, sourceMD and digiprovMd. Not
    intended to be called directly.
    '''
    def _init(self, element_name=None):
        # Subclasses pass their element name; the tag is assigned dynamically
        # because the four metadata elements share this exact structure.
        self.tag = '{http://www.loc.gov/METS/}' + element_name
        initialise_values(self, ['ID', 'ADMID', 'CREATED', 'STATUS'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value

    @property
    def ADMID(self):
        return self.attrib['ADMID']

    @ADMID.setter
    def ADMID(self, value):
        self.attrib['ADMID'] = value

    @property
    def CREATED(self):
        return self.attrib['CREATED']

    @CREATED.setter
    def CREATED(self, value):
        self.attrib['CREATED'] = value

    @property
    def STATUS(self):
        return self.attrib['STATUS']

    @STATUS.setter
    def STATUS(self, value):
        self.attrib['STATUS'] = value


class TechMd(MdExt):
    # <techMD>: technical metadata.
    def _init(self, **kwargs):
        super(TechMd, self)._init("techMD", **kwargs)


class RightsMd(MdExt):
    # <rightsMD>: intellectual property rights metadata.
    def _init(self, **kwargs):
        super(RightsMd, self)._init("rightsMD", **kwargs)


class SourceMd(MdExt):
    # <sourceMD>: source metadata.
    def _init(self, **kwargs):
        super(SourceMd, self)._init("sourceMD", **kwargs)


class DigiprovMd(MdExt):
    # <digiprovMD>: digital provenance metadata.
    def _init(self, **kwargs):
        super(DigiprovMd, self)._init("digiprovMD", **kwargs)
class FileSec(ET.ElementBase):
    '''The overall purpose of the content file section element <fileSec> is to
    provide an inventory of and the location for the content files that
    comprise the digital object being described in the METS document.
    No attributes are handled here - it is purely a container element.
    '''
    TAG = '{http://www.loc.gov/METS/}fileSec'
class FileGrp(ET.ElementBase):
    """<fileGrp>: groups related <file> elements within a fileSec."""
    TAG = '{http://www.loc.gov/METS/}fileGrp'

    def _init(self):
        initialise_values(self, ['ID', 'VERSDATE', 'ADMID', 'USE'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value

    @property
    def VERSDATE(self):
        return self.attrib['VERSDATE']

    @VERSDATE.setter
    def VERSDATE(self, value):
        self.attrib['VERSDATE'] = value

    @property
    def ADMID(self):
        return self.attrib['ADMID']

    @ADMID.setter
    def ADMID(self, value):
        self.attrib['ADMID'] = value

    @property
    def USE(self):
        return self.attrib['USE']

    @USE.setter
    def USE(self, value):
        self.attrib['USE'] = value
class File(ET.ElementBase):
    """<file>: a single content file within a fileGrp."""
    TAG = '{http://www.loc.gov/METS/}file'

    def _init(self):
        # NOTE(review): 'GROUPID' is accepted here but has no property
        # accessor below - confirm whether that is intentional.
        initialise_values(self, ['ID', 'SEQ', 'OWNERID', 'ADMID', 'DMDID',
                                 'GROUPID', 'USE', 'BEGIN', 'END', 'BETYPE'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value

    @property
    def SEQ(self):
        return self.attrib['SEQ']

    @SEQ.setter
    def SEQ(self, value):
        self.attrib['SEQ'] = value

    @property
    def OWNERID(self):
        return self.attrib['OWNERID']

    @OWNERID.setter
    def OWNERID(self, value):
        self.attrib['OWNERID'] = value

    @property
    def ADMID(self):
        return self.attrib['ADMID']

    @ADMID.setter
    def ADMID(self, value):
        self.attrib['ADMID'] = value

    @property
    def DMDID(self):
        return self.attrib['DMDID']

    @DMDID.setter
    def DMDID(self, value):
        self.attrib['DMDID'] = value

    @property
    def USE(self):
        return self.attrib['USE']

    @USE.setter
    def USE(self, value):
        self.attrib['USE'] = value

    @property
    def BEGIN(self):
        return self.attrib['BEGIN']

    @BEGIN.setter
    def BEGIN(self, value):
        self.attrib['BEGIN'] = value

    @property
    def END(self):
        return self.attrib['END']

    @END.setter
    def END(self, value):
        self.attrib['END'] = value

    @property
    def BETYPE(self):
        return self.attrib['BETYPE']

    @BETYPE.setter
    def BETYPE(self, value):
        self.attrib['BETYPE'] = value
class FLocat(ET.ElementBase):
    """<FLocat>: points to the external location of a content file; the
    location itself goes in the xlink href attribute."""
    TAG = '{http://www.loc.gov/METS/}FLocat'

    def _init(self):
        initialise_values(self, ['ID', 'USE', 'LOCTYPE', 'OTHERLOCTYPE', 'href'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value

    @property
    def USE(self):
        return self.attrib['USE']

    @USE.setter
    def USE(self, value):
        self.attrib['USE'] = value

    @property
    def LOCTYPE(self):
        return self.attrib['LOCTYPE']

    @LOCTYPE.setter
    def LOCTYPE(self, value):
        self.attrib['LOCTYPE'] = value

    @property
    def OTHERLOCTYPE(self):
        return self.attrib['OTHERLOCTYPE']

    @OTHERLOCTYPE.setter
    def OTHERLOCTYPE(self, value):
        self.attrib['OTHERLOCTYPE'] = value

    @property
    def href(self):
        # href lives in the xlink namespace (moved there by initialise_values).
        return self.attrib['{http://www.w3.org/1999/xlink}href']

    @href.setter
    def href(self, value):
        self.attrib['{http://www.w3.org/1999/xlink}href'] = value
class FContent(ET.ElementBase):
    """<FContent>: embeds the content of a file inside the METS document."""
    TAG = '{http://www.loc.gov/METS/}FContent'

    def _init(self):
        initialise_values(self, ['ID', 'USE'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value

    @property
    def USE(self):
        return self.attrib['USE']

    @USE.setter
    def USE(self, value):
        self.attrib['USE'] = value
class Stream(ET.ElementBase):
    """<stream>: a component byte stream within a <file>."""
    TAG = '{http://www.loc.gov/METS/}stream'

    def _init(self):
        # "streamType" is something of an anomaly here, as it is the only
        # attribute whose spelling is in camelCase (except for xlin:href).
        # As such, we accept an all-caps spelling and normalise it.
        # BUG FIX: iterate over a snapshot of the keys - the original looped
        # over self.attrib directly while deleting from it, which raises
        # RuntimeError ("dictionary changed size during iteration").
        for attrib_value in list(self.attrib):
            if attrib_value == 'STREAMTYPE':
                self.attrib['streamType'] = self.attrib[attrib_value]
                del self.attrib[attrib_value]
        initialise_values(self, ['ID', 'OWNERID', 'ADMID', 'DMDID', 'BEGIN',
                                 'END', 'BETYPE', 'streamType'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value

    @property
    def OWNERID(self):
        return self.attrib['OWNERID']

    @OWNERID.setter
    def OWNERID(self, value):
        self.attrib['OWNERID'] = value

    @property
    def ADMID(self):
        return self.attrib['ADMID']

    @ADMID.setter
    def ADMID(self, value):
        self.attrib['ADMID'] = value

    @property
    def DMDID(self):
        return self.attrib['DMDID']

    @DMDID.setter
    def DMDID(self, value):
        self.attrib['DMDID'] = value

    @property
    def BEGIN(self):
        return self.attrib['BEGIN']

    @BEGIN.setter
    def BEGIN(self, value):
        self.attrib['BEGIN'] = value

    @property
    def END(self):
        return self.attrib['END']

    @END.setter
    def END(self, value):
        self.attrib['END'] = value

    @property
    def BETYPE(self):
        return self.attrib['BETYPE']

    @BETYPE.setter
    def BETYPE(self, value):
        self.attrib['BETYPE'] = value

    @property
    def streamType(self):
        return self.attrib['streamType']

    @streamType.setter
    def streamType(self, value):
        self.attrib['streamType'] = value
class TransformFile(ET.ElementBase):
    """<transformFile>: describes a transformation (e.g. decompression or
    decryption) needed to render a file's content."""
    TAG = '{http://www.loc.gov/METS/}transformFile'

    def _init(self):
        initialise_values(self, ['ID', 'TRANSFORMTYPE', 'TRANSFORMALGORITHM',
                                 'TRANSFORMKEY', 'TRANSFORMBEHAVIOR', 'TRANSFORMORDER'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value

    @property
    def TRANSFORMTYPE(self):
        return self.attrib['TRANSFORMTYPE']

    @TRANSFORMTYPE.setter
    def TRANSFORMTYPE(self, value):
        self.attrib['TRANSFORMTYPE'] = value

    @property
    def TRANSFORMALGORITHM(self):
        return self.attrib['TRANSFORMALGORITHM']

    @TRANSFORMALGORITHM.setter
    def TRANSFORMALGORITHM(self, value):
        self.attrib['TRANSFORMALGORITHM'] = value

    @property
    def TRANSFORMKEY(self):
        return self.attrib['TRANSFORMKEY']

    @TRANSFORMKEY.setter
    def TRANSFORMKEY(self, value):
        self.attrib['TRANSFORMKEY'] = value

    @property
    def TRANSFORMBEHAVIOR(self):
        return self.attrib['TRANSFORMBEHAVIOR']

    @TRANSFORMBEHAVIOR.setter
    def TRANSFORMBEHAVIOR(self, value):
        self.attrib['TRANSFORMBEHAVIOR'] = value

    @property
    def TRANSFORMORDER(self):
        return self.attrib['TRANSFORMORDER']

    @TRANSFORMORDER.setter
    def TRANSFORMORDER(self, value):
        self.attrib['TRANSFORMORDER'] = value
# structMap classes
class StructMap(ET.ElementBase):
    """<structMap>: the structural map, outlining the hierarchical structure
    of the digital object."""
    TAG = '{http://www.loc.gov/METS/}structMap'

    def _init(self):
        initialise_values(self, ['ID', 'TYPE', 'LABEL'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value

    @property
    def TYPE(self):
        return self.attrib['TYPE']

    @TYPE.setter
    def TYPE(self, value):
        self.attrib['TYPE'] = value

    @property
    def LABEL(self):
        return self.attrib['LABEL']

    @LABEL.setter
    def LABEL(self, value):
        self.attrib['LABEL'] = value
class Div(ET.ElementBase):
    """<div>: a structural division within a structMap."""
    TAG = '{http://www.loc.gov/METS/}div'

    def _init(self):
        initialise_values(self, ['ID', 'ORDER', 'ORDERLABEL', 'LABEL',
                                 'DMDID', 'ADMID', 'TYPE', 'CONTENTIDS'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value

    @property
    def ORDER(self):
        return self.attrib['ORDER']

    @ORDER.setter
    def ORDER(self, value):
        self.attrib['ORDER'] = value

    @property
    def ORDERLABEL(self):
        return self.attrib['ORDERLABEL']

    @ORDERLABEL.setter
    def ORDERLABEL(self, value):
        self.attrib['ORDERLABEL'] = value

    @property
    def LABEL(self):
        return self.attrib['LABEL']

    @LABEL.setter
    def LABEL(self, value):
        self.attrib['LABEL'] = value

    @property
    def DMDID(self):
        return self.attrib['DMDID']

    @DMDID.setter
    def DMDID(self, value):
        self.attrib['DMDID'] = value

    @property
    def ADMID(self):
        return self.attrib['ADMID']

    @ADMID.setter
    def ADMID(self, value):
        # BUG FIX: this setter was decorated ``@DMDID.setter``, which rebound
        # ADMID to a copy of the DMDID property (reading 'DMDID', writing
        # 'ADMID').
        self.attrib['ADMID'] = value

    @property
    def TYPE(self):
        return self.attrib['TYPE']

    @TYPE.setter
    def TYPE(self, value):
        self.attrib['TYPE'] = value

    @property
    def CONTENTIDS(self):
        return self.attrib['CONTENTIDS']

    @CONTENTIDS.setter
    def CONTENTIDS(self, value):
        self.attrib['CONTENTIDS'] = value
class Mptr(ET.ElementBase):
    """<mptr>: METS pointer - locates a separate METS document containing
    the structural detail for a <div>."""
    TAG = '{http://www.loc.gov/METS/}mptr'

    def _init(self):
        initialise_values(self, ['ID', 'CONTENTIDS', 'LOCTYPE',
                                 'OTHERLOCTYPE', ])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value

    @property
    def CONTENTIDS(self):
        return self.attrib['CONTENTIDS']

    @CONTENTIDS.setter
    def CONTENTIDS(self, value):
        self.attrib['CONTENTIDS'] = value

    @property
    def LOCTYPE(self):
        return self.attrib['LOCTYPE']

    @LOCTYPE.setter
    def LOCTYPE(self, value):
        self.attrib['LOCTYPE'] = value

    @property
    def OTHERLOCTYPE(self):
        return self.attrib['OTHERLOCTYPE']

    @OTHERLOCTYPE.setter
    def OTHERLOCTYPE(self, value):
        self.attrib['OTHERLOCTYPE'] = value
class Fptr(ET.ElementBase):
    """<fptr>: file pointer - links a <div> to the <file> representing it."""
    TAG = '{http://www.loc.gov/METS/}fptr'

    def _init(self):
        initialise_values(self, ['ID', 'FILEID', 'CONTENTIDS'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value

    @property
    def FILEID(self):
        return self.attrib['FILEID']

    @FILEID.setter
    def FILEID(self, value):
        self.attrib['FILEID'] = value

    @property
    def CONTENTIDS(self):
        return self.attrib['CONTENTIDS']

    @CONTENTIDS.setter
    def CONTENTIDS(self, value):
        self.attrib['CONTENTIDS'] = value
class Par(ET.ElementBase):
    """<par>: aggregates content components that are played in parallel."""
    TAG = '{http://www.loc.gov/METS/}par'

    def _init(self):
        # BUG FIX: a bare string 'ID' was passed instead of a list; the
        # ``key in attribs_list`` check then did substring matching, so bogus
        # attributes named 'I' or 'D' were silently accepted.
        initialise_values(self, ['ID'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value
class Seq(ET.ElementBase):
    """<seq>: aggregates content components played in sequence."""
    TAG = '{http://www.loc.gov/METS/}seq'

    def _init(self):
        initialise_values(self, ['ID'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value
class Area(ET.ElementBase):
    """<area>: links a <div> to a region (spatial or temporal) of a file."""
    TAG = '{http://www.loc.gov/METS/}area'

    def _init(self):
        initialise_values(self, ['ID', 'FILEID', 'SHAPE', 'COORDS',
                                 'BEGIN', 'END', 'BETYPE', 'EXTENT', 'EXTTYPE', 'ADMID',
                                 'CONTENTIDS'])

    # Each permitted XML attribute is exposed as a same-named property that
    # proxies self.attrib; getters raise KeyError when the attribute is absent.
    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value

    @property
    def FILEID(self):
        return self.attrib['FILEID']

    @FILEID.setter
    def FILEID(self, value):
        self.attrib['FILEID'] = value

    @property
    def SHAPE(self):
        return self.attrib['SHAPE']

    @SHAPE.setter
    def SHAPE(self, value):
        self.attrib['SHAPE'] = value

    @property
    def COORDS(self):
        return self.attrib['COORDS']

    @COORDS.setter
    def COORDS(self, value):
        self.attrib['COORDS'] = value

    @property
    def BEGIN(self):
        return self.attrib['BEGIN']

    @BEGIN.setter
    def BEGIN(self, value):
        self.attrib['BEGIN'] = value

    @property
    def END(self):
        return self.attrib['END']

    @END.setter
    def END(self, value):
        self.attrib['END'] = value

    @property
    def BETYPE(self):
        return self.attrib['BETYPE']

    @BETYPE.setter
    def BETYPE(self, value):
        self.attrib['BETYPE'] = value

    @property
    def EXTENT(self):
        return self.attrib['EXTENT']

    @EXTENT.setter
    def EXTENT(self, value):
        self.attrib['EXTENT'] = value

    @property
    def EXTTYPE(self):
        return self.attrib['EXTTYPE']

    @EXTTYPE.setter
    def EXTTYPE(self, value):
        self.attrib['EXTTYPE'] = value

    @property
    def ADMID(self):
        return self.attrib['ADMID']

    @ADMID.setter
    def ADMID(self, value):
        self.attrib['ADMID'] = value

    @property
    def CONTENTIDS(self):
        return self.attrib['CONTENTIDS']

    @CONTENTIDS.setter
    def CONTENTIDS(self, value):
        self.attrib['CONTENTIDS'] = value
class SmLink(ET.ElementBase):
    """<smLink>: a hyperlink between two nodes of the structMap. The xlink
    attributes live in the xlink namespace (moved there by
    initialise_values)."""
    TAG = '{http://www.loc.gov/METS/}smLink'

    def _init(self):
        initialise_values(self, ['ID', 'arcrole', 'title', 'show',
                                 'actuate', 'to', 'from'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value

    @property
    def arcrole(self):
        return self.attrib['{http://www.w3.org/1999/xlink}arcrole']

    @arcrole.setter
    def arcrole(self, value):
        self.attrib['{http://www.w3.org/1999/xlink}arcrole'] = value

    @property
    def title(self):
        return self.attrib['{http://www.w3.org/1999/xlink}title']

    @title.setter
    def title(self, value):
        self.attrib['{http://www.w3.org/1999/xlink}title'] = value

    @property
    def show(self):
        return self.attrib['{http://www.w3.org/1999/xlink}show']

    @show.setter
    def show(self, value):
        self.attrib['{http://www.w3.org/1999/xlink}show'] = value

    @property
    def actuate(self):
        return self.attrib['{http://www.w3.org/1999/xlink}actuate']

    @actuate.setter
    def actuate(self, value):
        self.attrib['{http://www.w3.org/1999/xlink}actuate'] = value

    @property
    def to(self):
        return self.attrib['{http://www.w3.org/1999/xlink}to']

    @to.setter
    def to(self, value):
        self.attrib['{http://www.w3.org/1999/xlink}to'] = value

    # Upper-case name because ``from`` is a Python keyword.
    @property
    def FROM(self):
        return self.attrib['{http://www.w3.org/1999/xlink}from']

    @FROM.setter
    def FROM(self, value):
        # BUG FIX: this setter was decorated ``@to.setter``, which replaced
        # the FROM property with a copy of ``to`` (its getter read xlink:to).
        self.attrib['{http://www.w3.org/1999/xlink}from'] = value
class SmLinkGrp(ET.ElementBase):
    """<smLinkGrp>: groups related smLocatorLink / smArcLink elements."""
    TAG = '{http://www.loc.gov/METS/}smLinkGrp'

    def _init(self):
        initialise_values(self, ['ID', 'ARCLINKORDER'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value

    @property
    def ARCLINKORDER(self):
        return self.attrib['ARCLINKORDER']

    @ARCLINKORDER.setter
    def ARCLINKORDER(self, value):
        self.attrib['ARCLINKORDER'] = value
class SmLocatorLink(ET.ElementBase):
    """<smLocatorLink>: identifies a participant in an smLinkGrp link."""
    TAG = '{http://www.loc.gov/METS/}smLocatorLink'

    def _init(self):
        initialise_values(self, ['ID'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        # BUG FIX: the setter omitted the ``value`` parameter and referenced
        # an undefined name, so every assignment raised a TypeError.
        self.attrib['ID'] = value
class SmArcLink(ET.ElementBase):
    """<smArcLink>: an arc between two locator links in an smLinkGrp."""
    TAG = '{http://www.loc.gov/METS/}smArcLink'

    def _init(self):
        initialise_values(self, ['ID', 'ARCTYPE', 'ADMID'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        # BUG FIX: all three setters in this class omitted the ``value``
        # parameter (TypeError on assignment).
        self.attrib['ID'] = value

    @property
    def ARCTYPE(self):
        return self.attrib['ARCTYPE']

    @ARCTYPE.setter
    def ARCTYPE(self, value):
        # BUG FIX: was decorated ``@ID.setter``, which silently replaced the
        # ARCTYPE property with a copy of ID.
        self.attrib['ARCTYPE'] = value

    @property
    def ADMID(self):
        return self.attrib['ADMID']

    @ADMID.setter
    def ADMID(self, value):
        # BUG FIX: was decorated ``@ID.setter`` (same problem as ARCTYPE).
        self.attrib['ADMID'] = value
class Behavior(ET.ElementBase):
    """<behavior>: associates executable behaviors with METS content."""
    TAG = '{http://www.loc.gov/METS/}behavior'

    def _init(self):
        initialise_values(self, ['ID', 'STRUCTID', 'BTYPE', 'CREATED',
                                 'LABEL', 'GROUPID', 'ADMID'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value

    @property
    def STRUCTID(self):
        return self.attrib['STRUCTID']

    @STRUCTID.setter
    def STRUCTID(self, value):
        self.attrib['STRUCTID'] = value

    @property
    def BTYPE(self):
        return self.attrib['BTYPE']

    @BTYPE.setter
    def BTYPE(self, value):
        self.attrib['BTYPE'] = value

    @property
    def CREATED(self):
        return self.attrib['CREATED']

    @CREATED.setter
    def CREATED(self, value):
        self.attrib['CREATED'] = value

    @property
    def LABEL(self):
        return self.attrib['LABEL']

    @LABEL.setter
    def LABEL(self, value):
        self.attrib['LABEL'] = value

    @property
    def GROUPID(self):
        return self.attrib['GROUPID']

    @GROUPID.setter
    def GROUPID(self, value):
        self.attrib['GROUPID'] = value

    @property
    def ADMID(self):
        return self.attrib['ADMID']

    @ADMID.setter
    def ADMID(self, value):
        self.attrib['ADMID'] = value
class InterfaceDef(ET.ElementBase):
    """<interfaceDef>: points to the abstract definition of a behavior's
    interface."""
    TAG = '{http://www.loc.gov/METS/}interfaceDef'

    def _init(self):
        initialise_values(self, ['ID', 'LABEL'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value

    @property
    def LABEL(self):
        return self.attrib['LABEL']

    @LABEL.setter
    def LABEL(self, value):
        self.attrib['LABEL'] = value


class Mechanism(ET.ElementBase):
    """<mechanism>: points to the executable module implementing a behavior."""
    TAG = '{http://www.loc.gov/METS/}mechanism'

    def _init(self):
        initialise_values(self, ['ID', 'LABEL'])

    @property
    def ID(self):
        return self.attrib['ID']

    @ID.setter
    def ID(self, value):
        self.attrib['ID'] = value

    @property
    def LABEL(self):
        return self.attrib['LABEL']

    @LABEL.setter
    def LABEL(self, value):
        self.attrib['LABEL'] = value
def gcd(x, y):
    """Greatest common divisor of x and y via the Euclidean algorithm.

    ``y`` must be non-zero, as in the original (ZeroDivisionError otherwise).
    """
    while x % y != 0:
        x, y = y, x % y
    return y
def GCD(x, y, z):
    """Greatest common divisor of three positive integers.

    Uses the standard library's math.gcd instead of the hand-rolled
    recursive helper, avoiding recursion-depth concerns for large inputs.
    """
    from math import gcd as _gcd  # local import: keeps the module's top untouched
    return _gcd(_gcd(x, y), z)
# Sum GCD(a, b, c) over all triples 1 <= a, b, c <= K (O(K^3) brute force).
ans=0
K=int(input())  # upper bound K is read from stdin
for a in range(1,K+1):
    for b in range(1,K+1):
        for c in range(1,K+1):
            ans+=GCD(a,b,c)
print(ans)
|
995,756 | 1bc55bd721211fb233de9c75f9d2366068b9eccd | from django.urls import path, include
from . import views
from rest_framework.routers import DefaultRouter
# DRF router exposing the paginated rating list endpoints under rating/page/.
router = DefaultRouter(trailing_slash=False)
router.register(r'', views.RatingViewSet, basename="rating")

urlpatterns = [
    # Cinema map endpoints.
    path('map/', views.get_cinema_width, name='get_cinema_width'),
    path('map/<int:cinema_id>/movie/', views.get_fast_movie, name='get_fast_movie'),
    # Cinema detail / selection / aggregated score.
    path('<int:cinema_id>/', views.cinema_detail, name='cinema_detail'),
    path('<int:cinema_id>/pick/', views.pick_cinema, name='pick_cinema'),
    path('<int:cinema_id>/score/', views.get_cinema_rating_avg, name='get_cinema_rating_avg'),
    # Rating create / update / delete.
    path('rating/', views.create_cinema_rating, name='create_cinema_rating'),
    path('rating/<int:rating_id>/', views.patch_delete_cinema_rating, name='patch_delete_cinema_rating'),
    path('rating/page/', include(router.urls)),
]
|
995,757 | 47b05ad94c16cffe21214db567b0d61689fc0611 | '''Advent of Code 2015 day 1: Not Quite Lisp
https://adventofcode.com/2015/day/1'''
def process(instructions):
    '''Generate a sequence of (position, floor) for the given instructions.
    Start at floor 0; go up a floor for each '(' and down a floor for any
    other character. Positions count from 1.
    '''
    floor = 0
    for step, ch in enumerate(instructions, start=1):
        floor += 1 if ch == '(' else -1
        yield step, floor
def find_floor(instructions):
    '''Find the floor Santa ends up on after following the instructions.

    Computed directly instead of star-unpacking the ``process`` generator:
    the original ``*_, (_pos, floor) = process(...)`` raised ValueError on
    empty input; an empty instruction string now yields floor 0.
    '''
    floor = 0
    for ch in instructions:
        floor += 1 if ch == '(' else -1
    return floor
def first_basement_entry(instructions):
    '''Find the first instruction where we enter the basement (floor < 0).
    Positions count from 1. Return 0 if we never do.'''
    # The per-step fold is inlined here (identical to iterating process()).
    level = 0
    for position, ch in enumerate(instructions, start=1):
        level = level + 1 if ch == '(' else level - 1
        if level < 0:
            return position
    return 0
def run(args):  # pragma: no cover
    '''Read the instruction file named in args[0] and print both answers.'''
    filename = args[0]
    with open(filename) as f:
        data = f.read()
    # NOTE(review): data includes any trailing newline, which process()
    # counts as a "down" instruction - confirm the input file has none.
    floor = find_floor(data)
    pos = first_basement_entry(data)
    print(f"Santa's floor: {floor}")
    print(f"Santa's first entry to basement: {pos}")
|
995,758 | 25a43870555ef9a46d6ff9d308c3603b4daea152 | # !interpreter [optional-arg]
# -*- coding: utf-8 -*-
# Version
'''
{
Load the numpy array and calculate the features, then split the datasets and save them
including rebuild the joint order
}
{License_info}
'''
# Futures
# […]
# Built-in/Generic Imports
import os
import sys
import json
import numpy as np
# […]
# Libs
# import pandas as pd # Or any other
# […]
# Own module
# […]
if True: # Include project path
ROOT = os.path.dirname(os.path.abspath(__file__))+'/../'
CURR_PATH = os.path.dirname(os.path.abspath(__file__))+'/'
sys.path.append(ROOT)
import utils.uti_features_extraction as uti_features_extraction
import utils.uti_commons as uti_commons
def par(path): # Pre-Append ROOT to the path if it's not absolute
    """Return ROOT-prefixed path, unless path is empty or already absolute."""
    if path and not path.startswith('/'):
        return ROOT + path
    return path
# -- Settings
# Module-level configuration, read once at import time from config/config.json.
with open(ROOT + 'config/config.json') as json_config_file:
    config_all = json.load(json_config_file)
    config = config_all['s3_pre_processing.py']
# common settings
ACTION_CLASSES = config_all['ACTION_CLASSES']
IMAGE_FILE_NAME_FORMAT = config_all['IMAGE_FILE_NAME_FORMAT']
SKELETON_FILE_NAME_FORMAT = config_all['SKELETON_FILE_NAME_FORMAT']
CLIP_NUM_INDEX = config_all['CLIP_NUM_INDEX']
ACTION_CLASS_INT_INEDX = config_all['ACTION_CLASS_INT_INEDX']
FEATURE_WINDOW_SIZE = config_all['FEATURE_WINDOW_SIZE']
TEST_DATA_SCALE = config_all['TEST_DATA_SCALE']
# input: .npz archive with all detected skeletons and their labels
ALL_DETECTED_SKELETONS = par(config['input']['ALL_DETECTED_SKELETONS'])
# output: .npz destinations for the train/test feature splits
FEATURES_TRAIN = par(config['output']['FEATURES_TRAIN'])
FEATURES_TEST = par(config['output']['FEATURES_TEST'])
# -- Functions
def load_numpy_array(ALL_DETECTED_SKELETONS):
    ''' Load skeletons and labels from the given .npz archive.

    Returns (skeletons, action_class_ints, clip_numbers); the last two are
    int arrays pulled field-wise out of every stored label record.
    '''
    archive = np.load(ALL_DETECTED_SKELETONS)
    skeletons = archive['ALL_SKELETONS']
    labels = archive['ALL_LABELS']
    # Extract the action-class and clip-number fields from each label record.
    action_class_int = [label[ACTION_CLASS_INT_INEDX] for label in labels]
    video_clips = [label[CLIP_NUM_INDEX] for label in labels]
    action_class_int_ndarray = np.array(action_class_int, dtype='i')
    video_clips_ndarray = np.array(video_clips, dtype='i')
    return skeletons, action_class_int_ndarray, video_clips_ndarray
def convert_action_to_int(action, ACTION_CLASSES):
    ''' Convert an action class name to its index in ACTION_CLASSES.
    May be redundant because labels already store the integer index.
    Arguments:
        action {str}: action name read from the clips text file.
        ACTION_CLASSES {list}: all pre-defined action classes in config/config.json
    Return:
        {int}: index of the action, or None (implicitly) for unknown actions.
    '''
    try:
        return ACTION_CLASSES.index(action)
    except ValueError:
        return None
def extract_features(
        skeletons, labels, clip_number, window_size):
    ''' From image index and raw skeleton positions,
    Extract features of body velocity, joint velocity, and normalized joint positions.

    The Features_Generator is reset whenever a new clip number begins, so the
    sliding window never mixes frames from different video clips. Only frames
    for which the generator reports success contribute to the output arrays,
    so positions/velocities/labels stay aligned with each other (but not with
    the input index).
    '''
    positions_temp = []
    velocity_temp = []
    labels_temp = []
    iClipsCounter = len(clip_number)
    debuger_list = []
    prev_clip = 0
    # Loop through all data
    for i, clip in enumerate(clip_number):
        # If a new video clip starts, reset the feature generator
        if i == 0 or clip != prev_clip:
            Features_Generator = uti_features_extraction.Features_Generator(window_size)
        # Get features (joint order is rebuilt to match the training layout first)
        skeletons_rebuild = uti_features_extraction.rebuild_skeleton_joint_order_by_training(skeletons[i, :])
        skeletons_rebuild_lists = np.array(skeletons_rebuild)
        success, features_x, features_xs = Features_Generator.calculate_features(skeletons_rebuild_lists)
        if success: # True if (data length > 5) and (skeleton has enough joints)
            positions_temp.append(features_x)
            velocity_temp.append(features_xs)
            labels_temp.append(labels[i])
        # Progress printout
        print(f'{i+1}/{iClipsCounter}', end=', ')
        prev_clip = clip
    positions_temp = np.array(positions_temp)
    velocity_temp = np.array(velocity_temp)
    labels_temp = np.array(labels_temp)
    return positions_temp, velocity_temp, labels_temp
def shuffle_dataset(datasets_position, datasets_velocity, labels, test_percentage):
    """Randomly split position/velocity/label arrays into train and test parts.

    The same random permutation is applied to all three arrays so rows stay
    aligned; the first test_percentage fraction becomes the test set.
    """
    n_samples = labels.shape[0]
    order = np.random.permutation(n_samples)
    n_test = int(n_samples * test_percentage)
    test_idx = order[:n_test]
    train_idx = order[n_test:]
    train_pos = datasets_position[train_idx, :]
    test_pos = datasets_position[test_idx, :]
    train_vel = datasets_velocity[train_idx, :]
    test_vel = datasets_velocity[test_idx, :]
    train_labels = labels[train_idx]
    test_labels = labels[test_idx]
    return train_pos, train_vel, train_labels, test_pos, test_vel, test_labels
# -- Main
def main_function():
    '''
    Load skeleton data from `skeletons_info.txt`, process data,
    and then save features and labels to .npz file.

    Pipeline: load raw skeletons -> extract windowed position/velocity
    features -> shuffle & split into train/test -> save both splits.
    '''
    # Load data
    skeletons, action_class_int, clip_number = load_numpy_array(ALL_DETECTED_SKELETONS )
    # Process Features
    print('\nExtracting time-serials features ...')
    position, velocity, labels = extract_features(skeletons, action_class_int, clip_number, FEATURE_WINDOW_SIZE)
    print(f'All Points.shape = {position.shape}, All Velocity.shape = {velocity.shape}')
    # Split into train/test; TEST_DATA_SCALE is the test-set fraction.
    position_train, velocity_train, label_train, position_test, velocity_test, label_test = shuffle_dataset(position, velocity, labels, TEST_DATA_SCALE)
    print(f'Train Points.shape = {position_train.shape}, Train Velocity.shape = {velocity_train.shape}')
    print(f'Test Points.shape = {position_test.shape}, Test Velocity.shape = {velocity_test.shape}')
    # Save Features to npz file
    np.savez(FEATURES_TRAIN, POSITION_TRAIN = position_train, VELOCITY_TRAIN = velocity_train, LABEL_TRAIN = label_train)
    np.savez(FEATURES_TEST, POSITION_TEST = position_test, VELOCITY_TEST = velocity_test, LABEL_TEST = label_test)
if __name__ == '__main__':
    # Script entry point: build and save the train/test feature datasets.
    main_function()
    print('Programms End')
|
995,759 | f8581d1f2fe8dbc57613e746407a7d8382cdc428 | ## Exploratory Data Analysis cheat sheet
### Yasith Kariyawasam
## Distribution plots
# Histogram + KDE of one numeric column (assumes seaborn imported as `sns`
# and a `tips` DataFrame in scope — TODO confirm; not defined in this file).
sns.distplot(tips['total_bill'])
### remove kde layer using
sns.distplot(tips['total_bill'], kde=False)
## Joint Plot
# Bivariate plot of two columns; `kind` selects scatter/kde/hex etc.
sns.jointplot(x='variable1',y='variable2',data=data,kind='scatter')
## use kind to specify type of jointplot i.e 'kind = kde'
#Pair Plots
# NOTE(review): `sbr` looks like a typo for the `sns` alias used above — confirm.
sbr.pairplot(df,hue='Clicked on Ad',palette='rainbow')
|
995,760 | 6e6f8a70b54599012195421d60b15c979f568baa | class Hotel():
    def __init__(self, numero_maximo_de_huespedes, lugares_de_estacionamiento):
        # Maximum guest capacity (stored but not enforced by anadir_huespedes).
        self.numero_maximo_de_huespedes = numero_maximo_de_huespedes
        # Number of parking spots.
        self.lugares_de_estacionamiento = lugares_de_estacionamiento
        # Current occupancy starts empty.
        self.huespedes = 0
def anadir_huespedes(self, cantidad_huespedes):
self.huespedes += cantidad_huespedes
def checkout(self, cantidad_huespedes):
self.huespedes -= cantidad_huespedes
def ocupacion_total(self):
return self.huespedes
hotel_chincha = Hotel(numero_maximo_de_huespedes=50 , lugares_de_estacionamiento= 20)
# Bug fix: ocupacion_total is a method — without the call parentheses the
# f-strings printed the bound-method repr instead of the guest count.
print(f'El número de huespedes es: {hotel_chincha.ocupacion_total()}')
hotel_chincha.anadir_huespedes(50)
print(f'El número de huespedes es: {hotel_chincha.ocupacion_total()}')
hotel_chincha.checkout(30)
print(f'El número de huespedes es: {hotel_chincha.ocupacion_total()}')
#print(hotel_chincha.numero_maximo_de_huespedes)
#print(hotel_chincha.lugares_de_estacionamiento)
995,761 | 387d1be03e42e99191161660ab17565e4ade50c5 | print("Enter number: ")
num = int(input())
digits = str(num)
order = len(digits)
# An Armstrong (narcissistic) number equals the sum of its digits, each raised
# to the power of the digit count. The original shadowed the builtins `list`
# and `sum` and used two passes; a single accumulation loop suffices.
total = 0
for digit in digits:
    total += int(digit) ** order
print(total)
if total == num:
    print("Number is an armstrong number")
else:
    print("Number is not an armstrong number")
|
995,762 | 078a54e2774e7113b5fea021c8516a1fec7d2794 | ####### TOKENIZER
class Tokenizer(object):
    """Word-level tokenizer mapping tokens to integer ids and back.

    The special tokens <sos>, <eos> and <pad> are appended after the sorted
    vocabulary, so they always receive the three highest ids.
    """
    def __init__(self):
        self.stoi = {}  # token -> id
        self.itos = {}  # id -> token
    def __len__(self):
        return len(self.stoi)
    def fit_on_texts(self, texts):
        """Build the vocabulary from whitespace-split texts."""
        tokens = set()
        for text in texts:
            tokens.update(text.split(' '))
        ordered = sorted(tokens) + ['<sos>', '<eos>', '<pad>']
        for idx, token in enumerate(ordered):
            self.stoi[token] = idx
        self.itos = {idx: token for token, idx in self.stoi.items()}
    def text_to_sequence(self, text):
        """Encode one text as [<sos>] + token ids + [<eos>]."""
        ids = [self.stoi['<sos>']]
        ids.extend(self.stoi[token] for token in text.split(' '))
        ids.append(self.stoi['<eos>'])
        return ids
    def texts_to_sequences(self, texts):
        return [self.text_to_sequence(text) for text in texts]
    def sequence_to_text(self, sequence):
        # Tokens are concatenated without separators, mirroring decoding use.
        return ''.join(self.itos[idx] for idx in sequence)
    def sequences_to_texts(self, sequences):
        return [self.sequence_to_text(sequence) for sequence in sequences]
    def predict_caption(self, sequence):
        """Decode ids into a caption, stopping at the first <eos> or <pad>."""
        pieces = []
        for idx in sequence:
            if idx == self.stoi['<eos>'] or idx == self.stoi['<pad>']:
                break
            pieces.append(self.itos[idx])
        return ''.join(pieces)
def predict_captions(self, sequences):
captions = []
for sequence in sequences:
caption = self.predict_caption(sequence)
captions.append(caption)
return captions |
995,763 | 257250de9f505df4491759ab22bd8634aebf0419 | # all axes tutorial
# This program will:
# clear blocks above 0,0,0
# place floor at 0,0,0
# place walls 100 blocks away in each direction
# place 5 block grid on ceiling
# set the player to 0,0,0
#
import mcpi.minecraft as minecraft
import mcpi.block as block
import time
import mcpi.minecraftstuff as minecraftstuff
# Connect to the running Minecraft server and create the drawing helper.
mc = minecraft.Minecraft.create()
mcdrawing = minecraftstuff.MinecraftDrawing(mc)
# Player's current tile position (integer block coordinates).
pos = mc.player.getTilePos()
|
995,764 | 9442347dbfbc94a12b973e30efc15ee110a4bf8f | import unittest
from urlparse import parse_qs
from django.test.client import Client
from placethings.api.models import Thing
from placethings.settings import DOMAIN, MEDIA_ROOT
class MediaHandlerTest(unittest.TestCase):
    """Exercise the media-serving endpoint for a Thing created through the API."""
    def testimage_handler(self):
        """
        Verifies that placing anonymously is working
        """
        # Reuse an existing Thing when the test DB already has one; otherwise
        # create one through the public /api/place/ endpoint first.
        things = Thing.objects.all()
        if len( things ):
            thing = things[0]
        else:
            c = Client()
            data = parse_qs( 'title=&tags=&lattitude=32.82248&longitude=-96.762986&duration=&parent=&privacy=U&lifespan=&format=txt' )
            data[ 'media' ] = open( MEDIA_ROOT + 'unittest_image.jpg' )
            c.post( '/api/place/', data )
            thing = Thing.objects.all()[0]
        # Strip the domain so the media URI can be fetched via the test client.
        uri = thing.media.replace( 'http://' + DOMAIN, '' )
        c = Client()
        response = c.get( uri )
        self.failUnlessEqual(response.status_code, 200)
995,765 | 6166a7613647ca3a549330c02310203c78a653e2 | from numpy import *
from matplotlib.pyplot import *
# Plot Drake Passage transport and ice shelf melt rates/mass loss for the third
# repetition of the spinup forcing (1992-2005), for both the low-res and
# high-res control simulations.
def timeseries_rep3_compare ():
    """Plot Drake Passage transport and per-ice-shelf basal mass loss for the
    third repetition (1992-2005) of the spinup forcing, comparing the low-res
    and high-res control simulations against observational ranges.

    Reads dpt.log / massloss.log from each experiment directory and writes one
    PNG per quantity into the working directory. No return value.
    """
    # Paths to experiment directories
    directory_head = '/short/y99/kaa561/FESOM/'
    expt_dir = ['lowres_spinup/', 'highres_spinup/']
    # Titles for plotting
    expt_titles = ['low res', 'high res']
    # Colours for plotting
    expt_colours = ['blue', 'green']
    year_start = 1992
    year_end = 2005
    # Skip the first 2 repetitions
    skipyears = 28
    # Number of records per year (assumes 5-day averages).
    # Bug fix: use integer division — under Python 3, 365/5 is a float and the
    # list slices below (dpt_tmp[skipyears*peryear:...]) would raise TypeError.
    peryear = 365//5
    # Bounds of observations for Drake Passage transport
    dpt_low = 134
    dpt_high = 173.3
    # Titles for each ice shelf
    names = ['All Ice Shelves', 'Larsen D Ice Shelf', 'Larsen C Ice Shelf', 'Wilkins & George VI & Stange Ice Shelves', 'Ronne-Filchner Ice Shelf', 'Abbot Ice Shelf', 'Pine Island Glacier Ice Shelf', 'Thwaites Ice Shelf', 'Dotson Ice Shelf', 'Getz Ice Shelf', 'Nickerson Ice Shelf', 'Sulzberger Ice Shelf', 'Mertz Ice Shelf', 'Totten & Moscow University Ice Shelves', 'Shackleton Ice Shelf', 'West Ice Shelf', 'Amery Ice Shelf', 'Prince Harald Ice Shelf', 'Baudouin & Borchgrevink Ice Shelves', 'Lazarev Ice Shelf', 'Nivl Ice Shelf', 'Fimbul & Jelbart & Ekstrom Ice Shelves', 'Brunt & Riiser-Larsen Ice Shelves', 'Ross Ice Shelf']
    # Figure names for each ice shelf
    fig_names = ['total_massloss.png', 'larsen_d.png', 'larsen_c.png', 'wilkins_georgevi_stange.png', 'ronne_filchner.png', 'abbot.png', 'pig.png', 'thwaites.png', 'dotson.png', 'getz.png', 'nickerson.png', 'sulzberger.png', 'mertz.png', 'totten_moscowuni.png', 'shackleton.png', 'west.png', 'amery.png', 'princeharald.png', 'baudouin_borchgrevink.png', 'lazarev.png', 'nivl.png', 'fimbul_jelbart_ekstrom.png', 'brunt_riiserlarsen.png', 'ross.png']
    # Observed mass loss (Rignot 2013) and uncertainty for each ice shelf, in
    # Gt/y
    obs_massloss = [1325, 1.4, 20.7, 135.4, 155.4, 51.8, 101.2, 97.5, 45.2, 144.9, 4.2, 18.2, 7.9, 90.6, 72.6, 27.2, 35.5, -2, 21.6, 6.3, 3.9, 26.8, 9.7, 47.7]
    obs_massloss_error = [235, 14, 67, 40, 45, 19, 8, 7, 4, 14, 2, 3, 3, 8, 15, 10, 23, 3, 18, 2, 2, 14, 16, 34]
    # Make time axis
    time = arange(year_start, year_end+1, 1.0/peryear)
    num_time = size(time)
    num_years = year_end - year_start + 1
    # Drake Passage transport
    dpt = empty([2, num_time])
    # Loop over experiments
    for expt in range(2):
        # Read logfile (first line is a header)
        dpt_tmp = []
        f = open(directory_head + expt_dir[expt] + 'dpt.log')
        f.readline()
        for line in f:
            dpt_tmp.append(float(line))
        f.close()
        # Select third repetition
        dpt_tmp = dpt_tmp[skipyears*peryear:(skipyears+num_years)*peryear]
        dpt[expt,:] = array(dpt_tmp)
    # Plot
    fig, ax = subplots(figsize=(10,6))
    for expt in range(2):
        ax.plot(time, dpt[expt,:], label=expt_titles[expt], color=expt_colours[expt], linewidth=2)
    # Add lines for range of observations
    ax.axhline(dpt_low, color='red', linestyle='dashed', linewidth=2, label='observations')
    ax.axhline(dpt_high, color='red', linewidth=2, linestyle='dashed')
    title('Drake Passage Transport', fontsize=18)
    xlabel('Year', fontsize=14)
    ylabel('Sv', fontsize=14)
    xlim([year_start, amax(time)])
    grid(True)
    # Move plot over to make room for legend
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width*0.8, box.height])
    # Make legend
    ax.legend(loc='center left', bbox_to_anchor=(1,0.5))
    fig.savefig('drakepsgtrans.png')
    # Ice shelf mass loss
    massloss = empty([2, len(names), num_time])
    # Loop over experiments
    for expt in range(2):
        # Read logfile; ice-shelf sections are separated by non-numeric lines
        f = open(directory_head + expt_dir[expt] + 'massloss.log')
        f.readline()
        # Loop over ice shelves
        for index in range(len(names)):
            massloss_tmp = []
            for line in f:
                try:
                    massloss_tmp.append(float(line))
                except(ValueError):
                    break
            # Select third repetition
            massloss_tmp = massloss_tmp[skipyears*peryear:(skipyears+num_years)*peryear]
            massloss[expt,index,:] = array(massloss_tmp)
        f.close()
    # One plot for each ice shelf
    for index in range(len(names)):
        # Calculate range of observations
        massloss_low = obs_massloss[index] - obs_massloss_error[index]
        massloss_high = obs_massloss[index] + obs_massloss_error[index]
        fig, ax = subplots(figsize=(10,6))
        for expt in range(2):
            ax.plot(time, massloss[expt,index,:], label=expt_titles[expt], color=expt_colours[expt], linewidth=2)
        # Add lines for range of observations
        ax.axhline(massloss_low, color='red', linestyle='dashed', linewidth=2, label='observations')
        ax.axhline(massloss_high, color='red', linewidth=2, linestyle='dashed')
        title(names[index] + '\nBasal Mass Loss', fontsize=18)
        xlabel('Year', fontsize=14)
        ylabel('Gt/y', fontsize=14)
        xlim([year_start, amax(time)])
        grid(True)
        # Move plot over to make room for legend
        box = ax.get_position()
        ax.set_position([box.x0, box.y0, box.width*0.8, box.height])
        # Make legend
        ax.legend(loc='center left', bbox_to_anchor=(1,0.5))
        fig.savefig(fig_names[index])
# Command-line interface: run the comparison and write all figures to the
# working directory.
if __name__ == "__main__":
    timeseries_rep3_compare()
|
995,766 | abdceabf3353032529001d576443cce654fc4e6e | import pandas as pd
# Interactive survey: register people (name/sex/age), then report the count,
# the mean age, the registered women, and everyone at or above the mean age.
dados = list()
pessoa = dict()
mulheres = list()
acima_da_media = list()
tot = 0
while True:
    pessoa['nome'] = str(input('Nome: '))
    # Keep asking until the sex is a valid F/M answer.
    while True:
        pessoa['sexo'] = str(input('Sexo: [F/M]: ')).upper()
        if pessoa['sexo'] == 'F' or pessoa['sexo'] == 'M':
            break
        else:
            print('ERRO! Digite F para feminino ou M para masculino.')
    pessoa['idade'] = int(input('Idade: '))
    # Append a copy — `pessoa` itself is reused on the next iteration.
    dados.append(pessoa.copy())
    end = ' '
    # NOTE(review): an empty answer ('') is "in" the string 'SN' and would exit
    # this loop without a valid choice — confirm whether that is intended.
    while end not in 'SN':
        end = str(input('Deseja continuar? [S/N]')).upper()
        if end == 'N' or end == 'S':
            break
        print('ERRO! Digite S para continuar ou N para encerrar.')
    if end == 'N':
        break
qntd_pessoas = len(dados)
for elemento in dados:
    tot += elemento['idade']
media = tot / qntd_pessoas
for elemento in dados:
    if elemento['sexo'] == 'F':
        mulheres.append(elemento.copy())
    if elemento['idade'] >= media:
        acima_da_media.append(elemento.copy())
print(f'A) Ao todo temos {qntd_pessoas} pessoas cadastradas. ')
print(f'B) A média de idade é de {media:.2f}')
print(f'C) As mulheres cadastradas foram', end=' ')
for v in mulheres:
    print(v['nome'], end=' ')
print('')
print(f'D) As pessoas com idade acima da média: ')
for elemento in acima_da_media:
    for k, v in elemento.items():
        print(f'{k} = {v};', end=' ')
    print('')
df = pd.DataFrame(dados)
print(df) |
995,767 | c97949a4d1fdadab4346296f8eacc539a5007e63 | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 14 14:08:05 2018
@author: likkhian
"""
import numpy as np
class DecisionTree():
    """Binary decision-tree classifier for 0/1 labels, split on Gini impurity.

    fit() stores the learned tree as nested dicts in self.mature_tree; use the
    module-level predict() helper to classify new rows.
    """
    def __init__(self,max_depth=5,min_samples_split=1,debug=False):
        self.max_depth = max_depth                  # maximum depth of the tree
        self.min_samples_split = min_samples_split  # stop splitting at/below this leaf size
        self.debug = debug                          # enable verbose tracing
    def gini(self,a1,a2):
        """Weighted Gini impurity of a candidate split into label arrays a1, a2."""
        def ginicalc(array):
            # Impurity of one side; sum() counts positives for binary labels.
            positives = sum(array)
            total = len(array)
            return 1 - (positives/total)**2 - (1-positives/total)**2
        w1 = len(a1)/(len(a1)+len(a2))
        w2 = 1-w1
        return ginicalc(a1)*w1 + ginicalc(a2)*w2
    def fit(self,x,y):
        """Grow the tree on samples x (2-D array) and binary labels y.

        Sets self.mature_tree to a nested dict with keys 'index' (feature),
        'value' (threshold), 'left' and 'right' (sub-dict or leaf label).
        """
        def split_data(feature,cutoff,xx,yy):
            # Partition rows: feature value < cutoff goes left, the rest right.
            mask = xx[:,feature] < cutoff
            left_y = yy[mask]
            left_x = xx[mask]
            right_y = yy[~mask]
            right_x = xx[~mask]
            return left_x,left_y,right_x,right_y
        def get_split(x,y):
            # Exhaustively search every feature/threshold pair for the lowest Gini.
            nsamp,nfeat = np.shape(x)
            if self.debug:
                print('x shape',np.shape(x),np.shape(y))
            min_gini=999 # sentinel larger than any reachable Gini value
            for feature in range(nfeat):
                unique_vals = np.unique(x[:,feature])
                if self.debug:
                    print('unique_vals',unique_vals)
                for ii in range(1,len(unique_vals)):
                    left_x, left_y, right_x, right_y = split_data(feature,unique_vals[ii],x,y)
                    gini_val = self.gini(left_y,right_y)
                    if self.debug:
                        print('gini', gini_val)
                    if gini_val<min_gini:
                        min_gini=gini_val
                        location=[feature,unique_vals[ii]]
                        best_left_x,best_left_y,best_right_x,best_right_y = left_x, left_y, right_x, right_y
            return {'index':location[0],'value':location[1], \
                'groups': [best_left_x, best_left_y, best_right_x, best_right_y]}
        def get_consensus(leaf):
            # Majority label of a leaf; ties resolve to the lowest label.
            return np.bincount(leaf).argmax()
        def check_pure_feat(x_array):
            # True when no feature has more than two distinct values left.
            row,col = np.shape(x_array)
            for ii in range(col):
                if len(np.unique(x_array[:,ii])) > 2:
                    return False
            return True
        def split(node,depth):
            # Recursively turn 'groups' into child nodes or leaf labels.
            left_x, left_y, right_x, right_y = node['groups']
            del(node['groups'])
            # check if either group is empty
            if((not left_y.size) or (not right_y.size) ):
                node['left'] = node['right'] = get_consensus(list(left_y)+list(right_y))
                return
            # check if maximum depth was reached
            if depth >= self.max_depth:
                node['left'], node['right'] = get_consensus(left_y),get_consensus(right_y)
                if self.debug:
                    print('too deep!')
                return
            # left child: stop on small/pure leaves, otherwise keep splitting
            if len(left_y) <= self.min_samples_split:
                node['left'] = get_consensus(left_y)
            elif len(np.unique(left_y)) == 1: #leaf is pure
                node['left'] = get_consensus(left_y)
            elif check_pure_feat(left_x): #features are pure
                node['left'] = get_consensus(left_y)
            else:
                node['left'] = get_split(left_x,left_y)
                split(node['left'],depth+1)
            # right child: same stopping rules
            if len(right_y) <= self.min_samples_split:
                node['right'] = get_consensus(right_y)
            elif len(np.unique(right_y)) == 1: #leaf is pure
                node['right'] = get_consensus(right_y)
            elif check_pure_feat(right_x): #features are pure
                node['right'] = get_consensus(right_y)
            else:
                node['right'] = get_split(right_x,right_y)
                split(node['right'],depth+1)
        def grow_tree(train_x,train_y):
            root = get_split(train_x,train_y)
            split(root,1)
            return root
        self.mature_tree = grow_tree(x,y)
    def konica(self,node, depth=0):
        """Pretty-print the fitted tree; indentation shows depth.

        Bug fix: the recursive calls were written as bare konica(...), which
        raised NameError at runtime; they must go through self.
        """
        if isinstance(node, dict):
            print('hi, %s[X%d < %.3f]' % ((depth*' ', (node['index']+1), node['value'])))
            self.konica(node['left'], depth+1)
            self.konica(node['right'], depth+1)
        else:
            print('%s[%s]' % ((depth*' ', node)))
#x = np.array([[5,4,3,2,1,9,9,9,9,9],[9,9,9,9,9,1,2,3,4,5]]).T
#y = np.array([1,1,1,1,1,0,0,0,0,0])
##fit(x,y)
#
#dataset = np.array([[2.771244718,1.784783929],
# [1.728571309,1.169761413],
# [3.678319846,2.81281357],
# [3.961043357,2.61995032],
# [2.999208922,2.209014212],
# [7.497545867,3.162953546],
# [9.00220326,3.339047188],
# [7.444542326,0.476683375],
# [10.12493903,3.234550982],
# [6.642287351,3.319983761]])
#dataset = np.array([[5,8],
# [5,8],
# [5,8],
# [5,1],
# [5,1],
# [2,8],
# [2,8],
# [2,8],
# [2,1],
# [2,1]])
#y = np.array([0,0,0,1,1,1,1,1,0,0])
#tree = fit(dataset,y,max_depth=3,min_samples_split=3)
#konica(tree)
def predict(tree,x):
    """Predict labels for the rows of x with a fitted DecisionTree.

    Walks tree.mature_tree for each row: descend left when the row's split
    feature is below the node threshold, right otherwise, until a leaf label
    is reached. Returns a numpy array of leaf labels.
    """
    def _descend(node, row):
        branch = 'left' if row[node['index']] < node['value'] else 'right'
        child = node[branch]
        if isinstance(child, dict):
            return _descend(child, row)
        return child
    return np.array([_descend(tree.mature_tree, row) for row in x])
#predict(tree,dataset)
class RandomForest():
    """Bagging ensemble of DecisionTree classifiers with majority voting."""
    def __init__(self,num_trees,max_depth=5,subsample_size=1.0,feature_proportion=1.0):
        self.num_trees = num_trees                    # number of trees to grow
        self.max_depth = max_depth                    # depth limit for each tree
        self.subsample_size = subsample_size          # bootstrap fraction of rows per tree
        self.feature_proportion = feature_proportion  # fraction of features per tree
    def fit(self,x,y):
        """Train num_trees trees, each on a bootstrap row sample and a random
        (possibly permuted) subset of feature columns."""
        self.forest = []
        # Remember the feature columns each tree saw, in order, so predict()
        # can present the columns the same way at inference time.
        self.feature_indices = []
        for run in range(self.num_trees):
            sample_index = np.random.choice(len(y),int(self.subsample_size*(len(y))))
            x_sample = x[sample_index,:]
            y_sample = y[sample_index]
            feature_index = np.random.choice(np.shape(x_sample)[1], \
                int(self.feature_proportion*np.shape(x_sample)[1]),replace=False)
            x_sample = x_sample[:,feature_index]
            current_tree = DecisionTree(max_depth = self.max_depth)
            current_tree.fit(x_sample,y_sample)
            self.forest.append(current_tree)
            self.feature_indices.append(feature_index)
    def predict(self,x):
        """Return 0/1 majority-vote predictions for the rows of x.

        Bug fixes vs. the original:
        * DecisionTree has no .predict() method — the module-level predict()
          helper must be used (the original raised AttributeError here).
        * Each tree was trained on a permuted/subset feature order, so x must
          be sliced with that tree's stored feature indices before predicting.
        """
        results = []
        for tree, feats in zip(self.forest, self.feature_indices):
            results.append(predict(tree, x[:, feats]))
        mean_results = np.mean(results,axis=0)
        mean_results[mean_results>0.5] = 1
        mean_results[mean_results<=0.5] = 0
        return mean_results
|
995,768 | 2800320db8ee8ab7b968c0002de83b74b11b2ff2 | list = [12,24,35,24,24,88,120,155,88,120,155]
# Build a de-duplicated copy of `list`, preserving first-seen order.
# NOTE(review): Python 2 syntax (`print range`); both `list` and `range`
# shadow builtins — consider renaming if this is ever modernised.
range = []
for i in list[:]:
    # Linear scan: flag stays 0 unless the value was already collected.
    flag = 0
    for j in range :
        if i == j:
            flag = 1
    if flag == 1 :
        continue
    else :
        range.append(i)
print range
|
995,769 | ab10ec5e900f6a554a1df87bbf5384bd148bb2dc | import sys
# Read the 9x9 sudoku board from stdin; 0 marks an empty cell.
graph=[list(map(int,sys.stdin.readline().split()))for _ in range(9)]
# xSet[r][v] / ySet[c][v] / box[b][v]: value v already used in row r /
# column c / 3x3 box b (index 1..9; index 0 unused).
xSet=[list([False]*10)for _ in range(9)]
ySet=[list([False]*10)for _ in range(9)]
box=[list([False]*10) for _ in range(9)]
# dia[i][j]: index (0..8) of the 3x3 box containing cell (i, j).
dia=[[i//3*3+j//3 for j in range(9)]for i in range(9)]
spot=[]
for i in range(9):
    for j in range(9):
        if graph[i][j]==0:
            spot.append([i,j])
        else:
            # Mark the pre-filled digit as used in its row/column/box.
            xSet[i][graph[i][j]]=True
            ySet[j][graph[i][j]]=True
            box[dia[i][j]][graph[i][j]]=True
def travel(ceil):
    '''Backtracking over the empty cells; prints the first full solution and exits.'''
    if ceil==len(spot):
        for i in range(9):
            print(' '.join(map(str,graph[i])))
        exit()
    y,x=spot[ceil]
    for val in range(1,10):
        # Skip values that conflict with the row, column or box.
        if xSet[y][val] or ySet[x][val] or box[dia[y][x]][val]: continue
        xSet[y][val]=ySet[x][val]=box[dia[y][x]][val]=True
        graph[y][x]=val
        travel(ceil+1)
        # Undo the placement before trying the next candidate.
        xSet[y][val]=ySet[x][val]=box[dia[y][x]][val]=False
        graph[y][x]=0
travel(0)
|
995,770 | d6d2e58f1b2c78039f63ddecea46031c85e39e6a | # coding: utf-8
import dataclasses
import typing
import serpyco
from guilang.description import Description
from guilang.description import Part
from rolling.action.base import WithResourceAction
from rolling.action.base import WithStuffAction
from rolling.action.base import get_with_resource_action_url
from rolling.action.base import get_with_stuff_action_url
from rolling.exception import ImpossibleAction
from rolling.model.effect import CharacterEffectDescriptionModel
from rolling.rolling_types import ActionType
from rolling.server.link import CharacterActionLink
if typing.TYPE_CHECKING:
from rolling.model.character import CharacterModel
from rolling.model.stuff import StuffModel
from rolling.game.base import GameConfig
from rolling.kernel import Kernel
@dataclasses.dataclass
class EatResourceModel:
    # Empty input model: the eat-resource action takes no request parameters.
    pass
@dataclasses.dataclass
class EatStuffModel:
    # Empty input model: the eat-stuff action takes no request parameters.
    pass
class EatResourceAction(WithResourceAction):
    """Action letting a character eat a carried resource and gain its effects."""
    input_model: typing.Type[EatResourceModel] = EatResourceModel
    input_model_serializer = serpyco.Serializer(input_model)
    @classmethod
    def get_properties_from_config(cls, game_config: "GameConfig", action_config_raw: dict) -> dict:
        """Resolve config ids into resource descriptions, character effects
        and the required quantity per meal."""
        return {
            "accept_resources": [
                game_config.resources[r] for r in action_config_raw["accept_resources"]
            ],
            "effects": [
                game_config.character_effects[e] for e in action_config_raw["character_effects"]
            ],
            "require": action_config_raw["require"],
        }
    def check_is_possible(self, character: "CharacterModel", resource_id: str) -> None:
        """Raise ImpossibleAction unless resource_id is configured as edible."""
        accept_resources_ids = [rd.id for rd in self._description.properties["accept_resources"]]
        if resource_id in accept_resources_ids:
            return
        raise ImpossibleAction("Non consommable")
    def check_request_is_possible(
        self, character: "CharacterModel", resource_id: str, input_: EatResourceModel
    ) -> None:
        """Additionally require that the character carries at least the configured quantity."""
        self.check_is_possible(character, resource_id)
        # TODO BS 2019-09-14: perf
        carried_resource = next(
            (
                cr
                for cr in self._kernel.resource_lib.get_carried_by(character.id)
                if cr.id == resource_id
            )
        )
        require = self._description.properties["require"]
        if carried_resource.quantity >= require:
            return
        unit_name = self._kernel.translation.get(carried_resource.unit)
        raise ImpossibleAction(
            f"Vous ne possédez pas assez de {carried_resource.name} "
            f"({require} {unit_name} requis)"
        )
    def get_character_actions(
        self, character: "CharacterModel", resource_id: str
    ) -> typing.List[CharacterActionLink]:
        """Return the "eat" action link when the carried resource is edible, else []."""
        accept_resources_ids = [rd.id for rd in self._description.properties["accept_resources"]]
        # TODO BS 2019-09-14: perf
        carried_resource = next(
            (
                cr
                for cr in self._kernel.resource_lib.get_carried_by(character.id)
                if cr.id == resource_id
            )
        )
        if carried_resource.id in accept_resources_ids:
            return [
                # FIXME BS NOW (translated): because no description_id used to be
                # given, the action factory picked the first matching action when
                # consuming the resource — possibly the wrong one. Review this;
                # a description_id should probably always be given. Check the
                # consequences.
                CharacterActionLink(
                    name=f"Manger {carried_resource.name}",
                    link=get_with_resource_action_url(
                        character_id=character.id,
                        action_type=ActionType.EAT_RESOURCE,
                        resource_id=resource_id,
                        query_params={},
                        action_description_id=self._description.id,
                    ),
                    cost=None,
                )
            ]
        return []
    def perform(
        self, character: "CharacterModel", resource_id: str, input_: input_model
    ) -> Description:
        """Consume the required quantity and apply the configured effects."""
        character_doc = self._character_lib.get_document(character.id)
        effects: typing.List[CharacterEffectDescriptionModel] = self._description.properties[
            "effects"
        ]
        self._kernel.resource_lib.reduce_carried_by(
            character.id,
            resource_id,
            quantity=self._description.properties["require"],
            commit=False,
        )
        for effect in effects:
            self._effect_manager.enable_effect(character_doc, effect)
        self._kernel.server_db_session.add(character_doc)
        self._kernel.server_db_session.commit()
        return Description(
            title="Action effectué",
            footer_links=[
                Part(is_link=True, go_back_zone=True, label="Retourner à l'écran de déplacements"),
                Part(
                    is_link=True,
                    label="Voir l'inventaire",
                    form_action=f"/_describe/character/{character.id}/inventory",
                    classes=["primary"],
                ),
            ],
        )
class EatStuffAction(WithStuffAction):
    """Action letting a character eat a carried stuff item and gain its effects."""
    input_model: typing.Type[EatStuffModel] = EatStuffModel
    input_model_serializer = serpyco.Serializer(input_model)
    @classmethod
    def get_properties_from_config(cls, game_config: "GameConfig", action_config_raw: dict) -> dict:
        """Resolve config ids into edible stuff ids and character effect descriptions."""
        # Removed a leftover debug statement ("a = 1") that had no effect.
        return {
            "accept_stuff_ids": action_config_raw["accept_stuffs"],
            "effects": [
                game_config.character_effects[e] for e in action_config_raw["character_effects"]
            ],
        }
    def check_is_possible(self, character: "CharacterModel", stuff: "StuffModel") -> None:
        """Raise ImpossibleAction unless the stuff is configured as edible."""
        # TODO BS 2019-07-31: check is owned stuff
        if stuff.stuff_id in self._description.properties["accept_stuff_ids"]:
            return
        raise ImpossibleAction("Vous ne pouvez pas le manger")
    def check_request_is_possible(
        self, character: "CharacterModel", stuff: "StuffModel", input_: input_model
    ) -> None:
        """Same check as check_is_possible; eating stuff needs no extra request data."""
        self.check_is_possible(character, stuff)
    def get_character_actions(
        self, character: "CharacterModel", stuff: "StuffModel"
    ) -> typing.List[CharacterActionLink]:
        """Return the "eat" action link for edible stuff, or an empty list."""
        if stuff.stuff_id in self._description.properties["accept_stuff_ids"]:
            return [
                CharacterActionLink(
                    name=f"Manger {stuff.name}",
                    link=get_with_stuff_action_url(
                        character.id,
                        ActionType.EAT_STUFF,
                        query_params={},
                        stuff_id=stuff.id,
                        action_description_id=self._description.id,
                    ),
                    cost=self.get_cost(character, stuff),
                )
            ]
        return []
    def perform(
        self, character: "CharacterModel", stuff: "StuffModel", input_: input_model
    ) -> Description:
        """Destroy the stuff and apply the configured effects to the character."""
        character_doc = self._character_lib.get_document(character.id)
        effects: typing.List[CharacterEffectDescriptionModel] = self._description.properties[
            "effects"
        ]
        self._kernel.stuff_lib.destroy(stuff.id, commit=False)
        for effect in effects:
            self._effect_manager.enable_effect(character_doc, effect)
        self._kernel.server_db_session.add(character_doc)
        self._kernel.server_db_session.commit()
        return Description(
            title="Action effectué",
            footer_links=[
                Part(is_link=True, go_back_zone=True, label="Retourner à l'écran de déplacements"),
                Part(
                    is_link=True,
                    label="Voir l'inventaire",
                    form_action=f"/_describe/character/{character.id}/inventory",
                    classes=["primary"],
                ),
            ],
        )
|
995,771 | 0ad8aadb90234f727d2ef25260ad9d57d0d1b476 | #!/usr/bin/python
#Python implementation of Shamir's Secret Sharing using the BGW protocol
#Author: Patrick Crain
#BGW reference: http://cseweb.ucsd.edu/classes/fa02/cse208/lec12.html
import sys, os, random, json, time, shutil
from random import shuffle
from mpmath import * #mpmath for arbitrary float precision
mp.dps = 500; mp.pretty = True
#Max value for our finite field arithmetic
PRIME = 2074722246773485207821695222107608587480996474721117292752992589912196684750549658310084416732550077
DEBUG = True #Whether to print debug stuff
N = 30 #Max number of parties
SMAX = int(sqrt(PRIME)) #Maximum value for our secrets
PMAX = int(pow(PRIME,1/N)) #Maximum value for polynomial coefficients
CLOUD = "_cloud/" #Directory for temporary computation files
PRIV = "known-secrets/" #Directory for known secrets
#Terminal color codes
class col:
    """ANSI terminal escape codes used for coloured debug output."""
    BLN ='\033[0m'   # Blank
    UND ='\033[1;4m'  # Underlined
    INV ='\033[1;7m'  # Inverted
    CRT ='\033[1;41m' # Critical
    BLK ='\033[1;30m' # Black
    RED ='\033[1;31m' # Red
    GRN ='\033[1;32m' # Green
    YLW ='\033[1;33m' # Yellow
    BLU ='\033[1;34m' # Blue
    MGN ='\033[1;35m' # Magenta
    CYN ='\033[1;36m' # Cyan
    WHT ='\033[1;37m' # White
def dprint(string):
    """Print *string* only when the module-level DEBUG flag is set."""
    if not DEBUG:
        return
    print(string)
def dnprint(string,prec):
    """nprint *string* at precision prec, but only when DEBUG is enabled."""
    if not DEBUG:
        return
    nprint(string, prec)
#Load data from a JSON file
# fname = file to load from
def jload(fname):
    """Return the parsed JSON content of fname, or None when the file is absent."""
    if not os.path.exists(fname):
        return None
    with open(fname, 'r') as handle:
        return json.load(handle)
#Compute the greatest common denominator of a and b
#Based on code from (https://github.com/rhyzomatic/pygarble)
# a = first number
# b = second number
def extendedEuclideanAlgorithm(a, b):
    """Extended Euclid: return (x, y, d) with a*x + b*y = d = gcd(a, b).

    Uses floor/fmod (from the module's `from mpmath import *`) so it also
    works on arbitrary-precision values, not just ints.
    """
    # Normalise so |a| >= |b|; swap the Bezout coefficients accordingly.
    if abs(b) > abs(a):
        (x,y,d) = extendedEuclideanAlgorithm(b, a)
        return (y,x,d)
    if abs(b) == 0:
        return (1, 0, a)
    x1, x2, y1, y2 = 0, 1, 1, 0
    while b != 0:
        # Standard iterative update of quotient, remainder and coefficients.
        q = floor(a / b)
        r = floor(fmod(a,b))
        x = x2 - q*x1
        y = y2 - q*y1
        a, b, x2, x1, y2, y1 = b, r, x1, x, y1, y
    return (x2, y2, a)
#Find the multiplicative inverse of n mod PRIME
# n = number to find the multiplicative inverse for
def inverse(n):
    """Multiplicative inverse of n modulo the global PRIME.

    Assumes gcd(n, PRIME) == 1 (PRIME is prime, so any 0 < n < PRIME works).
    """
    x,y,d = extendedEuclideanAlgorithm(n, PRIME)
    return floor(fmod(x,PRIME))
#Compute the lagrange polynomial for a list of points
#Based on code from (https://github.com/rhyzomatic/pygarble)
# points = a list of points (x,y)
def lagrange(points):
    """Return a callable P(x): the Lagrange interpolation polynomial through points.

    points is a list of (x, y) pairs; mpf division keeps full mpmath precision.
    """
    def P(x):
        total = 0
        n = len(points)
        for i in range(0,n):
            xi, yi = points[i]
            def g(i, n):
                # i-th Lagrange basis: product of (x - xj)/(xi - xj) over j != i.
                tot_mul = 1
                for j in range(0,n):
                    if i == j:
                        continue
                    xj, yj = points[j]
                    tot_mul *= (x - xj) / mpf(xi - xj)
                return tot_mul
            total += yi * g(i, n)
        return total
    return P
#Generate a polynomial of degree d with intercept y
# y = y-intercept of polynomial
# d = degree of polynomial
def polygen(y,d):
    """Random degree-d polynomial (ascending coefficients) with constant term y."""
    coefficients = [y]
    for _ in range(d):
        coefficients.append(random.randrange(PMAX))
    return coefficients
#Evaluate a polyonmial p at point x
# p = polynomial to evaluate
# x = point to evaluate the polynomial at
def evalpolyat(p,x):
    """Evaluate the polynomial with ascending coefficients p at point x."""
    total = p[0]
    for degree in range(1, len(p)):
        total += p[degree] * pow(x, degree)
    return total
#Compute the Vandermonde matrix from an array
# arr = Array the Vandermonde matrix is based off of
def vandermonde(arr):
    """Build the square Vandermonde matrix over arr (an mpmath matrix).

    Row for each x in arr is [1, x, x^2, ..., x^(len(arr)-1)], built by
    repeated multiplication rather than pow().
    """
    v = []
    for x in arr:
        vr = [1]
        for i in range(1,len(arr)):
            vr.append(x*vr[i-1])
        v.append(vr)
#  dprint(col.GRN + "V: \n" + col.BLN + str(v))
    return matrix(v)
#Compute an (n,t) diagonal matrix
# n = total number of parties
# t = threshold number of parties
def diag(n,t):
    """n x n projection matrix: identity on the first t+1 coordinates, zero elsewhere.

    Used to truncate a degree-2t polynomial's coefficients down to degree t.
    """
    a = matrix([[(1 if (i==j and i>=0 and i<=t) else 0) for j in range(0,n)] for i in range(0,n)])
#  dprint(col.GRN + "P:" + col.BLN)
#  dnprint(a,5)
    return a
#Generate the A matrix as described here (http://cseweb.ucsd.edu/classes/fa02/cse208/lec12.html)
# t = threshold number of parties
# ps = list of parties involved in the computation
def genMatrixA(t, ps):
    """Build the degree-reduction matrix A = V * P * V^-1.

    t = threshold number of parties; ps = list of party ids.
    As described at http://cseweb.ucsd.edu/classes/fa02/cse208/lec12.html
    """
    van = vandermonde(ps)
    proj = diag(len(ps), t)
    return (van * proj) * (van ** -1)
#Easy one-line write to file
# path = file to write to
# line = string to write
def easyWrite(path, line):
    """Write *line* to the file CLOUD+path, overwriting any content."""
    full_path = CLOUD + path
    with open(full_path, "w") as out_file:
        out_file.write(line)
#Easy one-line read from file
# path = file to read from
def easyRead(path):
    """Read and return the first line of the file CLOUD+path.

    Blocks (polling once per second) until the file exists.
    """
    full_path = CLOUD + path
    if not os.path.exists(full_path):
        print("Waiting for " + full_path + "...")
        while not os.path.exists(full_path):
            time.sleep(1)
    with open(full_path, "r") as in_file:
        contents = in_file.read()
    return contents.split("\n")[0]
#Class for a party involved in the computation
class Party:
    """A party involved in the multi-party computation.

    Parties exchange shares through files on disk (easyRead/easyWrite);
    file names follow the pattern "<recipient-id>/<sender>-<name>-<kind>"
    for protocol messages and "<id>/<name>-share" for secret shares.
    """

    def __init__(self, myid, j):
        """Create a party.

        myid = ID / x-value of the party for shares
        j    = party's index in the list of party ids
        """
        self.id = myid          # ID / x-value of the party for shares
        self.relid = j          # Party's index in the list of party ids
        self.secretshares = {}  # Secrets which this party has shares for
        self.ranpoly = {}       # Secrets which this party has random polynomials for
        self.ranshares = {}     # Secrets which this party has collected random shares for
        self.vshares = {}       # Secrets which this party has computed v-shares for
        self.sshares = {}       # Computed shares of degree-reduced secrets

    def genRandomP(self, s1, s2, t):
        """Generate a random degree-2t polynomial (intercept 0) used to
        randomize the product share s1*s2 during degree reduction."""
        self.ranpoly[s1+"*"+s2] = polygen(0, t*2)

    def writeRanShares(self, s1, s2, oids):
        """Evaluate the random polynomial for s1*s2 at each party id in
        *oids* and distribute the resulting shares via disk."""
        name = s1+"*"+s2
        for o in oids:
            s = evalpolyat(self.ranpoly[name], o)
            easyWrite(str(o)+"/"+str(self.id)+"-"+s1+"-"+s2+"-ranshare", str(s))

    def loadRanShares(self, s1, s2, oids):
        """Collect (from disk) the random shares for s1*s2 sent by the
        parties listed in *oids*."""
        name = s1+"*"+s2
        for o in oids:
            share = mpmathify(easyRead(str(self.id)+"/"+str(o)+"-"+s1+"-"+s2+"-ranshare"))
            if name in self.ranshares.keys():
                self.ranshares[name].append(share)
            else:
                self.ranshares[name] = [share]

    def computeVShare(self, s1, s2):
        """Compute this party's v-share for s1*s2: the product of the two
        secret shares, blinded by the sum of collected random shares."""
        name = s1+"*"+s2
        self.vshares[name] = self.secretshares[s1][1]*(self.secretshares[s2][1])
        self.vshares[name] += sum(self.ranshares[name])

    def shareVShare(self, s1, s2, oids):
        """Re-share this party's v-share for s1*s2 using a fresh random
        polynomial of threshold degree and distribute the sub-shares."""
        rpoly = polygen(self.vshares[s1+"*"+s2], int((len(oids)-1)/2))
        for o in oids:
            s = evalpolyat(rpoly, o)
            easyWrite(str(o)+"/"+str(self.id)+"-"+s1+"-"+s2+"-vsubshare", str(s))

    def computeLinearShares(self, s1, s2, oids):
        """Run the linear protocol computing A*v (degree reduction) and
        distribute the resulting new shares to every party in *oids*.
        See http://cseweb.ucsd.edu/classes/fa02/cse208/lec12.html"""
        name = s1+"*"+s2
        A = genMatrixA(int((len(oids)-1)/2), oids)
        ki = 0
        for k in oids:
            total = 0
            ii = 0
            for i in oids:
                vs = mpmathify(easyRead(str(self.id)+"/"+str(i)+"-"+s1+"-"+s2+"-vsubshare"))
                total += (vs*A[ki,ii])
                ii += 1
            easyWrite(str(k)+"/"+str(self.id)+"-"+s1+"-"+s2+"-vnewshare", str(total))
            ki += 1

    def reconstructSShare(self, s1, s2, oids):
        """Interpolate the collected v-new-shares at 0 to reconstruct this
        party's degree-t share of s1*s2."""
        svals = []
        for o in oids:
            v = easyRead(str(self.id)+"/"+str(o)+"-"+s1+"-"+s2+"-vnewshare")
            svals.append((o, mpmathify(v)))
        s = nint(lagrange(svals)(0)) % PRIME
        self.sshares[s1+"*"+s2] = (self.id, s)

    def loadSecretShare(self, name):
        """Load this party's share of secret *name* from disk, cache it in
        secretshares and return the (id, value) pair."""
        if not DEBUG:
            print(col.WHT+"Loading share " + str(self.id) + " of "+name+col.BLN)
        fname = str(self.id)+"/"+name+"-share"
        line = mpmathify(easyRead(fname))
        self.secretshares[name] = (self.id, line)
        return(self.secretshares[name])

    def writeSummedShare(self, s1, s2, newname):
        """Write the share of s1+s2 to disk under *newname*."""
        print(col.WHT+"Writing share "+s1+"+"+s2+"[" + str(self.id) + "] to file"+col.BLN)
        easyWrite(str(self.id)+"/"+newname+"-share", str(self.secretshares[s1][1]+self.secretshares[s2][1]))

    def writeConstSumShare(self, s, c, newname):
        """Write the share of s+c (c a public constant) to disk under *newname*."""
        print(col.WHT+"Writing share "+newname+"[" + str(self.id) + "] to file"+col.BLN)
        # Fixed: a stray "-" used to be inserted before newname, producing
        # "<id>/-<newname>-share", a path loadSecretShare could never find.
        easyWrite(str(self.id)+"/"+newname+"-share", str(self.secretshares[s][1]+c))

    def writeSubbedShare(self, s1, s2, newname):
        """Write the share of s1-s2 to disk under *newname*."""
        print(col.WHT+"Writing share "+s1+"-"+s2+"[" + str(self.id) + "] to file"+col.BLN)
        easyWrite(str(self.id)+"/"+newname+"-share", str(self.secretshares[s1][1]-self.secretshares[s2][1]))

    def writeConstSubShare(self, s, c, newname):
        """Write the share of s-c (c a public constant) to disk under *newname*."""
        print(col.WHT+"Writing share "+newname+"[" + str(self.id) + "] to file"+col.BLN)
        easyWrite(str(self.id)+"/"+newname+"-share", str(self.secretshares[s][1]-c))

    def writeMultipliedShare(self, s1, s2, newname):
        """Write the (degree-reduced) share of s1*s2 to disk under *newname*."""
        print(col.WHT+"Writing share "+newname+"[" + str(self.id) + "] to file"+col.BLN)
        easyWrite(str(self.id)+"/"+newname+"-share", str(self.sshares[s1+"*"+s2][1]))

    def writeConstMultipleShare(self, s, c, newname):
        """Write the share of s*c (c a public constant) to disk under *newname*."""
        print(col.WHT+"Writing share "+newname+"[" + str(self.id) + "] to file"+col.BLN)
        easyWrite(str(self.id)+"/"+newname+"-share", str(self.secretshares[s][1]*c))
|
995,772 | 497fda2bad9f3a7cf7fceaf8df87a97c2c5dd50d | # ChoiceModels
# See full license in LICENSE
from .mergedchoicetable import *
from .simulation import * |
995,773 | 80f6b00c7d668b217322e813281da57ebcaeee61 | #!/usr/bin/env python
import rospy
import math
import tf
if __name__=='__main__':
    # Listen for the TF transform between /frame_a and /frame_b and print
    # it once per second.  (Python 2 / ROS 1 style script.)
    rospy.init_node('frame_a_frame_b_listener_node')
    listener = tf.TransformListener()
    rate = rospy.Rate(1.0)  # poll at 1 Hz
    # Block until the transform first becomes available (up to 4 seconds).
    listener.waitForTransform('/frame_a','/frame_b',rospy.Time(),rospy.Duration(4.0))
    while (not rospy.is_shutdown()):
        try:
            # rospy.Time(0) requests the latest available transform.
            (trans,rot) = listener.lookupTransform('/frame_a', '/frame_b', rospy.Time(0))
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            # Transform temporarily unavailable; retry immediately.
            continue
        quaternion = rot
        # Convert the quaternion into roll/pitch/yaw Euler angles.
        rpy = tf.transformations.euler_from_quaternion(quaternion)
        print 'transformation between frame_a and frame_b detected '
        print 'translation vector: ',trans[0],',',trans[1],',',trans[2],')'
        print 'rotation angles: roll = ',rpy[0],'pitch = ',rpy[1],'yaw= ',rpy[2]
        rate.sleep()
995,774 | dd95f280a37c2535619154229fe5115da5901e12 | import pytest
from typing import List, Tuple, Optional, Union
from block import Block
from blocky import _block_to_squares
from goal import BlobGoal, PerimeterGoal, _flatten, generate_goals, Goal
from player import _is_move_valid, _get_block, create_players, Player, SmartPlayer, RandomPlayer, HumanPlayer
from renderer import Renderer
from settings import COLOUR_LIST
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
PACIFIC_POINT = (1, 128, 181)
OLD_OLIVE = (138, 151, 71)
REAL_RED = (199, 44, 58)
MELON_MAMBO = (234, 62, 112)
DAFFODIL_DELIGHT = (255, 211, 92)
TEMPTING_TURQUOISE = (75, 196, 213)
def set_children(block: Block, colours: List[Optional[Tuple[int, int, int]]]) \
        -> None:
    """Set the children at <level> for <block> using the given <colours>.

    Precondition:
    - len(colours) == 4
    - block.level + 1 <= block.max_depth
    """
    child_size = block._child_size()
    child_positions = block._children_positions()
    child_level = block.level + 1
    # Replace any existing children with four freshly built ones.
    block.children = [
        Block(child_positions[i], child_size, colours[i], child_level,
              block.max_depth)
        for i in range(4)
    ]
def lone_block() -> Block:
    """Return a childless unit block (max_depth 0) coloured REAL_RED."""
    return Block((0, 0), 750, REAL_RED, 0, 0)
def one_block_four_children_(max_depth: int) -> Block:
    """Return a block with four leaf children of distinct colours."""
    b = Block((0, 0), 750, None, 0, max_depth)
    set_children(b, [TEMPTING_TURQUOISE, MELON_MAMBO, REAL_RED, OLD_OLIVE])
    return b
def one_block_sixteen_grandkids_(max_depth: int) -> Block:
    """Return a block whose four children each have four leaf children."""
    b = Block((0, 0), 750, None, 0, max_depth)
    set_children(b, [None, None, None, None])
    for child in b.children:
        set_children(child, [TEMPTING_TURQUOISE, MELON_MAMBO,
                             REAL_RED, OLD_OLIVE])
    return b
def one_block_4_children_8_grandkids_4_great_grandkids_(max_depth: int) -> Block:
    """Return a three-level board: 4 children, 8 grandkids, 4 great-grandkids."""
    b = Block((0, 0), 750, None, 0, max_depth)
    set_children(b, [TEMPTING_TURQUOISE, DAFFODIL_DELIGHT, MELON_MAMBO,
                     OLD_OLIVE])
    # Subdivide the first two children.
    for i in range(2):
        b.children[i].colour = None
        set_children(b.children[i], [TEMPTING_TURQUOISE, MELON_MAMBO,
                                     REAL_RED, TEMPTING_TURQUOISE])
    # Subdivide one grandchild one level further.
    b.children[1].children[3].colour = None
    set_children(b.children[1].children[3], [OLD_OLIVE,
                                             OLD_OLIVE, REAL_RED,
                                             OLD_OLIVE])
    return b
def one_block_4_kids_one_kid_has_4_kids_(max_depth: int) -> Block:
    """Return a block with four children where only child 2 is subdivided."""
    b = Block((0, 0), 750, None, 0, max_depth)
    set_children(b, [TEMPTING_TURQUOISE, MELON_MAMBO, REAL_RED, OLD_OLIVE])
    b.children[2].colour = None
    set_children(b.children[2], [TEMPTING_TURQUOISE, MELON_MAMBO,
                                 REAL_RED, REAL_RED])
    return b
def complicated_block_depth_3_(max_depth: int) -> Block:
    """Return a fully subdivided depth-3 board with mixed colours."""
    b = Block((0, 0), 750, None, 0, max_depth)
    set_children(b, [None, None, None, None])
    set_children(b.children[0], [TEMPTING_TURQUOISE, OLD_OLIVE,
                                 REAL_RED, MELON_MAMBO])
    set_children(b.children[1], [OLD_OLIVE, MELON_MAMBO,
                                 REAL_RED, None])
    set_children(b.children[1].children[3], [TEMPTING_TURQUOISE, MELON_MAMBO,
                                             MELON_MAMBO, REAL_RED])
    set_children(b.children[2], [OLD_OLIVE, TEMPTING_TURQUOISE, OLD_OLIVE, None])
    set_children(b.children[2].children[3], [REAL_RED, REAL_RED,
                                             TEMPTING_TURQUOISE,
                                             TEMPTING_TURQUOISE])
    set_children(b.children[3], [None, OLD_OLIVE, MELON_MAMBO,
                                 TEMPTING_TURQUOISE])
    set_children(b.children[3].children[0], [TEMPTING_TURQUOISE, REAL_RED,
                                             MELON_MAMBO, REAL_RED])
    return b
def complicated_block_depth_2_(max_depth: int) -> Block:
    """Return a depth-2 board where only child 1 is subdivided."""
    b = Block((0, 0), 750, None, 0, max_depth)
    set_children(b, [REAL_RED, None, OLD_OLIVE, BLACK])
    set_children(b.children[1], [REAL_RED, OLD_OLIVE, MELON_MAMBO, BLACK])
    return b
# TESTS FOR CREATE_COPY #
def test_create_copy_simple() -> None:
    """create_copy returns an equal but independent leaf block."""
    b = lone_block()
    b_copy = b.create_copy()
    assert b == b_copy
    assert b is not b_copy
    # Mutating the original must not affect the copy.
    b.colour = BLACK
    assert b != b_copy
def test_create_copy_cd2() -> None:
    """create_copy deep-copies every level of a depth-2 board."""
    b = complicated_block_depth_2_(2)
    b_copy = b.create_copy()
    assert b == b_copy
    assert b is not b_copy
    for i in range(4):
        assert b.children[i] == b_copy.children[i]
        assert b.children[i] is not b_copy.children[i]
    for i in range(4):
        assert b.children[1].children[i] == b_copy.children[1].children[i]
        assert b.children[1].children[i] is not b_copy.children[1].children[i]
# TESTS FOR ROTATE #
def test_rotate_lone() -> None:
    """Rotating a leaf block reports failure and leaves it unchanged."""
    b = lone_block()
    assert not b.rotate(1)
    # Compare against a fresh copy: the original `assert b == b` was a
    # tautology and could never detect an unwanted mutation.
    assert b == lone_block()
def test_rotate_logic() -> None:
    """One rotation one way equals three rotations the other way."""
    b = complicated_block_depth_3_(3)
    b2 = complicated_block_depth_3_(3)
    b.rotate(1)
    b2.rotate(3)
    b2.rotate(3)
    b2.rotate(3)
    assert b == b2
def test_non_rotation() -> None:
    """Rotating a leaf child fails; rotating a subdivided child cycles colours."""
    b = one_block_4_kids_one_kid_has_4_kids_(5)
    assert not b.children[0].rotate(1)
    assert b.children[2].rotate(1)
    assert b.children[2].children[0].colour == MELON_MAMBO
    assert b.children[2].children[1].colour == REAL_RED
    assert b.children[2].children[2].colour == REAL_RED
    assert b.children[2].children[3].colour == TEMPTING_TURQUOISE
# TESTS FOR SWAP #
def test_swap_lone() -> None:
    """Swapping a leaf block fails and leaves it unchanged."""
    b = lone_block()
    assert not b.swap(0)
    b2 = lone_block()
    assert b == b2
    assert b is not b2
def test_swap_logic() -> None:
    """Swapping twice restores the original configuration."""
    board = complicated_block_depth_3_(3)
    b = board.children[1].create_copy()
    b2 = board.children[1].create_copy()
    assert board.children[1].swap(0)
    assert b.swap(0)
    assert board.children[1] == b
    assert board.children[1].swap(0)
    assert board.children[1] == b2
def test_non_swap() -> None:
    """Swapping a leaf child fails; swapping a subdivided child reorders."""
    board = one_block_4_kids_one_kid_has_4_kids_(2)
    board_copy = one_block_4_kids_one_kid_has_4_kids_(2)
    assert not board.children[1].swap(1)
    assert board == board_copy
    assert board.children[2].swap(1)
    assert board.children[2].children[0].colour == REAL_RED
    assert board.children[2].children[1].colour == REAL_RED
    assert board.children[2].children[2].colour == MELON_MAMBO
    assert board.children[2].children[3].colour == TEMPTING_TURQUOISE
# TESTS FOR PAINT #
def test_paint_lone() -> None:
    """Painting fails with the current colour, succeeds with a new one."""
    b = lone_block()
    assert not b.paint(REAL_RED)
    assert b.paint(TEMPTING_TURQUOISE)
    assert b.colour == TEMPTING_TURQUOISE
def test_paint_depth() -> None:
    """Painting requires a max-depth leaf and a different colour."""
    board = one_block_four_children_(1)
    board2 = one_block_four_children_(2)
    assert board.children[0].paint(MELON_MAMBO)
    # Same colour as the target -> refused.
    assert not board.children[1].paint(MELON_MAMBO)
    # Not at maximum depth -> refused.
    assert not board2.children[0].paint(MELON_MAMBO)
    assert not board2.children[1].paint(MELON_MAMBO)
def test_non_paint() -> None:
    """Subdivided blocks cannot be painted; the board stays unchanged."""
    board = one_block_sixteen_grandkids_(2)
    board_copy = one_block_sixteen_grandkids_(2)
    for i in range(4):
        assert not board.children[i].paint(REAL_RED)
    assert board == board_copy
# TESTS FOR COMBINE #
def test_combine_lone() -> None:
    """Combining a leaf block fails and leaves it unchanged."""
    b = lone_block()
    b2 = lone_block()
    assert not b.combine()
    assert b == b2
def test_combine_depth() -> None:
    """combine collapses children into their dominant colour, but only
    at one level above max depth (here OLD_OLIVE wins 3 to 1)."""
    board = one_block_4_children_8_grandkids_4_great_grandkids_(3)
    board2 = one_block_4_children_8_grandkids_4_great_grandkids_(4)
    assert board.children[1].children[3].colour is None
    assert board.children[1].children[3].combine()
    assert board.children[1].children[3].colour == OLD_OLIVE
    # Deeper max_depth: the same block is no longer combinable.
    assert board2.children[1].children[3].colour is None
    assert not board2.children[1].children[3].combine()
    assert board2.children[1].children[3].colour is None
# TESTS FOR GENERATE MOVE #
def test_returns_block() -> None:
    """This one fails a lot because block.combine() is buggy because we are
    using colours that are not in COLOUR_LIST, something which they do not
    test for.
    """
    gr = BlobGoal(REAL_RED)
    gp = PerimeterGoal(MELON_MAMBO)
    board = one_block_four_children_(1)
    rp = RandomPlayer(0, gr)
    sp = SmartPlayer(1, gp, 10)
    rp._proceed = True
    sp._proceed = True
    # generate_move returns (action, direction, block); take the block.
    move_block_rp = rp.generate_move(board)[2]
    move_block_sp = sp.generate_move(board)[2]
    assert move_block_rp == board or move_block_rp in board.children
    assert move_block_sp == board or move_block_sp in board.children
def test_score_is_greater() -> None:
    """A SmartPlayer's chosen move should not decrease its goal score.

    Shares the combine() caveat noted on test_returns_block.
    """
    gp = PerimeterGoal(MELON_MAMBO)
    board = one_block_four_children_(1)
    sp = SmartPlayer(1, gp, 4)
    sp._proceed = True
    move = sp.generate_move(board)
    score = gp.score(board)
    if move[0] != 'pass':
        assert _is_move_valid(sp, move[2], (move[0], move[1]))
    # NOTE(review): score2 is taken on the same board object; this only
    # differs from score if _is_move_valid actually applies the move and
    # mutates the board -- confirm against the player module.
    score2 = gp.score(board)
    if move[0] != 'pass':
        assert score2 > score
    else:
        assert score2 == score
if __name__ == '__main__':
    # Run this test module directly through pytest.
    pytest.main(['test_cases2.py'])
|
995,775 | fd752c43d4ddd02d1b6672ebbbbc9563c3e0f7db | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 16 17:21:41 2019
@author: up201808912
"""
import math
def f(x):
    """Objective function: (2x + 1)^2 - 5*cos(10x)."""
    quad = (2 * x + 1) ** 2
    return quad - 5 * math.cos(10 * x)
def regra_aurea_min(x1, x2, intervalo, func=None):
    """Golden-section search for a minimum of *func* on [x1, x2].

    x1, x2    : interval endpoints
    intervalo : stop once the bracket is narrower than this tolerance
    func      : objective to minimise; defaults to the module-level f
    Returns the interior probe with the smaller objective value.
    """
    if func is None:
        func = f
    B = (math.sqrt(5) - 1) / 2  # inverse golden ratio, ~0.618
    A = B ** 2
    # Seed the probes with the endpoints so a degenerate bracket (already
    # narrower than the tolerance) cannot leave x3/x4 unbound below.
    x3, x4 = x1, x2
    while abs(x2 - x1) > intervalo:
        x3 = x1 + A * (x2 - x1)
        x4 = x1 + B * (x2 - x1)
        if func(x3) > func(x4):
            x1 = x3
        elif func(x3) < func(x4):
            x2 = x4
        else:
            # Equal probe values: shrink from both sides.  The original
            # printed "ERRROR" and left the bracket untouched, looping
            # forever whenever the probes tied.
            x1, x2 = x3, x4
    if func(x3) < func(x4):
        return x3
    return x4
def regra_aurea_max(x1, x2, intervalo, func=None):
    """Golden-section search for a maximum of *func* on [x1, x2].

    Mirrors regra_aurea_min with the comparisons inverted.  Prints the
    number of iterations performed, then returns the better probe.
    """
    if func is None:
        func = f
    B = (math.sqrt(5) - 1) / 2  # inverse golden ratio, ~0.618
    A = B ** 2
    i = 0
    # Seed the probes so a degenerate bracket cannot crash below.
    x3, x4 = x1, x2
    while abs(x2 - x1) > intervalo:
        i += 1
        x3 = x1 + A * (x2 - x1)
        x4 = x1 + B * (x2 - x1)
        if func(x3) < func(x4):
            x1 = x3
        elif func(x3) > func(x4):
            x2 = x4
        else:
            # Equal probe values: shrink both ends instead of stalling.
            x1, x2 = x3, x4
    print(i)
    if func(x3) > func(x4):
        return x3
    return x4
# Tolerance and search interval for the golden-section demos below.
intervalo=10**-3
x1=-1
x2=0
# Locate and report the minimum and maximum of f on [x1, x2].
print(regra_aurea_min(x1,x2,intervalo))
print(regra_aurea_max(x1,x2,intervalo))
|
995,776 | e63511f23cdfb43935c61ae83983fbfbfc392764 | from django.shortcuts import render
# Create your views here.
from django.shortcuts import HttpResponse
from fantasy.models import Character
def add(request):
    """Add the integers supplied as GET parameters ``a`` and ``b``."""
    first = request.GET['a']
    second = request.GET['b']
    total = int(first) + int(second)
    return HttpResponse(str(total))
def add2(request, a, b):
    """Add the two integers captured from the URL path."""
    total = int(a) + int(b)
    return HttpResponse(str(total))
def index2(request):
    """Render home.html with a space-joined list of all character names."""
    characters = Character.objects.all()
    # List comprehension replaces the original list(map(lambda ...)).
    names = [character.name for character in characters]
    context = {'label': ' '.join(names)}
    return render(request, 'home.html', context)
# View rendering and request handling =============================================================
def form(request):
    """Render the investigation form page."""
    return render(request, 'investigate.html')
def investigate(request):
    """Echo back the submitted ``staff`` GET parameter."""
    staff = request.GET['staff']
    return HttpResponse(staff)
from django.http import HttpResponseRedirect
from django.urls import reverse
def operationDB(request):
    """Render for.html with every Character row.

    ``objects`` is Django's default model manager; it could equally be
    declared on Character explicitly as ``objects = models.Manager()``.
    """
    staff_rows = Character.objects.all()
    return render(request, 'for.html', {"staffs": staff_rows})
def getname(request):
    """Render nameget.html with every Character row."""
    characters = Character.objects.all()
    return render(request, 'nameget.html', {"name": characters})
def addname(request):
    """Create a Character from the POSTed ``staff`` name, then redirect.

    The redirect to the ``getname`` view is issued unconditionally so the
    view always returns an HttpResponse (a Django view returning None is
    a server error); a plain GET simply performs no insert.
    """
    if request.POST:
        newname = request.POST['staff']
        Character(name=newname).save()
    return HttpResponseRedirect(reverse('getname'))
|
995,777 | a5eadbb16815bd14141645f951b301e2a1563e07 | import random
class Search:
    """Analyze a board for the best positions to play.

    Base class: the query helpers are functional, while the analysis
    hooks (strategy/evaluate/reset/advance/explore) are no-op stubs
    that concrete searches override.
    """

    def __init__(self, args=()):
        self.key_values = []     # (key, value) pairs for open keys
        self.most_valuable = []  # open keys sharing the best value

    def strategy(self, game, args=()):
        """Return an open key for the player to play in *game*."""

    def evaluate(self, board):
        """Set key_values and most_valuable for some open keys."""

    def reset(self, board):
        """Prepare self for an upcoming game."""

    def advance(self, board):
        """Prepare self for the upcoming move given the other player's move."""

    def get_most_valuable(self):
        """Return the most valued open keys."""
        return self.most_valuable

    def get_key_values(self):
        """Return (key, value) pairs valuing each open key for the
        current player."""
        return self.key_values

    def get_norm_key_values(self, board):
        """Return key value pairs, values normalized from 0 to 100."""
        return self.key_values

    def explore(self, *args):
        """Explore state space as necessary."""
class RandomSearch(Search):
    """Search that values every open key equally and plays at random."""

    def strategy(self, game, args=()):
        """Return a uniformly random open key of the game's board."""
        open_keys = list(game.board.open_keys.keys())
        return random.choice(open_keys)

    def evaluate(self, board):
        """Value all open keys uniformly at 50."""
        self.most_valuable = sorted(board.open_keys.keys())
        self.key_values = [(key, 50) for key in self.most_valuable]
class TreeSearch(Search):
    """Search backed by an explicit game tree."""

    def __init__(self, tree_inst):
        self.tree = tree_inst
        super().__init__()

    def explore(self, board, args):
        """Explore state space as necessary."""
        self.tree.explore(board, *args)

    def strategy(self, game, args):
        """Explore from the current board, then pick among the best keys."""
        self.tree.explore(game.board, *args)
        self.evaluate(game.board)
        return random.choice(self.most_valuable)

    def reset(self, board):
        """Drop cached move data for every ply of a fresh game."""
        for ply in range(board.moves()):
            self.tree.table.clear_moves(ply)

    def evaluate(self, board):
        """Cache the tree's current best keys for *board*."""
        self.most_valuable = self.tree.most_valuable(board)
class IterativeDeepeningTreeSearch(TreeSearch):
    """Tree search that deepens the principal variation one ply at a time."""

    def explore(self, board, args):
        """Run principal-variation exploration at depths 1..args[0]."""
        for depth in range(1, args[0] + 1):
            self.tree.principal_explore(board, depth)

    def strategy(self, game, args):
        """Deepen up to args[0] plies, then pick among the best keys."""
        for depth in range(1, args[0] + 1):
            self.tree.principal_explore(game.board, depth)
        self.evaluate(game.board)
        return random.choice(self.most_valuable)
class TimeIterativeDeepeningTreeSearch(IterativeDeepeningTreeSearch):
    """Iterative deepening bounded by the game clock."""

    def strategy(self, game, args):
        """Deepen until the depth cap or half the remaining time is spent."""
        # Budget: half the player's remaining time, minus a safety second.
        alarm = time.time() + game.current_time() / 2 - 1
        max_depth = args[0] + 1
        for depth in range(1, max_depth):
            if time.time() > alarm:
                break
            # NOTE(review): the alarm is forwarded to the tree here, unlike
            # the parent class -- presumably a time-aware variant of
            # principal_explore; confirm against the tree API.
            self.tree.principal_explore(alarm, game.board, depth)
        self.evaluate(game.board)
        return random.choice(self.most_valuable)
class MonteCarloTreeSearch(TreeSearch):
    """Tree search driven by Monte Carlo playouts."""

    def evaluate(self, board):
        """Cache per-child values and the best keys for *board*."""
        self.key_values = self.tree.children_key_values(board)
        self.most_valuable = self.tree.most_valuable(board, self.key_values)

    def get_norm_key_values(self, board):
        """Return key/value pairs sorted by key, values normalized via the tree.

        Fixed: this previously read ``self.children_key_values``, an
        attribute that is never assigned anywhere (evaluate stores the
        pairs in ``self.key_values``), so it raised AttributeError.
        """
        return [(key, self.tree.norm_value(board, value))
                for key, value in sorted(self.key_values,
                                         key=lambda item: item[0])]
class TimeMonteCarloTreeSearch(MonteCarloTreeSearch):
    """Monte Carlo tree search bounded by the game clock."""

    def strategy(self, game, args):
        """Run playouts until the iteration cap or time budget is reached."""
        # Budget: half the player's remaining time, minus a safety second.
        alarm = time.time() + game.current_time() / 2 - 1
        # Fixed: the original tested the undefined name ``board`` here
        # (NameError at runtime); the current board is game.board.
        if game.board not in self.tree.table:
            self.tree.add_child(game.board)
        iterations = args[0]
        for _ in range(iterations):
            self.tree.playout(game.board)
            if time.time() > alarm:
                break
        self.evaluate(game.board)
        return random.choice(self.most_valuable)
|
995,778 | de8f0b9a44dd7af2f4b749ecd729f3aa7793a29a | import matplotlib.pyplot as plt
from Table import *
from MathO import *
class Main(object):
    # NOTE(review): all of this code sits directly in the class body, so it
    # executes once when the class statement is evaluated (i.e. at import
    # time); Math1 methods are called with the class itself passed
    # explicitly -- confirm that Math1 is meant to be used unbound.
    # Figure 1: histogram of the empirical density.
    fig = plt.figure( 1 )
    plt.xlabel("Значения X")
    plt.ylabel("плотность распределения p*")
    plt.title("Гистограмма плотности функции распределения")
    plt.bar( Math1.otr,Math1.p, align='center', width=0.1, color = 'violet' )
    plt.grid(True)
    # Figure 2: histogram from the fitted Pearson distribution.
    fig2 = plt.figure( 2 )
    plt.xlabel( "Значения X" )
    plt.ylabel( "плотность распределения p*" )
    plt.title( "Гистограмма плотности по распределению Пирсона" )
    plt.bar( Math1.pint(Math1), Math1.p , align='center', width=0.1, color='violet' )
    plt.grid( True )
    plt.show()
    # Chi-square goodness-of-fit decision against the tabulated value.
    if Math1.Pearson(Math1) < tabl_value[Math1.m - 1]:
        print("Теоретическое распределение согласуется с выборочным" )
    else:
        print("Теоретическое распределение не согласуется с выборочным")
995,779 | d8274ed2b5bc2cdb27e4fa8c0bbe635746c05957 | # Generated by Django 3.2.6 on 2021-08-12 16:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``couleur`` CharField to the Todo model.

    Auto-generated by Django 3.2.6; existing rows are backfilled with the
    one-off default 'is-warning' (preserve_default=False drops it from
    the final field definition).
    """

    dependencies = [
        ('todo', '0006_remove_todo_ordre'),
    ]

    operations = [
        migrations.AddField(
            model_name='todo',
            name='couleur',
            field=models.CharField(default='is-warning', max_length=255),
            preserve_default=False,
        ),
    ]
|
995,780 | 3a6bd01495f1cab1cc7662131d7d6fe98b724a9b | import os
import glob
import imageio
from config import NTH_SAVE, GPU
if __name__ == '__main__':
    # Assemble the per-epoch training snapshots into one animated GIF.
    print('Createing GIF ...')
    training_img_dir = 'training_images'
    # Name the GIF after the number of snapshots available.
    num_training_images = len([name for name in os.listdir(training_img_dir)])
    output_name = 'MNIST_VAE_Training_{}_epochs.gif'.format(
        num_training_images)
    with imageio.get_writer(output_name, mode='I') as writer:
        filenames = glob.glob('{}/*.png'.format(training_img_dir))
        filenames = sorted(filenames)
        for i, filename in enumerate(filenames):
            # Only add every nth image or last to gif
            if i % NTH_SAVE == 0 or i == len(filenames) - 1:
                image = imageio.imread(filename)
                writer.append_data(image)
            # Add extra frames of the last image so the GIF lingers on it
            # (the last index always passed the filter above, so `image`
            # is guaranteed to be bound here).
            if i == len(filenames) - 1:
                num_extra_frames = 5
                for _ in range(num_extra_frames):
                    writer.append_data(image)
    print('GIF written to {}'.format(os.path.abspath(output_name)))
|
995,781 | b9051ce3a6486928c43dfdf328b7188eb6b965fb | #!/Users/tmcfarlane/pyProjects/3_6_1/BucketListApp/venv/bin/python
from django.core import management
if __name__ == "__main__":
    # Delegate to Django's command-line utility (manage.py entry point).
    management.execute_from_command_line()
|
995,782 | dd4b79c8d43ea1486e2af05c42dbb38369a94be7 | from classification_contract.wine_test_data.utilities import get_classified_wine
import json
from classification_contract.messages import create_create_message, create_delete_message, read_all_messages
from nose.tools import assert_equals, assert_dict_equal
@given(u'I have a classified wine')
def step_impl(context):
    """Stash a sample classified wine on the scenario context."""
    context.classified_wine = get_classified_wine()
@given(u'I POST it to the wines endpoint')
@when(u'I POST it to the wines endpoint')
def step_impl(context):
    """POST the classified wine and remember the id the API assigns."""
    response = context.client.post('v1/wines/',
                                   data=json.dumps(context.classified_wine),
                                   headers={'content-type':'application/json'})
    assert_equals(response.status_code, 201)
    response_content = json.loads(response.get_data(as_text=True))
    context.classified_wine_id = response_content['id']
@when(u'I DELETE the record')
def step_impl(context):
    """DELETE the wine created earlier and expect 204 No Content."""
    response = context.client.delete('v1/wines/?id={}'.format(context.classified_wine_id))
    assert_equals(response.status_code, 204)
@then(u'the creation is logged')
def step_impl(context):
    """Assert a creation message for the wine was logged exactly once."""
    expected_message = create_create_message(context.classified_wine_id, context.classified_wine)
    _assert_contains_message(context, expected_message)
@then(u'the deletion is logged')
def step_impl(context):
    """Assert a deletion message for the wine id was logged exactly once."""
    # Fixed: this step built its expectation with create_create_message
    # (copy-paste from the creation step), so it could never verify a
    # deletion log entry; create_delete_message is imported for this.
    expected_message = create_delete_message(context.classified_wine_id)
    _assert_contains_message(context, expected_message)
def _assert_contains_message(context, expected_message):
    """Assert exactly one logged message matches the expected type and id,
    and that it equals *expected_message* field-for-field."""
    messages = read_all_messages(context.test_log_backend.hosts, context.test_log_backend.topic_name)
    matching_messages = [message for message in messages
                         if message['type'] == expected_message['type']
                         and message['id'] == expected_message['id']]
    assert_equals(1, len(matching_messages), 'no/too many matching messages logged')
    assert_dict_equal(matching_messages[0], expected_message)
|
995,783 | acb131373553a5831285a4822232a739860afadd | #!/usr/bin/python
import os
import sys
def deduparg(arg):
    """Return True if repeated occurrences of *arg* should be collapsed."""
    if arg.startswith(("@", "-fsanitize=", "-Wl,-plugin-opt=")):
        return True
    if arg.startswith("-Wl,-l:") and arg.endswith(".a"):
        return True
    return arg == "-Wl,-whole-archive,-l:libmetadata.a,-no-whole-archive"
def skiparg(arg):
    """Return True if *arg* should be dropped from the command line entirely."""
    return arg == "-fno-rtti"
# Filter argv: drop skip-listed flags, collapse duplicate occurrences of
# dedup-listed flags (first occurrence wins), then exec the real command.
args = list()
argset = set()
for arg in sys.argv[1:]:
    if skiparg(arg): continue
    if deduparg(arg) and (arg in argset): continue
    args.append(arg)
    argset.add(arg)
# Replace this process with the deduplicated command line.
os.execvp(args[0], args)
|
995,784 | 3a2928da2b495c3c52e99911e9beec94fca5fcef | """
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import baiduads
from baiduads.dpacreative.model.format_template_type import FormatTemplateType
globals()['FormatTemplateType'] = FormatTemplateType
from baiduads.dpacreative.model.get_format_templates_response_wrapper_body import GetFormatTemplatesResponseWrapperBody
class TestGetFormatTemplatesResponseWrapperBody(unittest.TestCase):
    """GetFormatTemplatesResponseWrapperBody unit test stubs (generated)."""

    def setUp(self):
        # No fixtures needed for the generated stub.
        pass

    def tearDown(self):
        pass

    def testGetFormatTemplatesResponseWrapperBody(self):
        """Test GetFormatTemplatesResponseWrapperBody"""
        # FIXME: construct object with mandatory attributes with example values
        # model = GetFormatTemplatesResponseWrapperBody()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this module's tests directly.
    unittest.main()
|
995,785 | 05d727f16fee28eba0c1e5ff2106ae12b3afcfe7 | # Write a python function that takes a sequence of numbers and determines if all the numbers are different from each other(that is they are distinct)
def distinct(n):
    """Report whether all numbers in the sequence *n* are distinct.

    Returns "Numbers are unique." when no value repeats (including for
    an empty sequence), otherwise "Numbers are not unique.".
    """
    seen = set()
    for value in n:
        if value in seen:
            return "Numbers are not unique."
        seen.add(value)
    return "Numbers are unique."
# Demo: the trailing 2 repeats, so this prints the "not unique" message.
b = distinct([2, 3, 4, 5, 6, 7, 8, 2])
print(b)
|
995,786 | 3315dcb9cd32ce82dd82885db9fbf20abbdc44bc | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 11 18:08:22 2020
@author: ANA
"""
def w(x, y):
    """Objective: w(x, y) = -1.1*x*y + 12*y + 7*x^2 - 8*x."""
    cross = -1.1 * x * y
    return cross + 12 * y + 7 * x * x - 8 * x
def dwx(x, y):
    """Partial derivative of w with respect to x: 14x - 1.1y - 8."""
    return 14 * x - 1.1 * y - 8
def dwy(x, y):
    """Partial derivative of w with respect to y: 12 - 1.1x (constant in y)."""
    return 12 - 1.1 * x
def gradiente(x, y, h, numItr):
    """Run numItr gradient-descent steps on w from (x, y) with step h.

    Returns the objective value w at the final point (not the point itself).
    """
    xn = 0
    yn = 0
    for i in range(numItr):
        # Step downhill along the gradient of w.
        xn = x - h * dwx(x, y)
        yn = y - h * dwy(x, y)
        # Step-size adaptation: grow h when the step improved w.
        if w(xn, yn) < w(x, y):
            h *= 2
        # NOTE(review): the step is accepted unconditionally, and because
        # x,y are overwritten with xn,yn just before the comparison below,
        # w(xn, yn) > w(x, y) can never hold -- h is never halved.  The
        # halving was presumably meant to reject worsening steps; confirm
        # the intended behaviour before changing it.
        y = yn
        x = xn
        if w(xn, yn) > w(x, y):
            h /= 2
    return w(x,y)
print("Valor da função: ",gradiente(3,1,0.1,1))
|
995,787 | 64cc0cd5fc6d57247138ec19cd97486a303578cf | # this program defines a method that finds the max of three entered value
def MaxOfThree(x, y, z):
    """Return the largest of the three arguments (first wins ties)."""
    best = x
    for candidate in (y, z):
        if candidate > best:
            best = candidate
    return best
# Prompt for three values (Python 2: raw_input returns strings).
x = raw_input('Enter first value: ')
y = raw_input('Enter Second value: ')
z = raw_input('Enter third value: ')
try:
    # Prefer numeric comparison when all three parse as floats.
    a=float(x)
    b=float(y)
    c=float(z)
    print "\n The maximum off",x, y, z,"is", MaxOfThree(a, b, c)
except:
    # Fall back to lexicographic comparison if any value is non-numeric.
    print "\n The maximum of",x, y, z,"is", MaxOfThree(x, y, z)
995,788 | 4711f29046e342d5d51b8f266643379e041c7f11 | # -*- coding: utf-8 -*-
import os, sys, subprocess, platform, socket
try:
from discord_webhook import DiscordWebhook, DiscordEmbed
except ImportError:
if sys.version_info[0] == 2:
os.system('pip install discord_webhook')
else:
os.system('pip3 install discord_webhook')
from discord_webhook import DiscordWebhook, DiscordEmbed
# global vars..
PLATFORM = platform.system()
# Basic settings for the Discord webhooks (one per message channel).
STATUS = "status"
WARNING = "warning"
WEBHOOKS = {
    STATUS:"https://discord.com/api/webhooks/...상태알림받을웹훅주소",
    WARNING:"https://discord.com/api/webhooks/...경고알림받을웹훅주소"
}
COLORS = { STATUS: 242424, WARNING: 16711680 }  # embed colours per channel
WHLIST = dict()  # channel -> [DiscordWebhook, counter]
SVR_NAME = socket.gethostname().upper()  # server name (can also be set manually below)
#SVR_NAME = "서버명"
#WATCH_PATH = ['C:', 'P:', 'X:'] # paths to watch (Windows drive letters)
WATCH_PATH = ['/dev/sda1', '/mnt/gdrive'] # paths to watch (Linux: device name or mount path)
DISK_ALARM_LIMIT = "75" # warning threshold, percent used
TITLES = {
    STATUS :"[알림] ({svr})서버 파일시스템 사용량 정보 (기준: {limit}%)".format(svr=SVR_NAME, limit=DISK_ALARM_LIMIT),
    WARNING:"[경고] ({svr})서버 파일시스템 사용량 경보 (기준: {limit}%)".format(svr=SVR_NAME, limit=DISK_ALARM_LIMIT)
}
MSG_QUEUE = { STATUS: [], WARNING: [] }  # rows gathered before sending
def init_discord_webhook():
    """Create one DiscordWebhook per configured channel and store it in WHLIST."""
    for channel in WEBHOOKS.keys():
        WHLIST[channel] = [DiscordWebhook(url=WEBHOOKS[channel]), 0]
def get_discord_webhook(whkey):
    """Return the [webhook, counter] pair registered for *whkey*."""
    return WHLIST[whkey]
def send_discord_msg():
    """Flush MSG_QUEUE: post one embed per channel that has queued rows."""
    for channel in WEBHOOKS.keys():
        queued = MSG_QUEUE[channel]
        if len(queued) == 0: continue
        hook = get_discord_webhook(channel)
        embed = DiscordEmbed(title=TITLES[channel], color=COLORS[channel])
        embed.set_author(name=SVR_NAME)
        # Each queue entry is [filesystem, total, used, used-percent].
        paths = "\n".join(entry[0] for entry in queued)
        usage = "\n".join(entry[2] + "/" + entry[1] for entry in queued)
        percents = "\n".join(entry[3] for entry in queued)
        embed.add_embed_field(name="경로", value=paths, inline=True)
        embed.add_embed_field(name="사용/전체", value=usage, inline=True)
        embed.add_embed_field(name="사용률", value=percents, inline=True)
        embed.set_timestamp()
        hook[0].add_embed(embed)
        response = hook[0].execute()
        print (response)
def is_limit_over(usep):
    """Return True when the usage string (e.g. '80%') exceeds DISK_ALARM_LIMIT."""
    numeric = int(usep.replace('%', ''))
    return numeric > int(DISK_ALARM_LIMIT)
def check_disk_usage_linux():
    """Parse `df -h` for WATCH_PATH entries and queue status/warning rows."""
    # Resolve absolute tool paths via `which`.
    cmd_df = os.popen('which df').read().split('\n')[0].strip()
    cmd_egrep = os.popen('which egrep').read().split('\n')[0].strip()
    str_egrep = "|".join(WATCH_PATH)
    cmd = '{df} -h | {grep} "{gstr}"'.format(df = cmd_df, grep = cmd_egrep, gstr=str_egrep)
    for line in os.popen(cmd).read().split('\n'):
        if line == '': continue
        data = line.split()
        fs = data[5] # column 1 = device name, column 5 = mount path
        total = data[1]
        use = data[2]
        usep= data[4]
        # Route the row to the warning or status queue by threshold.
        if is_limit_over(usep): whtype = WARNING
        else: whtype = STATUS
        MSG_QUEUE[whtype].append([fs, total, use, usep])
def check_disk_usage_windows():
    """Query `wmic logicaldisk` and queue one row per watched drive.

    NOTE: wmic prints requested columns in alphabetical order, so each data
    row is DeviceID, FreeSpace, Size regardless of the order requested.
    """
    cmd = 'wmic logicaldisk get deviceid,size,freespace'.split()
    for line in subprocess.check_output(cmd).decode('utf-8').split('\r\n'):
        if not line.strip(): continue  # skip blank separator lines
        try:
            data = line.split()
            # Unwatched drives and the header row both fail this test.
            if data[0] not in WATCH_PATH: continue
            nfr = float(data[1]) / (1024 * 1024 * 1024)  # free space, GiB
            nto = float(data[2]) / (1024 * 1024 * 1024)  # total size, GiB
            nus = nto - nfr                              # used space, GiB
            fs = data[0]
            total = str(int(nto)) + "G"
            use = str(int(nus)) + "G"
            usep = str(int(nus/nto*100)) + "%"
            if is_limit_over(usep): whtype = WARNING
            else: whtype = STATUS
            MSG_QUEUE[whtype].append([fs, total, use, usep])
        except (IndexError, ValueError, ZeroDivisionError) as exc:
            # Narrowed from a bare `except` that silently hid every error.
            print("Exception occurred, line ignored:", exc)
def check_disk_usage():
    """Dispatch to the platform-specific disk check."""
    if PLATFORM == 'Windows':
        check_disk_usage_windows()
    else:
        check_disk_usage_linux()
# MAIN
init_discord_webhook()   # build the webhook clients
check_disk_usage()       # fill MSG_QUEUE with per-filesystem rows
send_discord_msg()       # post the queued reports to Discord
|
995,789 | a7370a6259cbec0b236bbc89cef7086898838aa9 | '''
Script for retrieving the list of ICPE documents
'''
import os
import random
from typing import List
from urllib.request import HTTPError, urlretrieve # type: ignore
from envinorma.models.document import Document, DocumentType
from tqdm import tqdm
from tasks.data_build.filenames import CQUEST_URL, DOCUMENTS_FOLDER, GEORISQUES_DOWNLOAD_URL
from tasks.data_build.load import load_documents_from_csv
_BAR_FORMAT = '{l_bar}{r_bar}'
def _download_file_if_doesnt_exist(source: str, destination: str) -> bool:
if os.path.exists(destination):
print('file exists')
return False
try:
urlretrieve(source, destination)
except HTTPError as exc:
print(source, exc)
return True
return False
def _download_document(url: str) -> None:
    """Download one document from cquest, falling back to Géorisques on failure.

    The helper returns True when the download FAILED with an HTTP error, so
    the fallback must run on a truthy result. (The original code retried on
    success and skipped the fallback on failure — the condition was inverted.)
    """
    full_url = CQUEST_URL + '/' + url
    destination = DOCUMENTS_FOLDER + '/' + url.replace('/', '_')
    failed = _download_file_if_doesnt_exist(full_url, destination)
    if failed:
        print('Not found, attempting georisques.')
        source_georisques = GEORISQUES_DOWNLOAD_URL + '/' + url
        _download_file_if_doesnt_exist(source_georisques, destination)
def _download_documents(documents: List[Document]) -> None:
    """Download each document's file, showing a tqdm progress bar."""
    progress = tqdm(documents, 'Downloading documents.', bar_format=_BAR_FORMAT)
    for document in progress:
        _download_document(document.url_doc)
if __name__ == '__main__':
    # Keep only "arrêté préfectoral" (AP) documents, then fetch a random sample of 100.
    ALL_DOCS = [doc for doc in load_documents_from_csv('all') if doc.type == DocumentType.AP]
    _download_documents(random.sample(ALL_DOCS, 100))
|
class BaseHandler(object):
    """Minimal handler base class exposing a class-level marker attribute."""
    # Class-level marker shared by all instances.
    test2_field = 'test2'
|
995,791 | eed0924cd76249e2cc4dbf9b21b586267443b0f2 | import kin
# each motor is given the positional id ab, with a the leg number (0 being the one closest to the power plug),
# and b among 0, 1, 2 in proximo distal order.
# that define the global order or motors, as the numerical one.
# Attribute names holding positions; they must be converted between the
# canonical 150.0-centred frame and each motor's real orientation/offset.
pos_name = set(['position', 'goal_position', 'cw_angle_limit', 'ccw_angle_limit', 'present_position'])
n = 4  # default spider index into the calibration tables below
# Hardware motor id for each logical slot, one row per spider.
#                 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17
spiders_ids = (( 8,  7,  9, 15, 11, 12, 10,  4, 16, 13,  2, 18,  6,  5,  1, 17,  3, 14),
               (14, 16, 18,  8, 10, 12,  2,  4,  6,  1,  3,  5,  7,  9, 11, 13, 15, 17),
               ( 1,  3,  5,  7,  9, 11, 13, 15, 17, 14, 16, 18,  8, 10, 12,  2,  4,  6),
               ( 1,  3,  5, 13, 15, 17,  7,  9, 11, 14, 16, 18,  8, 10, 12,  2,  4,  6),
               ( 7,  9, 11,  1,  3,  5, 13, 15, 17, 14, 16, 18,  8, 10, 12,  2,  4,  6),
               )
# This is orientations *after* reodering ! (+1 or -1 per logical slot)
spiders_ori = (( 1, -1,  1,  1, -1,  1,  1, -1,  1,  1, -1,  1,  1, -1,  1,  1, -1,  1),
               ( 1, -1,  1,  1, -1,  1,  1, -1,  1,  1, -1,  1,  1, -1,  1,  1, -1,  1),
               ( 1, -1,  1,  1, -1,  1,  1, -1,  1,  1, -1,  1,  1, -1,  1,  1, -1,  1),
               ( 1, -1,  1,  1, -1,  1,  1, -1,  1,  1, -1,  1,  1, -1,  1,  1, -1,  1),
               ( 1, -1,  1,  1, -1,  1,  1, -1,  1,  1, -1,  1,  1, -1,  1,  1, -1,  1),
               )
# Zero-position offsets in degrees per logical slot (150.0 is the neutral pose).
spiders_off = ((150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 186.0, 150.0, 150.0, 150.0, 150.0, 150.0),
               (150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0),
               (150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0),
               (150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0),
               (150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0, 150.0),
               )
# Leg-frame -> global-frame mapping: which pos component feeds x/y, and sign.
leg_axis = (( 1, 0), ( 0, 1), ( 1, 0), ( 1, 0), ( 0, 1), ( 1, 0))
leg_dir = (( 1, 1), ( 1, 1), (-1, -1), (-1, -1), (-1, -1), ( 1, 1))
class MotorInterface(object):
    """Wrap a motor so positional attributes are expressed in a canonical
    150.0-centred frame, converting to/from the motor's real orientation and
    offset on every read/write. Non-positional attributes pass through.
    """
    def __init__(self, motor, orientation = 1, offset = 150.0):
        assert orientation in (1, -1)
        # Bypass our own __setattr__, which forwards everything to the motor.
        for key, val in (('motor', motor), ('orientation', orientation), ('offset', offset)):
            object.__setattr__(self, key, val)

    def __setattr__(self, name, value):
        if name in pos_name:
            value = self._pos2real(value)
        return self.motor.__setattr__(name, value)

    def __getattr__(self, name):
        raw = self.motor.__getattribute__(name)
        if name in pos_name:
            raw = self._real2pos(raw)
        return raw

    def _pos2real(self, pos):
        # canonical frame -> real motor angle
        return (pos - 150.0) * self.orientation + self.offset

    def _real2pos(self, pos):
        # real motor angle -> canonical frame
        return (pos - self.offset) * self.orientation + 150.0
class SpiderInterface(object):
    """Expose a controller's 18 motors remapped and calibrated for spider *n*."""
    def __init__(self, ctrl, n = n):
        assert len(ctrl.motors) == 18
        self.n = n
        self.ctrl = ctrl
        self.motors = []
        # NOTE(review): `m` from enumerate is immediately overwritten below;
        # only the index k is actually used.
        for k, m in enumerate(ctrl.motors):
            m = self.ctrl.motors[spiders_ids[n][k] - 1]  # hardware id -> logical slot
            self.motors.append(MotorInterface(m, spiders_ori[n][k], spiders_off[n][k]))
        self._create_legs()
    def _create_legs(self):
        # Group the 18 motors into 6 legs of 3, in proximo-distal order.
        assert len(self.motors) == 18
        self.legs = []
        for i in range(6):
            self.legs.append(Leg(i, [self.motors[3*i], self.motors[3*i+1], self.motors[3*i+2]]))
    def spread(self, angle):
        """Fan the legs out symmetrically by *angle* degrees around neutral (150)."""
        assert angle >= 0
        proximal_pose = (150-angle, 150, 150+angle, 150-angle, 150, 150+angle)
        for p_i, leg in zip(proximal_pose, self.legs):
            leg.proximo.position = p_i
class Leg(object):
    """Hexapod leg.

    Is made of three motors (interfaces) given in proximo-distal order.
    Attribute writes broadcast to all three motors (or one each when given a
    3-sequence); attribute reads return a (proximo, middle, distal) tuple.
    """
    def __init__(self, number, motors):
        assert len(motors) == 3
        # object.__setattr__ bypasses our own __setattr__, which broadcasts
        # every normal assignment to the underlying motors.
        object.__setattr__(self, 'number', number)
        object.__setattr__(self, 'motors', motors)
        object.__setattr__(self, 'proximo', motors[0])
        object.__setattr__(self, 'middle', motors[1])
        object.__setattr__(self, 'distal', motors[2])
    def __setattr__(self, name, value):
        if hasattr(value, '__iter__'):
            if len(value) == 3:
                # One value per motor; None entries leave that motor untouched.
                for v_i, m_i in zip(value, self.motors):
                    if v_i is not None:
                        setattr(m_i, name, v_i)
            else:
                # Call form (not `raise E, msg`) so the class also works on Python 3.
                raise ValueError('Too many or too little values to set: expected 3, got {}.'.format(len(value)))
        else:
            if value is not None:
                for motor in self.motors:
                    setattr(motor, name, value)
    def __getattr__(self, name):
        return tuple(getattr(motor, name) for motor in self.motors)
    def _leg2global(self, pos):
        # Map a leg-frame position to the global frame via this leg's axis/sign tables.
        x = leg_dir[self.number][0]*pos[leg_axis[self.number][0]]
        y = leg_dir[self.number][1]*pos[leg_axis[self.number][1]]
        z = pos[2]
        return x, y, z
    def _global2leg(self, pos):
        # Inverse mapping of _leg2global (the transform is its own inverse here).
        x = leg_dir[self.number][0]*pos[leg_axis[self.number][0]]
        y = leg_dir[self.number][1]*pos[leg_axis[self.number][1]]
        z = pos[2]
        return x, y, z
    @property
    def tip(self):
        """Global-frame position of the leg tip from forward kinematics."""
        alpha = self.proximo.position - 150.0
        beta = - (self.middle.position - 150.0)
        gamma = - (self.distal.position - 150.0)
        return self._leg2global(kin.forward_kin(alpha, beta, gamma))
    def displace_tip(self, dx, dy, dz):
        """Move the tip by (dx, dy, dz) in the global frame via inverse kinematics."""
        x, y, z = self.tip
        alpha = self.proximo.position - 150.0
        beta = - (self.middle.position - 150.0)
        gamma = - (self.distal.position - 150.0)
        xdx, ydy, zdz = self._global2leg((x+dx, y+dy, z+dz))
        alpha, beta, gamma = kin.inverse_kin(xdx, ydy, zdz)
        self.proximo.position = alpha + 150.0
        self.middle.position = 150.0 - beta
        self.distal.position = 150.0 - gamma
|
995,792 | ad78cb6253bacde847a20215a303b38cc642e8ad | import nuke
import nukescripts
import re
class MassivePanel(nukescripts.PythonPanel):
    """Nuke panel that applies one knob value/expression to every selected node."""
    def __init__(self):
        nukescripts.PythonPanel.__init__(self, 'MassivePanel', 'com.ohufx.MassivePanel')
        ############# setting help messages
        KnobInfo = " type knob's name in, also you can Ctrl+Drag&Drop from the knob you want to adjust , then if you will click in value knob and back click in Knob field it will automaticly extract knob name for you (trying to make it better in next version :)"
        ArrayInfo = "You have to set what exactly you want to change, All - will set same value on xyz, X , Y , Z - will set value only on your selection x,y or z"
        ValueInfo = "set new value for selected nodes"
        IncrInfo = "set here increment between values, for example if value=3 and increment 2 - you will have 3,5,7,9..."
        KindInfo = "put String if you want to set string and finally put Expression if you entering expression"
        #############creating knobs
        self.Knob = nuke.String_Knob( KnobInfo, "Knob:")
        self.Value = nuke.String_Knob(ValueInfo,"Value:")
        self.Array = nuke.Enumeration_Knob(ArrayInfo,"Array:",[ "All","X","Y","Z"],)
        self.Kind = nuke.Enumeration_Knob(KindInfo,"Kind:",[ "Float","String", "Expression"],)
        self.Kind.clearFlag(nuke.STARTLINE)
        self.Increment = nuke.String_Knob(IncrInfo,"Increment:","0")
        self.Increment.clearFlag(nuke.STARTLINE)
        self.Go = nuke.PyScript_Knob("Go", "Go")
        ############applying knobs to panel in order
        for k in (self.Knob,self.Value,self.Increment,self.Array,self.Kind,self.Go):
            self.addKnob(k)
    ############### setting basic function
    def knobChanged(self,knob):
        # While typing: reduce a dragged-in "Node.knob" reference to the knob name.
        if knob in (self.Knob,self.Value,self.Array,self.Increment):
            string = self.Knob.value()
            if ':' in string:
                firstSplit = string.rsplit('.')[1]
                self.Knob.setValue(firstSplit)
        elif knob == self.Go:
            s = self.Value.value()  # NOTE(review): unused
            Knob = self.Knob.value()
            Value = self.Value.value()
            array = self.Array.value()
            kind = self.Kind.value()
            incr = self.Increment.value()
            incr = float(incr)
            u = 0  # running increment added to Value for successive nodes
            n = nuke.selectedNodes()
            # NOTE(review): every `for n in n:` below rebinds n; this is only
            # harmless because exactly one (array, kind) branch matches per click.
            ######## setting float values
            if array== "All" and kind == "Float":
                Value = float(Value)
                for n in n:
                    n[Knob].setValue(Value+u)
                    u = incr+ u
            if array== "X" and kind == "Float":
                Value = float(Value)
                for n in n:
                    n[Knob].setValue(Value+u,0)
                    u = incr+ u
            if array== "Y" and kind == "Float":
                Value = float(Value)
                for n in n:
                    n[Knob].setValue(Value+u,1)
                    u = incr+ u
            if array== "Z" and kind == "Float":
                Value = float(Value)
                for n in n:
                    n[Knob].setValue(Value+u,2)
                    u = incr+ u
            ######## setting string values
            if array== "All" and kind == "String":
                for n in n:
                    n[Knob].setValue(Value)
            if array== "X" and kind == "String":
                for n in n:
                    n[Knob].setValue(Value,0)
            if array== "Y" and kind == "String":
                for n in n:
                    n[Knob].setValue(Value,1)
            if array== "Z" and kind == "String":
                for n in n:
                    n[Knob].setValue(Value,2)
            ######## setting expression values
            if array== "All" and kind == "Expression":
                for n in n:
                    n[Knob].setExpression(Value)
            if array== "X" and kind == "Expression":
                for n in n:
                    n[Knob].setExpression(Value,0)
            if array== "Y" and kind == "Expression":
                for n in n:
                    n[Knob].setExpression(Value,1)
            if array== "Z" and kind == "Expression":
                for n in n:
                    n[Knob].setExpression(Value,2)
#def addMassivePanel():
####myPanel = MassivePanel()
####return myPanel.addToPane()
#paneMenu = nuke.menu('Pane')
#paneMenu.addCommand('MassivePanel', addMassivePanel)
#nukescripts.registerPanel( 'com.ohufx.MassivePanel', #addMassivePanel)
import nuke
import nukescripts
import re
class MassivePanel(nukescripts.PythonPanel):
    """Nuke panel that applies one knob value/expression to every selected node.

    (Second, duplicated definition in this file — this one wins at import time.)
    """
    def __init__(self):
        nukescripts.PythonPanel.__init__(self, 'MassivePanel', 'com.ohufx.MassivePanel')
        # Help messages shown as knob tooltips.
        KnobInfo = " type knob's name in, also you can Ctrl+Drag&Drop from the knob you want to adjust , then if you will click in value knob and back click in Knob field it will automaticly extract knob name for you (trying to make it better in next version :)"
        ArrayInfo = "You have to set what exactly you want to change, All - will set same value on xyz, X , Y , Z - will set value only on your selection x,y or z"
        ValueInfo = "set new value for selected nodes"
        IncrInfo = "set here increment between values, for example if value=3 and increment 2 - you will have 3,5,7,9..."
        KindInfo = "put String if you want to set string and finally put Expression if you entering expression"
        # Create the knobs.
        self.Knob = nuke.String_Knob( KnobInfo, "Knob:")
        self.Value = nuke.String_Knob(ValueInfo,"Value:")
        self.Array = nuke.Enumeration_Knob(ArrayInfo,"Array:",[ "All","X","Y","Z"],)
        self.Kind = nuke.Enumeration_Knob(KindInfo,"Kind:",[ "Float","String", "Expression"],)
        self.Kind.clearFlag(nuke.STARTLINE)
        self.Increment = nuke.String_Knob(IncrInfo,"Increment:","0")
        self.Increment.clearFlag(nuke.STARTLINE)
        self.Go = nuke.PyScript_Knob("Go", "Go")
        # Add the knobs to the panel in display order.
        for k in (self.Knob,self.Value,self.Increment,self.Array,self.Kind,self.Go):
            self.addKnob(k)

    def knobChanged(self, knob):
        """Clean up the Knob field while typing; apply on the Go button.

        Replaces the original twelve near-identical `if array/kind` blocks
        (whose `for n in n:` loops rebound the node list) with a single
        dispatch on a channel index: None means "all channels".
        """
        if knob in (self.Knob, self.Value, self.Array, self.Increment):
            string = self.Knob.value()
            if ':' in string:
                firstSplit = string.rsplit('.')[1]
                self.Knob.setValue(firstSplit)
        elif knob == self.Go:
            knob_name = self.Knob.value()
            raw_value = self.Value.value()
            array = self.Array.value()
            kind = self.Kind.value()
            increment = float(self.Increment.value())
            nodes = nuke.selectedNodes()
            channel = {"All": None, "X": 0, "Y": 1, "Z": 2}[array]
            if kind == "Float":
                value = float(raw_value)
                offset = 0  # running increment across successive nodes
                for node in nodes:
                    if channel is None:
                        node[knob_name].setValue(value + offset)
                    else:
                        node[knob_name].setValue(value + offset, channel)
                    offset = increment + offset
            elif kind == "String":
                for node in nodes:
                    if channel is None:
                        node[knob_name].setValue(raw_value)
                    else:
                        node[knob_name].setValue(raw_value, channel)
            elif kind == "Expression":
                for node in nodes:
                    if channel is None:
                        node[knob_name].setExpression(raw_value)
                    else:
                        node[knob_name].setExpression(raw_value, channel)
#def addMassivePanel():
####myPanel = MassivePanel()
####return myPanel.addToPane()
#paneMenu = nuke.menu('Pane')
#paneMenu.addCommand('MassivePanel', addMassivePanel)
#nukescripts.registerPanel( 'com.ohufx.MassivePanel', #addMassivePanel)
|
995,793 | 40df77949435401eeeae7c6a2c599e5204fa24aa | from app import app
from flask import render_template, request, redirect, flash, url_for, Markup, g, send_from_directory, abort, Response
from app.helpers import allowed_file, writeTex, deleteImgUpload, deletePdf
from werkzeug.utils import secure_filename
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from operator import itemgetter
import os, re, jinja2, sys
# Set jinja2 environment in latex syntax so that it doesn't conflict with .tex
# (default {{ }} / {% %} delimiters clash with LaTeX braces).
PATH = os.path.join(os.path.dirname(__file__),'./templates/tex')
TEMPLATELOADER = jinja2.FileSystemLoader(searchpath=PATH)
LATEX_JINJA_ENV = jinja2.Environment(
    block_start_string = '((*',
    block_end_string = '*))',
    variable_start_string = '(((',
    variable_end_string = ')))',
    comment_start_string = '((=',
    comment_end_string = '=))',
    loader = TEMPLATELOADER,
    autoescape = False  # templates are LaTeX, not HTML
)
@app.route("/")
def index():
    """ Open index page and show current assignments """
    with app.app_context():
        return render_template("public/index.html")
# Ordered (pattern, replacement) pairs; backslash must be handled first so the
# escapes we insert are not themselves re-escaped.
LATEX_SUBS = (
    (re.compile(r"\\"), r"\\textbackslash"),
    (re.compile(r"([{}_#%&$])"), r"\\\1"),
    (re.compile(r"~"), r"\~{}"),
    (re.compile(r"\^"), r"\^{}"),
    (re.compile(r'"'), r"''"),
    (re.compile(r"\.\.\.+"), r"\\ldots"),
)


def escape_tex(value):
    """Escape characters that are special in LaTeX, returning a safe string."""
    escaped = value
    for pattern, replacement in LATEX_SUBS:
        escaped = pattern.sub(replacement, escaped)
    return escaped
# Register the escaper and pre-load the CV template once at import time.
LATEX_JINJA_ENV.filters['escape_tex'] = escape_tex
TEXTEMPLATE = LATEX_JINJA_ENV.get_template('template.tex')
@app.route("/createpdf", methods=["POST"])
def createpdf():
    """ Get form data and render pdf """
    with app.app_context():
        # Get form data
        if request.form:
            data = request.form
        else:
            return 'no form'
        # Scalar CV fields copied straight from the form.
        msg = {}
        msg['name'] = data['name']
        msg['role'] = data['role']
        msg['unit'] = data['unit']
        msg['unitdetail'] = data['unitdetail']
        msg['phone'] = data['phone']
        msg['email'] = data['email']
        msg['employmentdate'] = data['employmentdate']
        # Optional portrait upload; falls back to the bundled default image.
        filename = 'default.png'
        if 'img' in request.files:
            file = request.files['img']
            if file and allowed_file(file.filename):
                filename = secure_filename(file.filename).replace("_","")
                portraitFilePath = os.path.join(app.config['IMAGE_UPLOADS'], filename)
                file.save(portraitFilePath)
        if 'presentation' in data:
            msg['presentation'] = data['presentation']
        # Repeated sections: zip the parallel title/time lists, then use two
        # stable sorts so entries are ordered by time desc, title asc.
        if 'edu-title' in data:
            msg['edu'] = [{'title': i, 'time': j} for i, j in zip(request.form.getlist('edu-title'), request.form.getlist('edu-time'))]
            msg['edu'].sort(key = itemgetter('title'))
            msg['edu'].sort(key = itemgetter('time'), reverse=True)
        if 'emp-title' in data:
            msg['emp'] = [{'title': i, 'time': j} for i, j in zip(request.form.getlist('emp-title'), request.form.getlist('emp-time'))]
            msg['emp'].sort(key = itemgetter('title'))
            msg['emp'].sort(key = itemgetter('time'), reverse=True)
        if 'cou-title' in data:
            msg['cou'] = [{'title': i, 'time': j} for i, j in zip(request.form.getlist('cou-title'), request.form.getlist('cou-time'))]
            msg['cou'].sort(key = itemgetter('title'))
            msg['cou'].sort(key = itemgetter('time'), reverse=True)
        if 'ass-title' in data:
            msg['ass'] = [{'title': i, 'company': j, 'role': k, 'descr': l, 'time': m} for i,j,k,l,m in zip(request.form.getlist('ass-title'), request.form.getlist('ass-company'), request.form.getlist('ass-role'), request.form.getlist('ass-descr'), request.form.getlist('ass-time'))]
            msg['ass'].sort(key = itemgetter('title'))
            msg['ass'].sort(key = itemgetter('time'), reverse=True)
        # Render LaTeX, compile to PDF, then remove the uploaded portrait.
        cv = TEXTEMPLATE.render(msg = msg, portrait = 'img/' + filename)
        pdf = writeTex(cv, app.config["OUT_DIR"], filename)
        deleteImgUpload(filename)
        return redirect("/getpdf/" + pdf)
@app.route("/getpdf/<pdfname>")
def getpdf(pdfname):
    """Send the generated PDF to the client, then delete it from disk.

    `pdfname` comes straight from the URL, so it is sanitised with
    secure_filename before touching the filesystem — the original code was
    open to path traversal (e.g. "/getpdf/..%2F..%2Fetc%2Fpasswd").
    """
    filename = secure_filename(f'{pdfname}.pdf')
    path = os.path.join(app.config['OUT_DIR'], filename)
    with open(path, 'rb') as f:
        data = f.readlines()
    os.remove(path)  # one-shot download: the file is gone after this response
    return Response(data, headers={
        'Content-Type': 'application/pdf',
        'Content-Disposition': 'attachment; filename=%s;' % filename
    })
|
995,794 | 854cf836c72fe5c848c0ebd385a880563953a4af | '''
instalacja lepszego interpretera, lpesze wyswietlanie itp.
# pip install ipython
'''
# String indexing and slicing demos.
napis = "Ala ma kota"
print(napis[2])
print(napis[4])
print (napis[0:4])   # slice: first four characters
print (napis[::2])   # every second character
print (napis[-1])    # last character
# for litera in napis:
# print(litera)
# print(litera)
# NOTE(review): 'm' 'a' is implicit string concatenation, so this is a
# 5-element tuple ending in 'ma' — a comma ('m', 'a') was probably intended.
ala=('A', 'l', 'a', ' ', 'm' 'a')
print (ala[0:5])
# membership test in a tuple
print ('y' in ala)
# id() returns the object's identity (its address in CPython)
print(id(ala))
# tuple (may nest other tuples)
a=('a', 1 ,4, 5 ,(3,2),'napis',8)
print (a[4])
print (a[4][1])
for i in range(10):
print(i) |
class BTNode:
    """Binary-tree node holding a value and two child links."""
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None


# 1. Find the root (last element of the postorder list).
# 2. Split both traversals at the root's inorder position.
# 3. Recurse on the left and right halves.
def BTbuildInorderPostorder(inorder, postorder):
    """Rebuild a binary tree from its inorder and postorder traversals.

    Assumes node values are unique. Returns the root BTNode, or None for
    empty input.
    """
    if len(postorder) == 0:
        return None
    root = BTNode(postorder[-1])
    # list.index replaces the original manual counting loop (same O(n) scan).
    left_count = inorder.index(root.data)
    root.left = BTbuildInorderPostorder(inorder[:left_count], postorder[:left_count])
    root.right = BTbuildInorderPostorder(inorder[left_count+1:], postorder[left_count:-1])
    return root
def BTPrint(root):
    """Preorder dump: one line per node, 'value: left, right ' for present children."""
    if root is None:
        return
    line = f"{root.data}: "
    if root.left is not None:
        line += f"{root.left.data}, "
    if root.right is not None:
        line += f"{root.right.data} "
    print(line)
    BTPrint(root.left)
    BTPrint(root.right)
# Demo: rebuild the example tree from its traversals and dump it.
inorder = [4, 2, 5, 1, 6, 3, 7]
postorder = [4, 5, 2, 6, 7, 3, 1]
root = BTbuildInorderPostorder(inorder, postorder)
BTPrint(root)
|
995,796 | 9ff3e847c4b6355a875ae6e02376a0768c06cf41 | import pygame
import time
import random
pygame.init()
# Colours (RGB) and window geometry.
white = (255,255,255)
black = (0,0,0)
red = (255,0,0)
green = (0,155,0 )
display_width = 800
display_height = 600
gameDisplay = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption('Snake')
clock = pygame.time.Clock()
block_size = 10  # snake segment size in pixels
FPS = 20         # main-loop frame rate
# Fonts used by text_objects()/message_to_screen().
smallfont= pygame.font.SysFont(None,25)
medfont= pygame.font.SysFont(None,50)
largefont= pygame.font.SysFont(None,80)
def pause():
    """Freeze the game until the player presses C (resume) or Q (quit)."""
    paused = True
    message_to_screen("Paused",
                      black,
                      -70,
                      "large")
    message_to_screen("Press c to play or q to quit",
                      black,
                      25)
    while paused :
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_c:
                    paused = False
                elif event.key == pygame.K_q:
                    pygame.quit()
                    quit()
        #gameDisplay.fill(white)
        pygame.display.update()
        clock.tick(5)  # low tick rate while paused to save CPU
def score(score):
    """Draw the current score in the top-left corner of the display."""
    surface = medfont.render("Score :" + str(score), True, black)
    gameDisplay.blit(surface, [0, 0])
def randAppleGen():
    """Pick a random on-screen (x, y) position for a new apple."""
    apple_x = round(random.randrange(0, display_width - block_size))
    apple_y = round(random.randrange(0, display_height - block_size))
    return apple_x, apple_y
def game_intro():
    """Show the welcome screen until the player presses C (start) or Q (quit)."""
    intro = True
    while intro:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_c:
                    intro = False
                if event.key == pygame.K_q:
                    pygame.quit()
                    quit()
        gameDisplay.fill(white)
        message_to_screen("Welcome to Snake",
                          green,
                          -100,
                          "large")
        message_to_screen("The objective of the game is to eat red apples",
                          red,
                          -30)
        message_to_screen("The more apples you eat the longer you get",
                          black,
                          10)
        message_to_screen("If you run into yourself or the edges you die!",
                          black,
                          50)
        message_to_screen("press c to play q to quit",
                          red,
                          80)
        pygame.display.update()
        clock.tick(15)
def snake(block_size, snakeList):
    """Draw every snake segment as a filled green square."""
    for seg_x, seg_y in snakeList:
        pygame.draw.rect(gameDisplay, green, [seg_x, seg_y, block_size, block_size])
def text_objects(text, color, size):
    """Render *text* with the font for *size* ('small'/'medium'/'large').

    Returns the rendered surface together with its bounding rect.
    """
    if size == "small":
        surface = smallfont.render(text, True, color)
    elif size == "medium":
        surface = medfont.render(text, True, color)
    elif size == "large":
        surface = largefont.render(text, True, color)
    return surface, surface.get_rect()
def message_to_screen(msg, color, y_displace=0, size="small"):
    """Blit *msg* horizontally centred, offset vertically by *y_displace*."""
    surface, rect = text_objects(msg, color, size)
    rect.center = (display_width / 2), (display_height / 2) + y_displace
    gameDisplay.blit(surface, rect)
def gameLoop():
    """Main game loop: move the snake, detect collisions, grow on apples."""
    gameExit = False
    gameOver = False
    # Snake head position and per-frame velocity.
    lead_x = display_width/2
    lead_y = display_height/2
    lead_x_change = 0
    lead_y_change = 0
    snakeList=[]     # list of [x, y] segments, head last
    snakeLength = 1
    randAppleX,randAppleY=randAppleGen()
    while not gameExit:
        if gameOver == True:
            # Game-over screen: wait for quit (Q/close) or restart (C).
            message_to_screen("Game over!",red,-50,size = "large")
            message_to_screen("press c to play again and q to quit",black,50,size = "medium")
            pygame.display.update()
            while gameOver == True:
                #gameDisplay.fill(white)
                for event in pygame.event.get():
                    if event.type == pygame.QUIT:
                        gameExit = True
                        gameOver = False
                    if event.type == pygame.KEYDOWN:
                        if event.key == pygame.K_q:
                            gameExit = True
                            gameOver = False
                        if event.key == pygame.K_c:
                            gameLoop()  # NOTE(review): restart by recursion; stack grows per restart
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                gameExit= True
            if event.type == pygame.KEYDOWN:
                # Arrow keys steer; P pauses.
                if event.key == pygame.K_LEFT:
                    lead_x_change = -block_size
                    lead_y_change = 0
                elif event.key == pygame.K_RIGHT:
                    lead_x_change = block_size
                    lead_y_change = 0
                elif event.key == pygame.K_UP:
                    lead_y_change = -block_size
                    lead_x_change = 0
                elif event.key == pygame.K_DOWN:
                    lead_y_change = block_size
                    lead_x_change = 0
                elif event.key == pygame.K_p:
                    pause()
        # Wall collision ends the game.
        if lead_x >= display_width-block_size or lead_x < 0 or lead_y >= display_height-block_size or lead_y <0 :
            gameOver = True
        lead_x += lead_x_change
        lead_y += lead_y_change
        gameDisplay.fill(white)
        AppleThickness = 20
        pygame.draw.rect(gameDisplay ,red ,[randAppleX,randAppleY,AppleThickness,AppleThickness])
        #pygame.display.rect(where,color,coordinates(top-left,how wide,how tall))
        snakeHead = []
        snakeHead.append(lead_x)
        snakeHead.append(lead_y)
        snakeList.append(snakeHead)
        # Drop the tail unless the snake just grew.
        if len(snakeList) > snakeLength:
            del snakeList[0]
        # Self collision: head matching any earlier segment ends the game.
        for eachSegment in snakeList[:-1]:
            if eachSegment == snakeHead:
                gameOver = True
        snake(block_size,snakeList)
        score(snakeLength-1)
        #gameDisplay.fill(red,[50,50,10,10])
        pygame.display.update()
        """if lead_x == randAppleX and lead_y == randAppleY:
        randAppleX = round(random.randrange(0, display_width-block_size)/10.0)*10.0
        randAppleY = round(random.randrange(0,display_height-block_size)/10.0)*10.0
        snakeLength+=1"""
        """if lead_x >=randAppleX and lead_x <= randAppleX+AppleThickness:
        if lead_y >= randAppleY and lead_y <= randAppleY+AppleThickness:
        randAppleX = round(random.randrange(0, display_width-block_size))#/10.0)*10.0
        randAppleY = round(random.randrange(0,display_height-block_size))#/10.0)*10.0
        snakeLength+=1"""
        # Apple overlap test (either edge of the head inside the apple square).
        if lead_x > randAppleX and lead_x < randAppleX + AppleThickness or lead_x + block_size > randAppleX and lead_x + block_size < randAppleX + AppleThickness:
            if lead_y > randAppleY and lead_y < randAppleY + AppleThickness or lead_y + block_size > randAppleY and lead_y + block_size < randAppleY + AppleThickness:
                randAppleX,randAppleY=randAppleGen()
                snakeLength+=1
        clock.tick(FPS)
    pygame.quit()
    quit()
game_intro()  # show the start screen first
gameLoop()    # then run the game until the player quits
|
995,797 | 563a8009f7288ae154ce8e4020bcb99b74692ea0 | """this script is used to perform operations on the the speech to text api. the operations are such as
(1) adding a custom model....
this is done using the listener.speech_to_text.create_custom_models().
(2) list the custom models created for the speech to text service.
(3) delete and update models and corpora.
I used three interfaces to access the Ibm speech to text API. The interfaces are
(1)The python-sdk ibm developer cloud.
(2)The python speech to text library. I did some serious work under the hood.
(3)The curl interface using ssh commands
"""
from audio_recognize import Listen
listener = Listen()
print("Say Something")
listener.listen2()
# print("Listing custom models")
#
# result2 = listener.speech_to_text.list_custom_models()
# print(result2)
#
#
# print("Listing corpora")
#
# result3 = listener.speech_to_text.list_corpora(customization_id="5c081c90-29be-11e7-a25c-3515edf602ac")
#
# print(result3) |
995,798 | 9e0591c43f6bc74ee2ce7d7c830b33f27ba01d9f | # Xander Kehoe
import time
# Two-year college enrollment (in thousands) for 1970-2010, one value per decade.
maleValues = [1375, 2047, 2233, 2559, 3265]
femaleValues = [945, 2479, 3007, 3398, 4415]
def drawLine(t, x1, y1, x2, y2, colorP="black"):
    """Move pen-up to (x1, y1), then draw a colorP segment to (x2, y2)."""
    t.up()
    t.goto(x1, y1)
    t.down()
    t.pencolor(colorP)
    t.goto(x2, y2)
def drawLineWithDots(t, x1, y1, x2, y2, colorP="black"):
    """Draw a colorP segment from (x1, y1) to (x2, y2) with a dot at each end."""
    t.pencolor(colorP)
    t.up()
    t.goto(x1, y1)
    t.dot(5)
    t.down()
    t.goto(x2, y2)
    t.dot(5)
def drawTickMarks(t):
    """Draw the six decade ticks on the x-axis plus y ticks at the data extremes."""
    for decade in range(6):
        drawLine(t, 40 * decade, 0, 40 * decade, 10)
    # y-axis ticks at each series' maximum and minimum (scaled by 1/15).
    drawLine(t, 0, max(maleValues) / 15, 10, max(maleValues) / 15)
    drawLine(t, 0, max(femaleValues) / 15, 10, max(femaleValues) / 15)
    drawLine(t, 0, min(maleValues) / 15, 10, min(maleValues) / 15)
    drawLine(t, 0, min(femaleValues) / 15, 10, min(femaleValues) / 15)
def displayText(t):
    """Label the tick marks with values/years and write the chart title."""
    t.pencolor("black")
    t.up()
    # Extreme y values, right-aligned beside their tick marks (offset -10 to centre).
    t.goto(-3, (max(maleValues)/15)-10)
    t.write(max(maleValues), align="right")
    t.goto(-3, (max(femaleValues)/15) - 10)
    t.write(max(femaleValues), align="right")
    t.goto(-3, (min(maleValues)/15)-10)
    t.write(min(maleValues), align="right")
    t.goto(-3, (min(femaleValues)/15) - 10)
    t.write(min(femaleValues), align="right")
    # Decade labels 1970..2010 under the x-axis, 40 px apart.
    x = 40
    for i in range(1970, 2011, 10):
        t.goto(x, -20)
        t.write(str(i), align="center")
        x += 40
    t.goto(0, -50)
    t.write("Two-year College Enrollment (in thousands)")  # chart title
def main():
    """Draw the axes, both data series, tick marks and labels, then pause."""
    import turtle as t
    t.hideturtle()
    drawLine(t, 0, 0, 200, 0)  # x-axis
    drawLine(t, 0, 0, 0, 400)  # y-axis
    for i in range(1, 5):  # Drawing lines for male (blue)
        drawLineWithDots(t, 40*i, maleValues[i-1]/15, 40*(i+1), maleValues[i]/15, "blue")
    for i in range(1, 5):  # Drawing lines for female (red)
        drawLineWithDots(t, 40*i, femaleValues[i-1]/15, 40*(i+1), femaleValues[i]/15, "red")
    drawTickMarks(t)
    displayText(t)
    time.sleep(15)  # Delay so the chart stays visible before the script exits
main()
|
def ex3(x):
    """Return the largest prime factor of *x* (x >= 2).

    Trial division: each factor i is divided out completely before moving
    on, so every value appended to prime_factors is prime.

    :param x: the number to factor into primes
    :return: the largest prime factor
    """
    prime_factors = []
    i = 2
    while x != 1:
        while x % i == 0:
            prime_factors.append(i)
            # Integer division: the original `x = x / i` produced a float,
            # which loses precision for inputs near/above 2**53.
            x //= i
        i += 1
    return max(prime_factors)


print(ex3(600851475143))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.