index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
class Solution:
    def candy(self, arr: List[int]) -> int:
        """Return the minimum number of candies to hand out so that every
        child gets at least one and any child rated higher than an adjacent
        child gets more candies than that neighbor.

        Bug fix: the original one-pass version never initialized its
        accumulators when ``arr`` was empty, so ``return int(ans)`` raised
        UnboundLocalError.  This two-pass version handles the empty list and
        is far easier to audit.
        """
        n = len(arr)
        if n == 0:
            return 0
        # Everyone starts with one candy.
        candies = [1] * n
        # Left-to-right: enforce the "higher than left neighbor" rule.
        for i in range(1, n):
            if arr[i] > arr[i - 1]:
                candies[i] = candies[i - 1] + 1
        # Right-to-left: enforce the "higher than right neighbor" rule
        # without breaking the left-pass guarantees.
        for i in range(n - 2, -1, -1):
            if arr[i] > arr[i + 1]:
                candies[i] = max(candies[i], candies[i + 1] + 1)
        return sum(candies)
|
20,001 | 5b7712d388e94d6225090fb6b086511a95d9b5bf | """Routing configuration, broken out separately for ease of consultation
without going through the whole app config everything.
Some useful helpers are at the bottom. Be familiar with them!
"""
import re
import floof.model as model
from floof.resource import contextualize
from pyramid.exceptions import NotFound
from sqlalchemy.orm.exc import NoResultFound
def configure_routing(config):
    """Adds route declarations to the app config."""
    # Static file access. Separate root for each subdirectory, because Pyramid
    # treats these as first-class routables rather than a last-ditch fallback
    config.add_static_view('/css', 'floof:assets/css')
    config.add_static_view('/files', 'floof:assets/files')  # dummy file store
    config.add_static_view('/icons', 'floof:assets/icons')
    config.add_static_view('/images', 'floof:assets/images')
    config.add_static_view('/js', 'floof:assets/js')
    # TODO this doesn't actually work
    config.add_static_view('/favicon.ico', 'floof:assets/favicon.ico')

    # Shorthand: every declaration below is a plain add_route call.
    r = config.add_route

    # Miscellaneous root stuff
    r('root', '/')
    r('filestore', '/filestore/{class_}/{key}', pregenerator=filestore_pregenerator)
    r('reproxy', '/reproxy')
    r('log', '/log')

    # Registration and auth
    r('account.login', '/account/login')
    r('account.login_begin', '/account/login_begin')
    r('account.login_finish', '/account/login_finish')
    r('account.register', '/account/register')
    r('account.add_identity', '/account/add_identity')
    r('account.persona.login', '/account/persona/login')
    r('account.logout', '/account/logout')
    r('account.profile', '/account/profile')

    # Regular user control panel
    r('controls.index', '/account/controls')
    r('controls.auth', '/account/controls/authentication')
    r('controls.persona', '/account/controls/persona')
    r('controls.persona.add', '/account/controls/persona/add')
    r('controls.persona.remove', '/account/controls/persona/remove')
    r('controls.openid', '/account/controls/openid')
    r('controls.openid.add', '/account/controls/openid/add')
    r('controls.openid.add_finish', '/account/controls/openid/add_finish')
    r('controls.openid.remove', '/account/controls/openid/remove')
    r('controls.rels', '/account/controls/relationships')
    r('controls.rels.watch', '/account/controls/relationships/watch')
    r('controls.rels.unwatch', '/account/controls/relationships/unwatch')
    r('controls.info', '/account/controls/user_info')
    r('controls.certs', '/account/controls/certificates')
    r('controls.certs.add', '/account/controls/certificates/add')
    r('controls.certs.generate_server',
        '/account/controls/certificates/gen/cert-{name}.p12')
    r('controls.certs.details',
        '/account/controls/certificates/details/{serial:[0-9a-f]+}')
    r('controls.certs.download',
        '/account/controls/certificates/download/cert-{name}-{serial:[0-9a-f]+}.pem')
    r('controls.certs.revoke',
        '/account/controls/certificates/revoke/{serial:[0-9a-f]+}')

    # User pages: each route resolves {name} to a User row as context.
    kw = sqla_route_options('user', 'name', model.User.name)
    r('users.view', '/users/{name}', **kw)
    r('users.art', '/users/{name}/art', **kw)
    r('users.art_by_album', '/users/{name}/art/{album}', **kw)
    r('users.profile', '/users/{name}/profile', **kw)
    r('users.watchstream', '/users/{name}/watchstream', **kw)
    r('albums.user_index', '/users/{name}/albums', **kw)
    r('api:users.list', '/users.json')

    # Artwork: resolved by numeric id; an optional "-title" suffix is
    # generated purely for readable URLs.
    kw = sqla_route_options('artwork', 'id', model.Artwork.id)
    kw['pregenerator'] = artwork_pregenerator
    r('art.browse', '/art')
    r('art.upload', '/art/upload')
    r('art.view', r'/art/{id:\d+}{title:(-.+)?}', **kw)
    r('art.add_tags', r'/art/{id:\d+}/add_tags', **kw)
    r('art.remove_tags', r'/art/{id:\d+}/remove_tags', **kw)
    r('art.rate', r'/art/{id:\d+}/rate', **kw)

    # Tags
    # XXX what should the tag name regex be, if anything?
    # XXX should the regex be checked in the 'factory' instead?  way easier that way...
    kw = sqla_route_options('tag', 'name', model.Tag.name)
    r('tags.list', '/tags')
    r('tags.view', '/tags/{name}', **kw)
    r('tags.artwork', '/tags/{name}/artwork', **kw)

    # Albums
    # XXX well this is getting complicated!  needs to check user, needs to
    # check id, needs to generate correctly, needs a title like art has
    user_router = SugarRouter(config, '/users/{user}', model.User.name)
    album_router = user_router.chain('/albums/{album}', model.Album.id, rel=model.Album.user)
    album_router.add_route('albums.artwork', '')

    # Administration
    r('admin.dashboard', '/admin')
    r('admin.log', '/admin/log')

    # Debugging
    r('debug.blank', '/debug/blank')
    r('debug.crash', '/debug/crash')
    r('debug.mako-crash', '/debug/mako-crash')
    r('debug.status.303', '/debug/303')
    r('debug.status.400', '/debug/400')
    r('debug.status.403', '/debug/403')
    r('debug.status.404', '/debug/404')

    # Comments; made complex because they can attach to different parent URLs.
    # Rather than hack around how Pyramid's routes works, we can just use our
    # own class that does what we want!
    # XXX 1: make this work for users as well
    # XXX 2: make the other routes work
    # XXX 3: possibly find a way to verify that the same logic is used here
    # and for the main routes
    parent_route_names = ('art.view', 'user.view')
    mapper = config.get_routes_mapper()
    # NOTE(review): parent_routes is computed but never used below —
    # presumably intended for XXX item 3; confirm before removing.
    parent_routes = [mapper.get_route(name) for name in parent_route_names]
    # Maps the {type} URL segment to the ORM column used to look the
    # commented-on entity up.
    commentables = dict(
        users=model.User.name,
        art=model.Artwork.id,
    )

    def comments_factory(request):
        # Resolve /{type}/{identifier}/comments... to a context object:
        # either the parent's discussion, or one specific comment.
        # XXX prefetching on these?
        type = request.matchdict['type']
        identifier = request.matchdict['identifier']
        try:
            sqla_column = commentables[type]
            entity = model.session.query(sqla_column.parententity).filter(sqla_column == identifier).one()
        except (NoResultFound, KeyError):
            # 404!  Unknown {type} or no row with that identifier.
            raise NotFound()

        if 'comment_id' not in request.matchdict:
            return contextualize(entity.discussion)

        # URLs to specific comments should have those comments as the context
        try:
            return contextualize(
                model.session.query(model.Comment)
                .with_parent(entity.discussion)
                .filter(model.Comment.id == request.matchdict['comment_id'])
                .one())
        except NoResultFound:
            raise NotFound()

    def comments_pregenerator(request, elements, kw):
        # Fill in {type}/{identifier} (and {comment_id}) for route_url()
        # from a 'comment' and/or 'resource' keyword argument.
        resource = None
        comment = kw.get('comment', None)
        if comment:
            kw['comment_id'] = comment.id
            if 'resource' not in kw:
                resource = comment.discussion.resource
        if not resource:
            resource = kw['resource']
        # XXX users...
        entity = resource.member
        kw['type'] = 'art'
        kw['identifier'] = entity.id
        return elements, kw

    r('comments.list', '/{type}/{identifier}/comments', factory=comments_factory)
    r('comments.write', '/{type}/{identifier}/comments/write', factory=comments_factory, pregenerator=comments_pregenerator)
    r('comments.view', '/{type}/{identifier}/comments/{comment_id}', factory=comments_factory, pregenerator=comments_pregenerator)
    r('comments.edit', '/{type}/{identifier}/comments/{comment_id}/edit', factory=comments_factory, pregenerator=comments_pregenerator)
    r('comments.reply', '/{type}/{identifier}/comments/{comment_id}/write', factory=comments_factory, pregenerator=comments_pregenerator)
class SugarRouter(object):
    """Glues routing to the ORM.

    Use me like this:

        foo_router = SugarRouter(config, '/foos/{foo}', model.Foo.identifier)
        foo_router.add_route('foo_edit', '/edit')

    This will route `/foos/{foo}/edit` to `foo_edit`, with the bonus that the
    context will be set to the corresponding `Foo` object.

    The reverse works as well:

        request.route_url('foo_edit', foo=some_foo_row)
    """
    # TODO: support URLs like /art/123-title-that-doesnt-matter
    # ...but only do it for the root url, i think

    def __init__(self, config, url_prefix, sqla_column, parent_router=None, rel=None):
        """Remember the config, URL segment, and ORM column; extract the
        ``{key}`` placeholder name from the prefix.

        Raises ValueError if *url_prefix* contains no ``{key}`` placeholder.
        """
        self.config = config
        self.url_prefix = url_prefix
        self.sqla_column = sqla_column
        # The mapped entity this column belongs to; used as the query target.
        self.sqla_table = sqla_column.parententity
        self.parent_router = parent_router
        self.sqla_rel = rel
        # A parent router and the relationship to reach it come as a pair.
        assert (self.parent_router is None) == (self.sqla_rel is None)

        # This is the {key} that appears in the matchdict and generated route,
        # as well as the kwarg passed to route_url
        match = re.search(r'[{](\w+)[}]', url_prefix)
        if not match:
            raise ValueError("Can't find a route kwarg in {0!r}".format(url_prefix))
        self.key = match.group(1)

    ### Dealing with chaining

    def chain(self, url_prefix, sqla_column, rel):
        """Create a new sugar router with this one as the parent."""
        return self.__class__(
            self.config, url_prefix, sqla_column,
            parent_router=self, rel=rel)

    @property
    def full_url_prefix(self):
        """Constructs a chain of url prefixes going up to the root."""
        if self.parent_router:
            ret = self.parent_router.full_url_prefix
        else:
            ret = ''
        ret += self.url_prefix
        return ret

    def filter_sqlalchemy_query(self, query, request):
        """Takes a query, filters it as demanded by the matchdict, and returns
        a new one.
        """
        query = query.filter(self.sqla_column == request.matchdict[self.key])
        if self.parent_router:
            # Join through the relationship so the parent can constrain its
            # own URL segment too, recursively up the chain.
            query = query.join(self.sqla_rel)
            query = self.parent_router.filter_sqlalchemy_query(
                query, request)
        return query

    ### Actual routing stuff

    def add_route(self, route_name, suffix, **kwargs):
        """Analog to `config.add_route()`, with magic baked in.  Extra kwargs
        are passed along.
        """
        kwargs['pregenerator'] = self.pregenerator
        kwargs['factory'] = self.factory
        self.config.add_route(route_name, self.full_url_prefix + suffix, **kwargs)

    def pregenerator(self, request, elements, kw):
        """Passed to Pyramid as a bound method when creating a route.
        Converts the arguments to route_url (which should be row objects) into
        URL-friendly strings.
        """
        # Get the row object, and get the property from it.  Calling the
        # column descriptor's __get__ on the row yields the attribute value.
        row = kw.pop(self.key)
        kw[self.key] = self.sqla_column.__get__(row, type(row))
        if self.parent_router:
            # Parent needs its own treatment here, too.  Fill in the parent
            # object automatically
            kw[self.parent_router.key] = self.sqla_rel.__get__(row, type(row))
            elements, kw = self.parent_router.pregenerator(request, elements, kw)
        return elements, kw

    def factory(self, request):
        """Passed to Pyramid as a bound method when creating a route.
        Translates a matched URL to an ORM row, which becomes the context.
        """
        # This yields the "context", which should be the row object
        try:
            q = model.session.query(self.sqla_table)
            q = self.filter_sqlalchemy_query(q, request)
            return q.one()
        except NoResultFound:
            # 404!
            raise NotFound()
def sqla_route_options(url_key, match_key, sqla_column):
    """Returns a dict of route options that are helpful for routes representing SQLA objects.

    ``url_key``:
        The key to use for a SQLA object when calling ``route_url()``.

    ``match_key``:
        The key in the matchdict that contains the row identifier.

    ``sqla_column``:
        The SQLA ORM column that appears in the URL.
    """
    def pregenerator(request, elements, kw):
        # Swap the row object handed to route_url() for the raw column
        # value that belongs in the generated URL.
        row = kw.pop(url_key)
        kw[match_key] = sqla_column.__get__(row, type(row))
        return elements, kw

    def factory(request):
        # Resolve the matched identifier back to its row; that row becomes
        # the request context (or 404 when it doesn't exist).
        query = (
            model.session.query(sqla_column.parententity)
            .filter(sqla_column == request.matchdict[match_key]))
        try:
            return contextualize(query.one())
        except NoResultFound:
            raise NotFound()

    return dict(pregenerator=pregenerator, factory=factory)
def artwork_pregenerator(request, elements, kw):
    """Special pregenerator for artwork URLs, which sometimes carry the
    artwork's title as extra flavor text.
    """
    artwork = kw.pop('artwork')
    kw['id'] = artwork.id
    # Routes without a {title} placeholder simply ignore this key, so
    # computing it unconditionally is harmless (if slightly wasteful).
    title = artwork.title
    kw['title'] = '-' + _make_url_friendly(title) if title else ''
    return elements, kw
def _make_url_friendly(title):
"""Given a title that will be used as flavor text in a URL, returns a
string that will look less like garbage in an address bar.
"""
# RFC 3986 section 2.3 says: letters, numbers, and -_.~ are unreserved
return re.sub('[^-_.~a-zA-Z0-9]', '-', title)
def filestore_pregenerator(request, elements, kw):
    """Pregenerator for the filestore, whose URLs may live under another
    domain when a CDN cache sits in front of the app.
    """
    settings = request.registry.settings
    cdn_root = settings.get('cdn_root')
    if cdn_root:
        # Point route_url() at the CDN host instead of this app's root.
        kw['_app_url'] = cdn_root
    return elements, kw
|
20,002 | edb082e3542399491f7f90232923744d6e12ae43 | '''---------------- Ex 007 ----------------
Desenvolva um programa que leia as duas notas de um aluno, calcule e
mostre sua média
---------------------------------------'''
# Read the two grades, average them, and report the result.
# (Idiom fix: dropped the needless C-style semicolons; prompts stay in
# Portuguese since they are the program's user-facing text.)
n1 = float(input("Digite a primeira nota: "))
n2 = float(input("Digite a segunda nota: "))
media = (n1 + n2) / 2
print("A media do aluno foi: {}".format(media))
20,003 | fce921cac6013ab6e12cade38054d4f6384a7fa8 | import desserts
class Icecream(desserts.Desserts):
    """An ice-cream dessert with a size code and a size-derived price."""

    def __init__(self, name, kind):
        super().__init__(name, kind)
        # Size code: "S", "M", or anything else (treated as large).
        self.__size = ""
        # Unit price in currency units; set by update_price().
        self.__price = 0

    def get_size(self):
        """Return the current size code."""
        return self.__size

    def update_size(self, new_size):
        """Store a new size code (does not recompute the price)."""
        self.__size = new_size

    def get_price(self):
        """Return the current unit price."""
        return self.__price

    def update_price(self, size):
        """Recompute the price from the stored size and return True.

        NOTE(review): the ``size`` parameter is kept for interface
        compatibility but is ignored — pricing uses ``self.__size``;
        confirm whether callers expect it to be honored.
        """
        if self.__size == "S":
            self.__price = 1.49
        elif self.__size == "M":
            self.__price = 1.99
        else:
            self.__price = 2.49
        return True

    def calculate_cost(self):
        """Return the cost of this ice cream.

        Bug fix: the original computed ``self.__price * self.__size``,
        multiplying a float by the size *string* (e.g. ``1.99 * "M"``),
        which always raises TypeError.  The cost of a single item is its
        price.
        """
        return self.__price

    def __str__(self):
        """Concatenate size and price for display."""
        return str(self.__size) + str(self.__price)
|
20,004 | e19ab260e4b346a85a68100320a29175df17b171 | #works as of 1/25/11 (TIFF)
# Random Numbers tutorial: place pandas at random positions in the scene.
from Panda import *

# You can use rand() to get a random number between 0 and 1.
# This puts a panda at a random location:
panda(position = P3(rand(), 0, rand()))
panda(position = P3(rand(), 0, rand()))
# Run this twice - see if the pandas are always in the same place

# Recall that we can use a simple function to do things repeatedly:
def lots(i):
    # Recursive countdown: the body runs i times, then stops at 0.
    if i > 0:
        # Make a panda in a random location here
        lots(i-1)

# Use lots to make 20 pandas
# Modify "lots" to make the pandas spread out with x and y between -2 and 2
# instead of between 0 and 1.
# Finally, give each panda a spin - choose a random spin rate and multiply
# this by "time".
start()
20,005 | ad7ea093fdbbdb8b6aa3ffcf702aee7d818356a3 | import cairo.tree as ct
# Generates datasets of a row of squares or circles
data = np. |
20,006 | 7dafb6d2197e426012e57c1ab684588eb1353664 |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import time
def imprt(in_file):
    """Read a tab-separated file into a DataFrame, echo it, and return it."""
    frame = pd.read_csv(in_file, sep='\t')
    print(frame)
    return frame
def price_to_foat(l):
    """Convert a list of "$x.yz" price strings to floats, in place.

    The first character (the currency symbol) of each entry is dropped.
    The input list is mutated and also returned, preserving the original
    contract; the index loop was replaced with a comprehension.
    """
    l[:] = [float(price[1:]) for price in l]
    return l
def filt(df, price):
    """Print the rows of *df* whose item_price is strictly above *price*."""
    expensive = df[df.item_price > price]
    print(expensive)
def filt_name(df):
    """Print name and price for the rows where exactly one item was ordered."""
    single = df[df.quantity == 1]
    print(single[['item_name', 'item_price']])
def create(df):
    """Show a bar chart of item_price counts bucketed into $3-wide bins."""
    top = int(df['item_price'].max())
    prices = list(df['item_price'])
    bins = []
    counts = []
    for low in range(0, top, 3):
        bins.append(low)
        # Count prices in the half-open interval [low, low + 3).
        counts.append(sum(1 for p in prices if float(low) <= p < float(low + 3)))
    plt.bar(bins, counts, align='center')
    plt.xticks(bins)
    plt.show()
if __name__ == '__main__':
    data = []  # NOTE(review): unused scratch list — confirm before removing
    # Load the orders and normalize the "$x.yz" price strings to floats.
    df = imprt('chipotle_orders.csv')
    df['item_price'] = pd.Series(price_to_foat(list(df['item_price']))).values
    time.sleep(1)
    # Orders costing more than $10.
    filt(df, 10)
    time.sleep(1)
    # Name and price of single-quantity orders.
    filt_name(df)
    time.sleep(1)
    # Histogram of prices in $3 buckets.
    create(df)
|
20,007 | bdb9b9b9605fe8166667bcbc876cfd716ca95ca8 | """
while 循环
while 循环条件:
循环内容
"""
# 1. Sum 1 + 2 + ... + 100.
num = 1  # the next number to add
sum = 0  # running total (NOTE: shadows the built-in sum())
while num <= 100:
    sum += num
    num += 1
print("1+...+100=", sum)

# Exercise 1: print a left-aligned triangle of stars, one row per line.
"""
练习1 打印以下图案
*
**
***
****
*****
"""
line = 1  # current row number
while line <= 5:
    print("*"*line)
    line += 1

# Exercise 2: print a centered pyramid.
#   spaces per row = total rows - current row
#   stars per row  = (current row - 1) * 2 + 1
"""
练习2 打印以下图案
* 空格 3 * 1 -----1
*** 空格 2 * 3 -----2
***** 空格 1 * 5 -----3
******* 空格 0 * 7 -----4
空格数 = 总行号-所在行号
*个数 = (所在行号-1)*2 + 1
"""
line = 1
max_line = 5
while line <= max_line:
    print(" "*(max_line-line), end="")  # print the padding without a newline
    print("*"*((line-1)*2+1))
    line += 1

# 2. while ... else: the else branch runs when the loop condition is false.
num = 11
while num <= 10:
    print(num)
    num += 1
else:
    print("循环结束")
print("done")

# 3. pass is a placeholder statement: it does nothing.
if False:
    pass
else:
    print("add")

# 4. break exits the loop entirely;
#    continue skips the rest of this iteration and moves to the next one.
num = 0
while num < 100:
    num += 1
    if num % 10 == 0:
        # break  # would exit the loop entirely
        continue  # skip multiples of 10, keep looping
    print(num)
else:
    print("循环结束")  # note: had the loop hit break, else would not run
print("done")
|
20,008 | 05c4dcfb3ecbcac4490c42bb31c4ac5f8b949e31 | # Django imports...
from django.contrib import admin
# Local imports...
from .models import Commitment, Need, Recipient, RecipientNeed
@admin.register(Commitment)
class CommitmentAdmin(admin.ModelAdmin):
    # Admin for Commitment: show all fields, use raw-ID widgets for the FKs,
    # and join the related rows when rendering the change list.
    fields = ('user', 'recipient_need', 'status', 'created', 'updated')
    list_display = ('user', 'recipient_need', 'status', 'created', 'updated')
    raw_id_fields = ('user', 'recipient_need')
    list_select_related = ('user', 'recipient_need')
    # NOTE(review): presumably django-grappelli's autocomplete config for the
    # raw-ID FK fields — confirm the admin skin in use.
    autocomplete_lookup_fields = {
        'fk': ('user', 'recipient_need')
    }
@admin.register(Need)
class NeedAdmin(admin.ModelAdmin):
    # Admin for the Need model.
    # Fix: renamed from ``Need`` to ``NeedAdmin`` — the original class name
    # shadowed the imported ``Need`` model in this module and broke the
    # ``*Admin`` naming convention used by the sibling admin classes.
    fields = ('name', 'description')
    list_display = ('name', 'description')
@admin.register(Recipient)
class RecipientAdmin(admin.ModelAdmin):
    # Admin for Recipient: plain contact/address fields, identical on the
    # edit form and in the change list.
    fields = ('first_name', 'last_name', 'phone_number', 'address_1', 'address_2', 'city', 'state', 'zip_code')
    list_display = ('first_name', 'last_name', 'phone_number', 'address_1', 'address_2', 'city', 'state', 'zip_code')
@admin.register(RecipientNeed)
class RecipientNeedAdmin(admin.ModelAdmin):
    # Admin for the Recipient<->Need association: raw-ID widgets for both
    # FKs and joined queries for the change list.
    fields = ('recipient', 'need', 'quantity', 'status', 'created', 'updated')
    list_display = ('recipient', 'need', 'quantity', 'status', 'created', 'updated')
    raw_id_fields = ('recipient', 'need')
    list_select_related = ('recipient', 'need')
    # NOTE(review): presumably django-grappelli's autocomplete config for the
    # raw-ID FK fields — confirm the admin skin in use.
    autocomplete_lookup_fields = {
        'fk': ('recipient', 'need')
    }
|
20,009 | 40784de5bbc8c1bed57fcfa88ed3b76122882fa0 | import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.datasets import mnist
# Load the numeric columns of the training and test sets.
df = pd.read_hdf('train.h5', 'train')
df = df._get_numeric_data()
dft = pd.read_hdf('test.h5', 'test')
dft = dft._get_numeric_data()
X_train = df.ix[:, 'x1':].as_matrix()
y_train = df['y']
X_test = dft.ix[:, 'x1':].as_matrix()
# Integer class labels as a column vector, as expected by
# sparse_categorical_crossentropy.
y_train = y_train.values.reshape((-1, 1))

# Network hyper-parameters.
input_num_units = 100
hidden1_num_units = 128
hidden2_num_units = 256
hidden3_num_units = 512
hidden4_num_units = 128
output_num_units = 45234
epochs = 20
batch_size = 128

model = Sequential([
    Dense(output_dim=hidden1_num_units, input_dim=input_num_units, activation='relu'),
    Dropout(0.2),
    Dense(output_dim=hidden2_num_units, input_dim=hidden1_num_units, activation='relu'),
    Dropout(0.2),
    Dense(output_dim=hidden3_num_units, input_dim=hidden2_num_units, activation='relu'),
    Dropout(0.2),
    Dense(output_dim=hidden4_num_units, input_dim=hidden3_num_units, activation='relu'),
    Dropout(0.2),
    # Bug fix: the output layer used 'relu', which cannot produce the class
    # probabilities sparse_categorical_crossentropy expects; a multi-class
    # classifier needs a softmax output layer.
    Dense(output_dim=output_num_units, input_dim=hidden4_num_units, activation='softmax'),
])
model.compile(loss='sparse_categorical_crossentropy', optimizer='Nadam', metrics=['accuracy'])
model.fit(X_train, y_train, batch_size, epochs)

y_pred = model.predict_classes(X_test)
print(y_pred)

# Build the submission frame: sequential ids starting at 45324.
d = {'y': y_pred, 'Id': np.linspace(45324, 45324 + y_pred.size - 1, num=y_pred.size)}
dfp = pd.DataFrame(data=d)
dfp.Id = dfp.Id.astype(int)
dfp.to_csv('output.csv', index=False)
|
20,010 | 013844bd2e3f3da7a30dbc92d8d7b9ee80dd59c3 | #!/usr/bin/python3
#
# tsviz
#
# a command-line utility to help visualize TypeScript class-dependencies and
# graphs.
#
from argparse import ArgumentParser
import re
import os
debug_output = False
solution_path = "."
allow_loose_module_match = False
module_import_declaration = re.compile("import .* from [\"'](.*)[\"'];.*")
module_require_declaration = re.compile(".*require\([\"'](.*)[\"']\).*")
extension = ".ts"
def debug(txt):
    """Print *txt* only when verbose output has been enabled via --verbose."""
    global debug_output
    if not debug_output:
        return
    print(txt)
def get_unix_path(file):
    """Return *file* with Windows backslash separators replaced by '/'."""
    return "/".join(file.split("\\"))
def get_directory(file):
    """Return the directory portion of *file*, normalized to '/' separators."""
    normalized = file.replace("\\", "/")
    return os.path.split(normalized)[0]
def set_working_basedir(root_dir):
    """Record the base directory that module display names are relative to."""
    global solution_path
    normalized = get_unix_path(root_dir)
    solution_path = get_directory(normalized)
    debug("Base-solution dir set to {0}".format(solution_path))
class Module(object):
    """A single TypeScript/JavaScript source file and its dependency edges."""

    def __init__(self, filename):
        self.name = self.get_name_from_filename(filename)
        self.filename = os.path.abspath(filename)
        # Raw dependency file names collected from import/require lines.
        self.dependant_module_names = []
        # dependant modules, as declared in file.
        # not subject to transitive dependency-elimination.
        self.declared_dependant_modules = []
        # dependant modules as visualized in the graph, based on
        # self.declared_dependant_modules.
        # subject to transitive dependency-elimination.
        self.dependant_modules = []
        self.missing_module_names = []
        self.has_missing_modules = False
        self.is_missing_module = False
        self.highlight = False
        self.highlighted_dependents = False
        self.has_circular_dependencies = False
        self.circular_dependencies = []

    def get_name_from_filename(self, filename):
        """Return a display name: the path made relative to the solution root."""
        if filename.find("/") == -1:
            return filename
        elif len(solution_path) == 0:
            return filename
        elif solution_path == ".":
            return filename
        else:
            # Strip the solution prefix plus its trailing separator.
            return filename[len(solution_path)+1::]

    def get_friendly_id(self):
        """Return a Graphviz-safe node identifier derived from the name."""
        return self.name.replace(".", "_").replace("-", "_").replace("/", "_")

    def add_dependency(self, module_name):
        """Record a dependency by name, normalizing the file extension."""
        global extension
        if module_name.find("/") == -1 or module_name.endswith(".json"):
            # node module. no need to adjust
            debug("Info: resolved npm-module or JSON data-file {0}.".format(module_name))
        elif not module_name.endswith(extension):
            module_name += extension
        filename = module_name
        if filename not in self.dependant_module_names:
            self.dependant_module_names.append(filename)

    def get_module_references(self, lines):
        """Return the lines that look like import or require statements.

        Bug fix: ``str.find()`` returns -1 when absent, which is truthy, so
        the original ``if line.find("require("):`` matched nearly every
        line (the regex filtering downstream hid the damage).  Only lines
        actually containing "require(" belong here.
        """
        imports = []
        for line in lines:
            if line.startswith("import "):
                imports.append(line)
            if line.find("require(") != -1:
                imports.append(line)
        return imports

    def get_module_imports(self, imports):
        """Extract and resolve module paths from import/require lines."""
        result = []
        for item in imports:
            match = module_import_declaration.match(item)
            if match:
                module = match.groups()[0]
                full_module_path = self.get_module_path(module)
                result.append(full_module_path)
            match = module_require_declaration.match(item)
            if match:
                module = match.groups()[0]
                full_module_path = self.get_module_path(module)
                result.append(full_module_path)
        return result

    def get_module_path(self, module):
        """Resolve a relative import to an absolute path; bare (node-style)
        module names are returned untouched."""
        if module.find("/") != -1:
            return os.path.abspath(os.path.join(os.path.dirname(self.filename), module))
        else:
            return module

    def get_declared_module_dependencies(self):
        """Read this module's file and return the module paths it imports."""
        lines = get_lines_from_file(self.filename)
        import_lines = self.get_module_references(lines)
        imports = self.get_module_imports(import_lines)
        return imports

    def apply_declared_module_dependencies(self):
        """Record every dependency declared in this module's source file."""
        imports = self.get_declared_module_dependencies()
        for item in imports:
            self.add_dependency(item)

    def resolve_modules_from_names(self, modules):
        """Turn recorded dependency names into Module objects.

        Unresolvable names get a placeholder "missing" module appended to
        *modules* so they can be rendered (and flagged) in the graph.
        """
        global allow_loose_module_match
        for name in self.dependant_module_names:
            module = get_module_by_filename(name, modules)
            if module is None and allow_loose_module_match:
                module = get_module_by_loose_name(name, modules)
            # check if we still haven't matched up!
            if module is None:
                print("ERROR! Failed to resolve dependency {0} in module {1}!".format(name, self.name))
                # track missing deps consistently
                missing_module_id = name.replace("-", "")
                module = Module(missing_module_id)
                module.is_missing_module = True
                modules.append(module)
            if module.is_missing_module:
                self.has_missing_modules = True
                self.missing_module_names.append(module.name)
            self.dependant_modules.append(module)
        self.declared_dependant_modules = self.dependant_modules

    def remove_transitive_dependencies(self):
        """Drop direct edges that are already implied transitively.

        If A depends on B & C, and B also depends on C, then A's direct
        edge to C is redundant and can be eliminated to clean up the graph.
        """
        # clone list to have separate object to work on
        project_deps = self.dependant_modules[:]
        # investigate each direct sub-dependency as its own tree
        for dep in self.dependant_modules:
            # calculate all dependencies for this one tree
            nested_deps = dep.get_nested_dependencies()
            # check if any of those are direct dependencies; if so, remove them
            for nested_dep in nested_deps:
                if nested_dep in project_deps:
                    debug("--Project {0}-- Removed transitive dependency: {1} (via {2})".format(self.name, nested_dep.name, dep.name))
                    project_deps.remove(nested_dep)
        eliminated_deps = len(self.dependant_modules) - len(project_deps)
        if eliminated_deps != 0:
            debug("--Project {0}-- Eliminated {1} transitive dependencies. Was {2}. Reduced to {3}".format(self.name, eliminated_deps, len(self.dependant_modules), len(project_deps)))
        self.dependant_modules = project_deps

    def get_nested_dependencies(self):
        """Return every module reachable from this one, transitively."""
        total_deps = []
        self.add_nested_dependencies_to(total_deps)
        return total_deps

    def add_nested_dependencies_to(self, all_deps):
        """Accumulate reachable modules into *all_deps*; the membership
        check doubles as a visited-set guard against dependency cycles."""
        for dep in self.dependant_modules:
            if dep not in all_deps:
                all_deps.append(dep)
                dep.add_nested_dependencies_to(all_deps)

    def has_highlighted_dependencies(self):
        """True when any transitive dependency is highlighted."""
        allDeps = self.get_nested_dependencies()
        for dep in allDeps:
            if dep.highlight:
                return True
        return False

    def has_declared_highlighted_dependencies(self):
        """True when any directly declared dependency is highlighted."""
        declaredDeps = self.declared_dependant_modules
        for dep in declaredDeps:
            if dep.highlight:
                return True
        return False

    def detect_circular_dependencies(self):
        """Flag and record any dependency that depends back on this module."""
        all_nested_deps = self.get_nested_dependencies()
        for dep in all_nested_deps:
            for subdep in dep.declared_dependant_modules:
                if subdep == self:
                    print("WARNING: Circular dependency detected! Module {0} and {1} depends on each other!".format(self.name, dep.name))
                    self.has_circular_dependencies = True
                    self.circular_dependencies.append(dep)
def get_module_by_filename(filename, modules):
    """Return the module whose filename matches exactly, or None."""
    return next((m for m in modules if m.filename == filename), None)
def get_module_by_loose_name(name, modules):
    """Return the first module whose basename matches *name*'s basename
    case-insensitively, or None."""
    wanted = os.path.basename(name).lower()
    matches = (m for m in modules
               if os.path.basename(m.filename).lower() == wanted)
    return next(matches, None)
def get_lines_from_file(file):
    """Return the file's contents as a list of lines, with any UTF-8
    byte-order mark stripped.

    The original read UTF-8 and probed the re-encoded bytes for a BOM by
    hand — including a bogus b'\\xef\\xff' variant that can never occur in
    valid UTF-8.  The 'utf-8-sig' codec performs exactly this BOM
    stripping, correctly, and the context manager guarantees the handle
    is closed.
    """
    with open(file, 'r', encoding='utf-8-sig') as f:
        contents = f.read()
    return contents.split("\n")
def sort_modules(modules):
    """Sort the module list in place, alphabetically by display name."""
    modules[:] = sorted(modules, key=lambda module: module.name)
def get_tsfiles_in_dir(root_dir):
    """Return all source files under *root_dir* matching the active extension.

    When no TypeScript files are found, falls back to ".js" (updating the
    global ``extension`` so the rest of the pipeline follows suit).  The
    two copy-pasted walk loops were folded into one helper.
    """
    global extension
    from fnmatch import fnmatch

    def scan(ext):
        # One os.walk pass collecting every file named *<ext>.
        found = []
        for path, subdirs, files in os.walk(root_dir):
            for name in files:
                if fnmatch(name, "*" + ext):
                    found.append(os.path.join(path, name))
        return found

    results = scan(extension)
    # fallback to JS if no typescript
    if results == []:
        extension = ".js"
        results = scan(extension)
    return results
def get_modules(tsfiles):
    """Wrap each source-file path in a Module instance."""
    return [Module(tsfile) for tsfile in tsfiles]
def process_modules(modules):
    """Resolve dependency names to objects, detect cycles, and sort.

    Phase order matters: every module must be resolved before any cycle
    detection runs, since detection walks the resolved object graph.
    """
    # all projects & dependencies should now be known. lets analyze them
    for module in modules:
        module.resolve_modules_from_names(modules)
    # once all modules have resolved their dependencies, we can try to
    # detect ciruclar dependencies!
    for module in modules:
        module.detect_circular_dependencies()
    # format results in a alphabetical order
    sort_modules(modules)
    for module in modules:
        sort_modules(module.dependant_modules)
def remove_transitive_dependencies(projects):
    """Prune every project's dependency list down to direct-only edges."""
    for proj in projects:
        proj.remove_transitive_dependencies()
def filter_modules(rx, projects):
    """Return the projects whose lowercased filename does NOT match *rx*."""
    kept = []
    for project in projects:
        if rx.match(str.lower(project.filename)):
            debug("Info: Excluding project {0}.".format(project.name))
            continue
        kept.append(project)
    return kept
def highlight_modules(rx, projects):
    """Mark projects matching *rx*, then flag everything they depend on."""
    for project in projects:
        if rx.match(str.lower(project.name)):
            debug("Highlighting project {0}".format(project.name))
            project.highlight = True
    # Second pass: every dependency reachable from a highlighted project is
    # marked so the paths leading to it can be colored in the graph.
    for project in projects:
        if not project.highlight:
            continue
        for dep in project.get_nested_dependencies():
            dep.highlighted_dependents = True
def render_dot_file(projects, highlight_all=False, highlight_children=False):
    """Build and return the Graphviz "dot" source for the module graph."""
    lines = []
    lines.append("digraph {")
    lines.append(" rankdir=\"LR\"")
    lines.append("")
    lines.append(" # apply theme")
    lines.append(" bgcolor=\"#222222\"")
    lines.append("")
    lines.append(" // defaults for edges and nodes can be specified")
    lines.append(" node [ color=\"#ffffff\" fontcolor=\"#ffffff\" ]")
    lines.append(" edge [ color=\"#ffffff\" ]")
    lines.append("")
    lines.append(" # module declarations")
    # define projects
    # create nodes like this
    # A [ label="First Node" shape="circle" ]
    for project in projects:
        id = project.get_friendly_id()
        styling = ""
        # Node color encodes status, checked in priority order:
        # highlighted > missing > has-missing-deps > circular.
        if project.highlight or project.highlighted_dependents:
            styling = " fillcolor=\"#30c2c2\" style=filled color=\"#000000\" fontcolor=\"#000000\""
        elif project.is_missing_module:
            styling = " fillcolor=\"#f22430\" style=filled color=\"#000000\" fontcolor=\"#000000\""
        elif project.has_missing_modules:
            styling = " fillcolor=\"#616118\" style=filled color=\"#000000\" fontcolor=\"#000000\""
        elif project.has_circular_dependencies:
            styling = " fillcolor=\"#ff0000\" style=filled color=\"#000000\" fontcolor=\"#cccc00\""
        lines.append(" {0} [ label=\"{1}\" {2} ]".format(id, project.name, styling))
    # apply dependencies
    lines.append("")
    lines.append(" # project dependencies")
    for project in projects:
        proj1_id = project.get_friendly_id()
        for proj2 in project.dependant_modules:
            if proj2 is None:
                print("WARNING: Unable to resolve dependency with ID {0} for project {1}".format(id, project.name))
            else:
                proj2_id = proj2.get_friendly_id()
                styling = ""
                # Edge color mirrors the node scheme above.
                if proj2.highlight or ((project.highlight or project.highlighted_dependents) and proj2.highlighted_dependents) or proj2.has_declared_highlighted_dependencies() or (highlight_all and proj2.has_highlighted_dependencies()):
                    styling = " [color=\"#30c2c2\"]"
                elif proj2.is_missing_module or (project.has_missing_modules and proj2.has_missing_modules):
                    styling = " [color=\"#f22430\"]"
                elif project.has_circular_dependencies and proj2.has_circular_dependencies:
                    styling = " [color=\"#ff0000\"]"
                lines.append(" {0} -> {1}{2}".format(proj1_id, proj2_id, styling))
    lines.append("")
    lines.append("}")
    return "\n".join(lines)
def process(root_dir, dot_file, exclude, highlight, highlight_all, highlight_children, keep_deps):
    """End-to-end pipeline: scan sources, build the graph, write *dot_file*."""
    set_working_basedir(root_dir)
    module_files = get_tsfiles_in_dir(root_dir)
    modules = get_modules(module_files)

    if exclude:
        debug("Excluding projects...")
        excluder = re.compile(str.lower(exclude))
        modules = filter_modules(excluder, modules)

    # pull in dependencies declared in TS-files.
    # requires real files, so cannot be used in test!
    for module in modules:
        module.apply_declared_module_dependencies()
    process_modules(modules)

    if not keep_deps:
        debug("Removing redundant dependencies...")
        remove_transitive_dependencies(modules)

    if highlight:
        debug("Highlighting projects...")
        highlighter = re.compile(str.lower(highlight))
        highlight_modules(highlighter, modules)

    txt = render_dot_file(modules, highlight_all, highlight_children)
    with open(dot_file, 'w') as f:
        f.write(txt)
    print("Wrote output-file '{0}'.".format(dot_file))
def main():
    """Command-line entry point: parse arguments and run the pipeline."""
    global debug_output, allow_loose_module_match
    p = ArgumentParser()
    p.add_argument("--input", "-i", help="The root directory to analyze.")
    p.add_argument("--output", "-o", help="The file to write to.")
    p.add_argument("--loose", "-l", action="store_true", help="Allow loose matching of modules (may be required with path-aliases!)")
    p.add_argument("--keep-declared-deps", "-k", action="store_true", help="Don't remove redundant, transisitive dependencies in post-processing.")
    p.add_argument("--verbose", "-v", action="store_true", help="Enable verbose output")
    p.add_argument("--exclude", "-e", help="Filter modules matching this expression from the graph")
    p.add_argument("--highlight", help="Highlights modules matching this expression in the graph")
    p.add_argument("--highlight-all", action="store_true", help="Highlight all paths leading to a highlighted project")
    p.add_argument("--highlight-children", action="store_true", help="Highlight all child-dependencies of highlighted project")
    args = p.parse_args()
    # Propagate the verbosity/matching flags to the module-level globals
    # consulted throughout the pipeline.
    debug_output = args.verbose
    allow_loose_module_match = args.loose
    process(args.input, args.output, args.exclude, args.highlight, args.highlight_all, args.highlight_children, args.keep_declared_deps)

# don't run from unit-tests
if __name__ == "__main__":
    main()
|
20,011 | 61cc9c66adbf1d0d61eec6985b2b82f0f10b8bd4 | from datetime import datetime
import json
# import konlpy
# from konlpy.tag import Kkma, Komoran, Hannanum
# import os
# print(os.getcwd())
#//// -> C:\Users\Playdata\Desktop\정책분석>
def getPostaggingResult(searchQuery):
    """Placeholder: extract noun keywords from *searchQuery* via POS tagging.

    Intended to use konlpy's Komoran tagger (draft kept below, commented out
    because konlpy is not installed here); currently unimplemented and always
    returns None, so callers fall back to a hard-coded keyword list.
    """
    pass
    # Draft implementation (requires konlpy):
    #
    # example incoming value (searchQuery): '개발인공지능빅데이터'
    # searchQuery_list = list()
    #
    # komoran = Komoran()
    # KomoranList = komoran.nouns(searchQuery)  # already returns a list
    #
    # Drop 1-character tokens (final.json also excludes all 1-char entries).
    # Known issue: '빅데이터' gets split into '빅' and '데이터'.
    # TODO: improve search accuracy, e.g. relate 빅데이터/데이터/공공데이터.
    #
    # for word in KomoranList:
    #     if len(word) < 2:
    #         KomoranList.remove(word)  # NOTE(review): removing while iterating skips items
    #
    # result = KomoranList
    #
    # return result
#///// => searh_db 가져오는 함수
def loadSearchInfo(path="./final.json"):
    """Load the word-based search database.

    Replaces the manual readline/strip/concatenate loop with a context
    manager and `json.load` (equivalent for valid JSON, and the file handle
    is closed even on error).

    Args:
        path (str): JSON file to read; defaults to the original hard-coded
            location, so existing callers are unaffected.

    Returns:
        list: the 'data4search' list of per-keyword posting dicts.
    """
    with open(path, "r", encoding="utf-8") as f:
        return json.load(f)["data4search"]
#//// => check_db 가져오는 함수
def loadCheckInfo(path="./final2.json"):
    """Load the check-option database.

    Same cleanup as loadSearchInfo: context manager + `json.load` instead of
    the manual readline/strip/concatenate loop.

    Args:
        path (str): JSON file to read; defaults to the original hard-coded
            location, so existing callers are unaffected.

    Returns:
        The parsed JSON document (a list of per-policy option dicts).
    """
    with open(path, "r", encoding="utf-8") as f:
        return json.load(f)
#checkOption_DB_List 예시
# {
# "documnet_number" : "1",
# "documnet_info" :
# {
# "locationCode" : "경기",
# "location" : "경기 성남시",
# "interestPol" : "교육, 훈련",
# "yearMin" : "99999",
# "yearMax" : "99999",
# "startDate" : "99999",
# "endDate" : "99999"
# }
#
# }
def getCheckResult(userQuery):
    """Return IDs of policies whose check-options match the user's profile.

    Args:
        userQuery (dict): keys 'locationCode', 'location', 'interestPol'
            and 'year' (the user's age, an int in the demo driver).

    Returns:
        list: matching 'PolicyID' values from the check-option database.
    """
    checkOption_DB_List = loadCheckInfo()
    print("[" + str(datetime.now()) + "] Database(list) for check Option is loaded: Total {0} policy..".format(len(checkOption_DB_List)))
    userLocationCode = userQuery['locationCode']
    userLocation= userQuery['location']
    userInterestPol = userQuery['interestPol']
    userAge = userQuery['year']
    print("[" + str(datetime.now()) + "] Words in User Check: 지역(시/도)='{0}', 지역(상세)='{1}', 관심분야='{2}', 나이='{3}'".format(userLocationCode, userLocation, userInterestPol, userAge))
    checkResult = list()
    for db_dict in checkOption_DB_List:
        # All filters must match; '전체' ("all") accepts any interest field.
        if userLocationCode == db_dict['locationCode']:
            if userLocation == db_dict['location']:
                if userInterestPol == db_dict['interestPol'] or userInterestPol == '전체':
                    # NOTE(review): the sample record above stores yearMin/yearMax
                    # as strings ("99999"); comparing an int age with a str raises
                    # TypeError on Python 3 — confirm the DB stores numbers.
                    if userAge >= db_dict['yearMin'] and userAge <= db_dict['yearMax']:
                        curYear = datetime.today()
                        # NOTE(review): strptime("99999", "%Y%m%d") raises
                        # ValueError for the sentinel dates shown above — verify.
                        dbYear_Start = datetime.strptime(str(db_dict['startDate']), "%Y%m%d")
                        dbYear_End = datetime.strptime(str(db_dict['endDate']), "%Y%m%d")
                        # Keep only policies whose application window is open now.
                        if curYear > dbYear_Start and curYear <dbYear_End:
                            checkResult.append(db_dict['PolicyID'])
    print("[" + str(datetime.now()) + "] Total Check Result Count: {0}".format(len(checkResult)))
    return checkResult
#///// => search Result를 구하는 함수
def getSearchResult(searchQuery):
    """Rank documents by summed tf-idf over the query's extracted keywords.

    Args:
        searchQuery (str): raw user query string (currently only logged;
            see the stub note below).

    Returns:
        dict: {doc_id (str): accumulated tf-idf}, ordered by score descending.
    """
    # search4Word_DB_List: contents of final.json (per-keyword posting lists).
    search4Word_DB_List = loadSearchInfo()
    print("[" + str(datetime.now()) + "] Database(list) for word based Searching is loaded: Total {0} words..".format(len(search4Word_DB_List)))
    # qWordList = []
    # NOTE(review): keyword extraction is stubbed out — the query is ignored
    # and this fixed keyword list is used until getPostaggingResult() works.
    qWordList = ['개발', '데이터', '인공지능']
    # qWordList = getPostaggingResult(searchQuery)
    print("[" + str(datetime.now()) + "] Words in User Query: {0}".format(searchQuery), "->", str(qWordList))
    searchDocDict = {}
    for db_dict in search4Word_DB_List:
        for qWord in qWordList:
            isExist = False
            addCnt = 0
            if db_dict['keyWord'] == qWord:
                isExist = True
                for doc_dict in db_dict['DocList']:
                    if searchDocDict.get(str(doc_dict['doc_id'])) == None:
                        searchDocDict[str(doc_dict['doc_id'])] = doc_dict['tfidf']
                    else:
                        searchDocDict[str(doc_dict['doc_id'])] = searchDocDict.get(str(doc_dict['doc_id'])) + doc_dict['tfidf']
                    # tf-idf scores are summed so documents matching several
                    # query words rank above single-word matches.
                    addCnt = addCnt + 1
            if isExist:
                print("[" + str(datetime.now()) + "] '{0}' word Search Success. {1} results".format(qWord, addCnt))
    print("[" + str(datetime.now()) + "] Total Search Result Count: {0}".format(len(searchDocDict)))
    searchDocDict_sorted = sorted(searchDocDict.items(), key=(lambda x: x[1]), reverse = True)
    return dict(searchDocDict_sorted)
def searchMain(userQuery):
    """Intersect check-option filtering with keyword search; return doc IDs.

    Args:
        userQuery (dict): {'checkOption': {...}, 'searchOption': str}.

    Returns:
        list: document/policy IDs satisfying both the check options and
        (when non-empty) the keyword search.
    """
    # getCheckResult returns the document/policy IDs that satisfy the user's
    # check options, e.g. ["d1", "d3", "d6"].
    checkResult = getCheckResult(userQuery['checkOption'])
    print("[" + str(datetime.now()) + "] Check Results :", checkResult)
    searchResult = getSearchResult(userQuery['searchOption'])
    print("[" + str(datetime.now()) + "] Search Results :", searchResult)
    checkResult_Set = set(checkResult)
    # NOTE(review): {} is an empty dict, not a set; it only works here because
    # the value is either replaced below or merely len()'d/iterated.
    commonResult_Set = {}
    if len(searchResult) == 0:
        # Empty query string: fall back to the check-option results alone.
        # A non-empty query with no hits leaves the result empty.
        if userQuery['searchOption'] == "":
            commonResult_Set = checkResult_Set
    else:
        searchResult_DocIDSet = set(searchResult.keys())
        commonResult_Set = searchResult_DocIDSet & checkResult_Set
    print("[" + str(datetime.now()) + "] Finally {0} Results is Extracted".format(len(commonResult_Set)))
    print("[" + str(datetime.now()) + "] Common (Check & Search) Results :", commonResult_Set)
    return list(commonResult_Set)
########################################main
if __name__ == "__main__":
    # Demo run with a hard-coded user profile and query string.
    print("[" + str(datetime.now()) + "PreProcessing is Started..")
    userQuery = {'checkOption' : {"locationCode" : "경북", "location": "대구", "interestPol" : "교육훈련, 체험, 인턴", "year" : 28}, 'searchOption' : "개발인공지능빅데이터"}
    finalSearchResult = searchMain(userQuery)
    print("[" + str(datetime.now()) + " Result Print in Main >>>>")
    print(type(finalSearchResult))
    print(finalSearchResult)
    if len(finalSearchResult) == 0:
        print("[" + str(datetime.now()) + "] 검색 결과가 없습니다. 검색어를 다시 입력해 주세요.")
    else:
        print("[" + str(datetime.now()) + "] {0}개의 검색 결과가 있습니다.".format(len(finalSearchResult)))
        for result in finalSearchResult:
            print(result)
    print("[" + str(datetime.now()) + "Preprocessing is Finished")
20,012 | 41094e5779b443cfa606ace3c7742510f85c6f1b | """
@name: subgraph_sorters.py
@description:
Module for sorting subgraphs
@author: Phoebe Cullen
@email: "cm20pic"+ <at>+ "leeds"+ "."+ "ac"+ "."+ "uk"
@date: 2021-07-09
"""
import csv
def loop_through_subgraphs(func):
    """Decorator: drive *func* over every subgraph row of a CSV stream.

    The wrapped callable is invoked as ``wrapper(fptr, cfg)`` where *fptr*
    is an open CSV stream of subgraph rows (three vertex columns followed by
    an integer isoclass) and *cfg* supplies the 'groupings' CSV path and the
    'isoclasses' filter ('ALL' keeps everything).  For each kept row, *func*
    receives the three vertices and the vertex->group mapping; truthy return
    values are appended to the row and collected.
    """
    def wrapper(*args, **kwargs):
        stream, cfg = args
        # Vertex -> group mapping, loaded once per call.
        with open(cfg['groupings'], mode="r") as grp_file:
            vertex_group = {line[0]: line[1] for line in csv.reader(grp_file)}
        # Optional isomorphism-class filter; None means "keep everything".
        wanted = None
        if cfg['isoclasses'] not in ('ALL', 'all', 'All'):
            wanted = [int(tok) for tok in cfg['isoclasses'].split(',')]
        results = []
        for record in csv.reader(stream):
            record[3] = int(record[3])
            if wanted and record[3] not in wanted:
                continue
            extra = func(record[:3], vertex_group)
            if extra:
                results.append(record + extra)
        return results
    return wrapper
@loop_through_subgraphs
def unique_groups(*args, **kwargs):
    """Per-subgraph: each vertex's group label plus the count of distinct groups.

    Returns None (row dropped by the decorator) when a vertex is missing
    from the grouping table.
    """
    vertices, subgraph_grp = args
    try:
        labels = [subgraph_grp[v] for v in vertices]
    except KeyError:
        print(f"KeyError for row: {vertices}")
        return None
    return labels + [len(set(labels))]
@loop_through_subgraphs
def cell_position(*args, **kwargs):
    """Per-subgraph: look up each vertex's positional (cell) class.

    Returns None (row dropped by the decorator) when a vertex is missing
    from the classification table.
    """
    vertices, cell_class = args
    try:
        groups = [cell_class[v] for v in vertices]
        return groups
    except KeyError:
        print(f"KeyError for row: {vertices}")
        return None
""""
# Template function
@loop_through_subgraphs
def sorting_function(*args,**kwargs):
# do stuff
"""
|
20,013 | 84244bac88a22578d68458544fa2bda7db387e8a | size=int(input())
list=input().split()
for i in range(size):
list[i]=int(list[i])
while list[i]%3==0:
list[i]=list[i]//3
while list[i]%2==0:
list[i]=list[i]//2
isS=True
for i in range(size-1):
if list[i+1]!=list[i]:
isS=False
if isS==False:
print('No')
else:
print('Yes') |
20,014 | 7944267ea24bb7a35c0a29e48b7ad5a75770d690 | import matplotlib.pyplot as plt
from os import listdir
from os.path import isfile, join
from itertools import groupby
dirname = 'img_time'
files = listdir('img_time')
# For each per-location timestamp CSV, write a ".total" file containing
# per-year counts, a ';' separator line, then per-year-month counts.
for location_file_name in files:
    # NOTE(review): the read handle is never closed explicitly (left to GC),
    # and line[:-1] assumes every line ends with a newline — verify inputs.
    lines = open(dirname+'/'+location_file_name).readlines()
    tokens = [line[:-1].split(',') for line in lines]
    data = [{'year':token[0],'mon':token[1],'day':token[2],'hour':token[3],'min':token[4],'sec':token[5]} for token in tokens ]
    f = open("%s/%s.total"%(dirname,location_file_name[:-4]),'w')
    # groupby only merges *consecutive* equal keys — assumes the input rows
    # are already ordered by timestamp (TODO confirm).
    for key,group in groupby(data,lambda x:x['year']):
        f.write("%s:%d\n" %(key,len(list(group))))
    f.write(';')
    for key,group in groupby(data,lambda x:x['year']+','+x['mon']):
        f.write("%s:%d\n" %(key,len(list(group))))
    f.close()
"""
# data
x = [0,5,9,10,15]
y = [0,1,2,3,4]
# trick to get the axes
fig,ax = plt.subplots()
# make ticks and tick labels
xticks = range(min(x),max(x)+1,3)
xticklabels = ['2000-01-0'+str(n) for n in range(1,len(xticks)+1)]
# plot data
ax.plot(x,y)
# set ticks and tick labels
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels,rotation=15)
# show the figure
plt.show()
""" |
20,015 | 4f4baa78209f9fdb5d616d24f350d2744430b25d | # Find the sum of even valued Fibonacci sequence numbers under four million.
def solve(limit=4000000):
    """Sum the even-valued Fibonacci terms strictly below *limit*.

    Args:
        limit (int): exclusive upper bound on the Fibonacci terms considered.
            Defaults to four million (Project Euler problem 2), so existing
            callers are unaffected.

    Returns:
        int: the sum of the even terms below *limit*.
    """
    term1, term2 = 1, 2
    answer = 0
    while term2 < limit:
        if term2 % 2 == 0:
            answer += term2
        term1, term2 = term2, term1 + term2
    return answer


if __name__ == "__main__":
    print(solve())
20,016 | 031ff1c9fc73f863aadc30a305d94fe679285f4b | from brian2 import *
import numpy as np
|
20,017 | 7c5ceb84473f918941db7b7d130def89aac783b4 | import ROOT
import os, sys, re
from common import *
channels = [6,11,5,12,4,13,3,14]
rnd = ROOT.TRandom3()
f = ROOT.TFile.Open("plots/efficiency_v_x/tmp.root")
def has_hit(x, hist):
    """Randomly decide whether a channel fires at position *x*.

    Looks up the efficiency in the channel's histogram at x and compares a
    uniform random draw (module-level TRandom3 `rnd`) against it.
    """
    efficiency = hist.GetBinContent(hist.GetXaxis().FindBin(x))
    return rnd.Rndm() < efficiency
def get_cluster_size(x,thresh,opt=""):
    """Simulate one event at position *x*.

    Each channel is tossed against its efficiency histogram
    ("h_eff_<ch>_amp<thresh>" from the module-level file `f`).  With
    opt="3channels" only channels 4, 12 and 13 are considered.

    Returns:
        (nhits, nadjacent): total fired channels and the size of the longest
        run of adjacent fired channels (the "cluster size").
    """
    nhits = 0
    hits = [0,0,0,0,0,0,0,0]
    for i,ch_name in enumerate(channels):
        hist = f.Get("h_eff_{}_amp{}".format(ch_name,thresh))
        if opt=="3channels":
            if ch_name != 4 and ch_name!= 12 and ch_name!= 13: continue
            if has_hit(x,hist):
                nhits +=1
                hits[i] = 1
        else :
            if has_hit(x,hist):
                nhits +=1
                hits[i] = 1
    # Scan for the longest run of consecutive fired channels.
    nadjacent = 0
    ncurrent = 0
    for hit in hits:
        # if hit update nadjacent_current
        if hit > 0 : ncurrent += 1
        # if no hit reset nadjacent_current
        if hit == 0 : ncurrent = 0
        if ncurrent > nadjacent : nadjacent = ncurrent
    #print(nhits, nadjacent, hits)
    return nhits, nadjacent
def plot_cluster_size(x,thresh,opt=""):
    """Toy-MC at fixed *x*: plot hit-count and cluster-size distributions.

    Runs `ntoys` simulated events, saves 1D/2D PDFs under plots/cluster/,
    and returns the area-normalized cluster-size histogram.
    """
    # for a fixed x
    ntoys = 1000
    hist_nhits = ROOT.TH1F("h_nhits_{}_{}_{}".format(x,thresh,opt),";N hits per event;Fraction of Events",9,-0.5,8.5)
    hist_nadjs = ROOT.TH1F("h_nadjs_{}_{}_{}".format(x,thresh,opt),";N hits per cluster;Fraction of Events",9,-0.5,8.5)
    hist_hits_v_cluster = ROOT.TH2F("h_nhits_v_nadj_{}_{}_{}".format(x,thresh,opt),";N hits per cluster;N hits per event;Events",9,-0.5,8.5,9,-0.5,8.5)
    for toy in range(0,ntoys):
        nhits,nadj = get_cluster_size(x,thresh,opt)
        hist_nhits.Fill(nhits)
        hist_nadjs.Fill(nadj)
        hist_hits_v_cluster.Fill(nadj,nhits)
    # Normalize to unit area; Integral(0,-1) includes under/overflow bins.
    hist_nhits.Scale(1.0/hist_nhits.Integral(0,-1))
    hist_nadjs.Scale(1.0/hist_nadjs.Integral(0,-1))
    c = ROOT.TCanvas()
    hist_nhits.Draw("histe")
    c.Print("plots/cluster/nhits_total_{}_{}_{}.pdf".format(x,thresh,opt))
    hist_nadjs.Draw("histe")
    c.Print("plots/cluster/cluster_size__{}_{}_{}.pdf".format(x,thresh,opt))
    hist_hits_v_cluster.Draw("COLZ")
    c.Print("plots/cluster/2D_nhits_v_cluster_size_{}_{}_{}.pdf".format(x,thresh,opt))
    return hist_nadjs
def scan_cluster_size(opt=""):
    """Overlay cluster-size distributions for several x positions at a fixed
    amplitude threshold (110 mV); saves a combined PDF."""
    thresh=110
    xs = [ 20.50 + 0.02*i for i in range(0,6)]
    hists = []
    ymax = 0
    for i,x in enumerate(xs):
        hist = plot_cluster_size(x,thresh,opt)
        hists.append(hist)
        # cleanHist comes from `common` (imported at the top of the file).
        cleanHist(hist,i)
        if hist.GetMaximum() > ymax: ymax = hist.GetMaximum()
    c = ROOT.TCanvas()
    leg = ROOT.TLegend(0.6,0.5,0.88,0.88)
    for i,x in enumerate(xs):
        leg.AddEntry(hists[i],"x = {} mm".format(x),"l")
        hists[i].SetMaximum(1.1*ymax)
        if i==0: hists[i].Draw("hist e")
        else : hists[i].Draw("hist e same")
    leg.Draw()
    c.Print("plots/cluster/scan_x_thresh_{}_{}.pdf".format(thresh,opt))
    return
def scan_cluster_threshold(opt=""):
    """Overlay cluster-size distributions for several amplitude thresholds
    (80..120 mV) at a fixed x position; saves a combined PDF."""
    x = 20.55
    thresholds = range(80,130,10)
    hists = []
    ymax = 0
    for i,thresh in enumerate(thresholds):
        hist = plot_cluster_size(x,thresh,opt)
        hists.append(hist)
        cleanHist(hist,i)
        if hist.GetMaximum() > ymax: ymax = hist.GetMaximum()
    c = ROOT.TCanvas()
    leg = ROOT.TLegend(0.6,0.5,0.88,0.88)
    for i,thresh in enumerate(thresholds):
        leg.AddEntry(hists[i],"amp > {} mV".format(thresh),"l")
        hists[i].SetMaximum(1.1*ymax)
        if i==0: hists[i].Draw("hist e")
        else : hists[i].Draw("hist e same")
    leg.Draw()
    c.Print("plots/cluster/scan_thresh_x_{}_{}.pdf".format(x,opt))
    return
# Global ROOT style: suppress fit/stat boxes on all plots.
ROOT.gStyle.SetOptFit(0)
ROOT.gStyle.SetOptStat(0)
# position
# in 4 - 20.55
# in between 4 and 13 - 20.6
#plot_cluster_size(20.55,100)
#plot_cluster_size(20.60,100)
# Run the full scans twice: all channels, then restricted to channels 4/12/13.
scan_cluster_size()
scan_cluster_threshold()
scan_cluster_size("3channels")
scan_cluster_threshold("3channels")
|
20,018 | 3545abd43809ea95ed4c297986ee3471143ef974 | """docstrings.
Example:
Attributes:
module_level_variable1 (int): Module level variables may be documented in
either the ``Attributes`` section of the module docstring, or in an
inline docstring immediately following the variable.
Todo:
"""
# todo Finish docstring
# Module-level constant; the bare string on the following line is picked up
# by documentation tools (Sphinx) as its inline docstring.
module_level_variable2 = 98765
"""int: Module level variable documented inline."""
# todo Finish docstring
def function_with_types_in_docstring(param1, param2):
    """Example function with types documented in the docstring (Google style).

    Args:
        param1 (int): The first parameter.
        param2 (str): The second parameter.

    Returns:
        bool: The return value. True for success, False otherwise.

    .. _PEP 484:
        https://www.python.org/dev/peps/pep-0484/
    """
    # todo Finish docstring
def function_with_pep484_type_annotations(param1: int, param2: str) -> bool:
    """Example function with PEP 484 type annotations instead of docstring types.

    Args:
        param1: The first parameter.
        param2: The second parameter.

    Returns:
        The return value. True for success, False otherwise.
    """
    # todo Finish docstring
def module_level_function(param1, param2=None, *args, **kwargs):
    """Example module-level function (Google docstring style).

    Args:
        param1 (int): The first parameter.
        param2 (:obj:`str`, optional): The second parameter. Defaults to None.
        *args: Variable length argument list.
        **kwargs: Arbitrary keyword arguments.

    Returns:
        bool: True if successful, False otherwise.

    Raises:
        AttributeError: The ``Raises`` section is a list of all exceptions that are relevant to the interface.
        ValueError: If `param2` is equal to `param1`.
    """
    # todo Finish docstring
def example_generator(n):
    """Generate the integers from 0 to `n` - 1.

    Args:
        n (int): The upper limit of the range to generate, from 0 to `n` - 1.

    Yields:
        int: The next number in the range of 0 to `n` - 1.

    Examples:
        Examples should be written in doctest format, and should illustrate how
        >>> print([i for i in example_generator(4)])
        [0, 1, 2, 3]
    """
    # Body was missing; the documented (and doctested) behavior is plain
    # range iteration.
    for i in range(n):
        yield i
class ExampleError(Exception):
    """Exception raised for errors in this example module.

    Note:
        Do not include the `self` parameter in the ``Args`` section.

    Args:
        msg (str): Human readable string describing the exception.
        code (:obj:`int`, optional): Error code.

    Attributes:
        msg (str): Human readable string describing the exception.
        code (int): Exception error code.
    """
    # The documented msg/code attributes were never assigned; implement the
    # constructor while keeping Exception's default str()/args behavior.
    def __init__(self, msg='', code=None):
        super(ExampleError, self).__init__(msg)
        self.msg = msg
        self.code = code
class ExampleClass(object):
    """Example class demonstrating Google-style attribute documentation.

    Properties created with the ``@property`` decorator should be documented
    in the property's getter method.

    Attributes:
        attr1 (str): Description of `attr1`.
        attr2 (:obj:`int`, optional): Description of `attr2`.
    """

    def __init__(self, param1, param2, param3):
        """Store the three documented parameters as attributes.

        Note:
            Do not include the `self` parameter in the ``Args`` section.

        Args:
            param1 (str): Description of `param1`.
            param2 (:obj:`int`, optional): Description of `param2`. Multiple
                lines are supported.
            param3 (:obj:`list` of :obj:`str`): Description of `param3`.
        """
        # The documented attributes were never assigned; previously the
        # parameters were silently dropped.
        self.attr1 = param1
        self.attr2 = param2
        self.attr3 = param3
        self.attr5 = None
        """str: Docstring *after* attribute, with type specified."""

    @property
    def readonly_property(self):
        """str: Properties should be documented in their getter method."""
        return 'readonly_property'

    def example_method(self, param1, param2):
        """Example method; reports success.

        Note:
            Do not include the `self` parameter in the ``Args`` section.

        Args:
            param1: The first parameter.
            param2: The second parameter.

        Returns:
            True if successful, False otherwise.
        """
        # Minimal implementation matching the documented contract.
        return True
|
20,019 | e8af0840dbc451264658b28f52efae49188f8420 | import time
stime = time.clock()
# Python 2 script (xrange, print statements, time.clock).
# For each perimeter p = i+j+k <= 1000, count the integer-sided right
# triangles (i <= j <= k, i^2 + j^2 = k^2) and print the perimeter with the
# most solutions, then the elapsed time.
per_count = {}
for i in xrange(1,998):
    for j in xrange(i,999):
        if i + j > 1000:
            break
        for k in xrange(j,1000):
            if i + j + k > 1000:
                break
            if i**2 + j**2 == k**2:
                # Lazily initialize the perimeter's counter to 0, then bump it.
                count = per_count.get((i+j+k))
                if count is None:
                    per_count[(i+j+k)] = 0
                per_count[(i+j+k)] += 1
# Pick the perimeter with the highest solution count.
largest = 0
sol = None
for key in per_count:
    if per_count[key] > largest:
        largest = per_count[key]
        sol = key
print sol
print "time taken:", time.clock() - stime
20,020 | c6ff9b8196f803ed53f3ecb483f40bb37950f548 | import retro
import os
from train_ppo_refactor import get_env
import numpy
from utils import code_location
# Replay a previously recorded action sequence in the Mario Kart retro
# environment and report per-step and total reward.
game = "SuperMarioKart-Snes"
scenario = os.path.join(code_location, "scenarios", game, "custom_rewards.json")
state = os.path.join(retro.data.DATA_PATH, "data", "contrib", game, "MarioCircuit1.GP.50cc.1P.Luigi.Start.state")
filename = "best_acts.txt"
# One integer action per line.
with open(filename, "r") as f:
    acts = f.readlines()
acts = [int(x) for x in acts]
env = get_env(game, state, scenario)
env.reset()
cumulative_reward = 0
for k in acts:
    obs, rew, done, info = env.step(k)
    cumulative_reward += rew
    env.render()
    print("Reward: {}".format(rew))
    if done:
        print("Done. Total reward: {}".format(cumulative_reward))
        break
20,021 | ef9248995b29c60853e6f67fab14624fba6f3771 | import PySimpleGUI as ps
# ToDo: 大まかな画面設計をしてから、コードを書くこと
layout = [
# ToDo: レイアウト配置
]
window = ps.Window("Simple Image Viewer",layout)
|
20,022 | 326d7b925d1a24a86c54563021994e5d1464b2bf | import DataHandler as dh
import WaypointDistributionNN as wdnn
import WaypointBaselineNN as wbnn
import numpy as np
import csv
from numpy.random import multivariate_normal as mltnrm
import torch
#from PlotTrajectory import PlotTraj
import matplotlib.pyplot as plt
def getSampleValues(n, value_range, isLogScale=False):
    """Return *n* values evenly spaced across *value_range* (inclusive).

    With isLogScale=True the spacing is uniform in log space (geometric).

    Args:
        n (int): number of samples (>= 1).
        value_range: two-element [low, high] with positive entries when
            isLogScale is True.
        isLogScale (bool): sample in log space instead of linearly.

    Returns:
        list[float]: the sampled values, endpoints included.
    """
    if n == 1:
        # A single sample: return the lower endpoint. The general formula
        # below divides by (n - 1) and would raise ZeroDivisionError.
        return [float(value_range[0])]
    if isLogScale:
        value_range = np.log(np.array(value_range)).tolist()
    values = (value_range[1] - value_range[0]) * (np.arange(0, n)) / (n - 1) + value_range[0]
    if isLogScale:
        values = np.exp(np.array(values))
    return values.tolist()
def Train1Prob(dx, v0x, vf, obs_t, obs_offset, use_baseline=True):
    """Train the waypoint-distribution policy on a single problem instance.

    Logs per-iteration statistics to wpt_data.csv and finally re-evaluates
    the best mean waypoint found.

    Args:
        dx, v0x, vf: boundary conditions forwarded to the DataHandler.
        obs_t, obs_offset: obstacle parameterization.
        use_baseline (bool): subtract a learned value baseline from the
            sampled costs before the policy update (variance reduction).
    """
    data_handler = dh.DataHandler(100, "optimal_nn.csv", "eval_nn.csv", True, 1)
    T_opt, _, _, x = data_handler.getOptimalSolution(
        dx, v0x, vf, obs_t, obs_offset)
    # NOTE(review): obstacle coordinates appear to live at indices 13/14 of
    # the solution vector — confirm against DataHandler's layout.
    obs_x=x[13]
    obs_y=x[14]
    print(obs_x)
    print('done')
    print(obs_y)
    # Write the CSV header once; data rows are appended inside the loop.
    f = open("wpt_data.csv", "w+")
    f_writer = csv.writer(f, delimiter=',')
    if use_baseline:
        f_writer.writerow(["mu_x", "mu_y", "mu_vx", "mu_vy",
                "var_x", "var_y", "var_vx", "var_vy",
                "mu_cost", "avg_cost", "baseline_error"])
    else:
        f_writer.writerow(["mu_x", "mu_y", "mu_vx", "mu_vy",
                "var_x", "var_y", "var_vx", "var_vy",
                "mu_cost", "avg_cost"])
    f.close()
    x = np.ones(1)
    nsamples = 100
    net = wdnn.WaypointDistributionNN(len(x), 0.01, 1)
    baseline = wbnn.WaypointBaselineNN(len(x), 0.01, 1e2)
    fig,ax1=plt.subplots()
    n = 1000
    if use_baseline:
        data = np.zeros([n, 11])
    else :
        data = np.zeros([n, 10])
    for count in range(n):
        mu, S = net(x)
        data[count,0:4] = mu
        data[count,4:8] = np.diag(S[0,:])
        print(mu)
        print(np.sum(S))
        # Cost of the distribution's mean waypoint.
        T, T_col = data_handler.Evaluate(dx, v0x, vf, mu[0,:], obs_x, obs_y)
        C = data_handler.GetCost(T_opt, T_col, T)
        data[count, 8] = C
        print("Cost at mu:")
        print(C)
        # Sample waypoints from the current Gaussian policy.
        wpts = mltnrm(mu[0,:], S[0,:], nsamples)
        # if count>990:
        # PlotTraj(dx, v0x, vf, obs_t, obs_offset,wpts,ax1)
        #
        Cs = []
        C_tot = 0
        print("average cost of distribution:")
        for i in range(nsamples):
            T, T_col = data_handler.Evaluate(
                dx, v0x, vf, wpts[i,:], obs_x, obs_y)
            C = data_handler.GetCost(T_opt, T_col, T)
            Cs += [C/nsamples]
            C_tot += C
        print(C_tot/nsamples)
        data[count, 9] = C_tot/nsamples
        if use_baseline:
            # Advantage-style signal: -(cost + baseline prediction).
            deltas = - (np.array(Cs) + baseline(x))
            print("baseline error:")
            print(np.sum(np.abs(deltas)))
            data[count, 10] = np.sum(np.abs(deltas))
            baseline.update(-np.array(Cs), np.vstack([x]*nsamples))
            net.update(deltas, wpts, np.vstack([x]*nsamples))
        else:
            net.update(-np.array(Cs), wpts, np.vstack([x]*nsamples))
        # Append this iteration's statistics row.
        f = open("wpt_data.csv", "a")
        f_writer = csv.writer(f, delimiter=',')
        f_writer.writerow(data[count,:])
        f.close()
    best = np.argmin(data[:,8])
    print(data[best,:])
    data_handler.Evaluate(dx, v0x, vf, data[best,0:4], obs_x, obs_y)
def GetBestModel(clamp, lr, ss, n, steps, dx, v0x, vf, obs_t, obs_offset):
    """Train *n* freshly-initialized networks and track the best policy seen.

    Args:
        clamp, lr: hyperparameters forwarded to WaypointDistributionNN.
        ss (int): number of waypoint samples per update step.
        n (int): number of random restarts.
        steps (int): update steps per restart.
        dx, v0x, vf, obs_t, obs_offset: problem definition for the DataHandler.

    Returns:
        (best_cost, best_mu, best_sig): lowest average sampled cost observed
        across all restarts/steps, with the corresponding policy mean and
        diagonal of the covariance.
    """
    data_handler = dh.DataHandler(10, "optimal_nn.csv", "eval_nn.csv", True, 2)
    T_opt, _, _, x = data_handler.getOptimalSolution(
        dx, v0x, vf, obs_t, obs_offset)
    obs_x = x[13]
    obs_y = x[14]
    best_cost = float('inf')
    best_mu = float('inf')*np.ones(4)
    best_sig = float('inf')*np.ones(4)
    # Distinct loop variables throughout: the original reused `i` for both
    # the restart loop and the sample loop, shadowing the outer index.
    for restart in range(n):
        net = wdnn.WaypointDistributionNN(len(x), lr, clamp)
        for step in range(steps):
            mu, S = net(x)
            mu = mu[0,:]
            S = S[0,:,:]
            sig = np.diag(S)
            wpts = mltnrm(mu, S, ss)
            Cs = []
            C_tot = 0
            for k in range(ss):
                T, T_col = data_handler.Evaluate(
                    dx, v0x, vf, wpts[k,:], obs_x, obs_y)
                C = data_handler.GetCost(T_opt, T_col, T)
                Cs += [C/ss]
                C_tot += C
            C_avg = C_tot / ss
            if C_avg < best_cost:
                best_cost = C_avg
                best_mu[:] = mu
                best_sig[:] = sig
            net.update(-np.array(Cs), wpts, np.vstack([x]*ss))
    return best_cost, best_mu, best_sig
def HyperSearch(dx, v0x, vf, obs_t, obs_offset):
    """Grid-search clamp / learning-rate / sample-size hyperparameters.

    Saves the full grid of best costs and policy parameters to
    "hyper_search.np" and returns the (mu, sigma) of the overall best cell.
    """
    clamp_values = getSampleValues(10, [1e-1, 1e4], True)
    lr_values = getSampleValues(10, [1e-5, 1e-1], True)
    # `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # `int` is the documented replacement.
    ss_values = np.floor(getSampleValues(5, [1e0, 2e2], False)).astype(int)
    n = 5
    steps = 10
    best_costs = float('inf')*np.ones([10,10,5])
    best_values = np.zeros([10,10,5,8])
    hyper = np.zeros([10,10,5,3])
    for clamp_idx, clamp in enumerate(clamp_values):
        for lr_idx, lr in enumerate(lr_values):
            for ss_idx, ss in enumerate(ss_values):
                hyper[clamp_idx, lr_idx, ss_idx, :] = np.array([clamp, lr, ss])
                C, mu, sig = GetBestModel(clamp, lr, ss, n, steps, dx,
                                          v0x, vf, obs_t, obs_offset)
                best_costs[clamp_idx, lr_idx, ss_idx] = C
                best_values[clamp_idx, lr_idx, ss_idx, 0:4] = mu
                best_values[clamp_idx, lr_idx, ss_idx, 4:] = sig
    print("Best Cost Found:")
    print(np.min(best_costs))
    np.savez("hyper_search.np", costs=best_costs,
             values=best_values, params=hyper)
    # Return the mu/sigma of the overall best cell; previously the function
    # returned the values from whatever grid cell happened to run last.
    best_idx = np.unravel_index(np.argmin(best_costs), best_costs.shape)
    mu = best_values[best_idx][0:4]
    sig = best_values[best_idx][4:]
    return mu, sig
def TestNet():
    """Sanity-check WaypointDistributionNN on a toy objective.

    The cost is simply the sampled waypoint's norm, so with a working policy
    gradient the printed mean should shrink toward the origin.
    """
    # net = wdnn.WaypointDistributionNN(4, 0.01) # learns mu, not sigma
    net = wdnn.WaypointDistributionNN(1, 0.001, 1e2)
    nsamples = 100
    x = np.ones(1)
    count = 0
    while count < 1000:
        count += 1
        mu, S = net(x)
        print(mu)
        #print(S[0,:])
        print(np.sum(S))
        wpts = mltnrm(mu[0,:], S[0,:], nsamples)
        print(wpts.shape)
        # Reward = negative norm of each sample.
        Cs = np.linalg.norm(wpts, axis=1)
        #print(Cs)
        print(np.sum(Cs)/nsamples)
        net.update(-Cs, wpts, np.vstack([x]*nsamples))
    print(S)
def TestBaseLine():
    """Sanity-check WaypointBaselineNN.

    The true cost of each sampled point is its norm; the printed prediction
    error should fall as the baseline network trains.
    """
    net = wbnn.WaypointBaselineNN(4, 0.01, 1e2)
    nsamples = 100
    for i in range(100):
        S = np.identity(4)
        wpts = mltnrm(np.array([0,0,0,0]), S, nsamples)
        Cs = np.linalg.norm(wpts, axis=1)
        # Residual between known cost and baseline prediction.
        deltas = - (Cs + net(wpts))
        print(np.sum(np.abs(deltas)))
        net.update(-Cs, wpts)
if __name__ == "__main__":
    # TestNet()
    # TestBaseLine()
    # Single fixed problem instance: displacement/velocity boundary
    # conditions with an obstacle at t=0.5 and zero lateral offset.
    dx = np.array([0, 1])
    v0x = 1
    vf = np.array([0, 1])
    obs_t=0.5
    obs_offset=0.0
    Train1Prob(dx, v0x, vf, obs_t, obs_offset)
    # HyperSearch(dx, v0x, vf, obs_t, obs_offset)
|
20,023 | 45153988c5ff3d9fb544def560a75365bac3e9f4 | #!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" OpenID Connect Client """
import logging
import urllib
from error import FlowUserInfoError
from error import FlowTokenInfoError
from tokeninfo import TokenInfo
from userinfo import UserInfo
from apiclient.anyjson import simplejson
import httplib2
from oauth2client.client import OAuth2WebServerFlow, OAuth2Credentials
from oauth2client.client import flow_from_clientsecrets
__author__ = "Maciej Machulak"
__maintainer__ = "Maciej Machulak"
__email__ = "mmachulak@google.com"
__copyright__ = "Copyright 2012 Google Inc. All Rights Reserved."
__license__ = "Apache License 2.0"
__version__ = "0.1"
__status__ = "Prototype"
GOOGLE_OPENIDCONNECT_SCOPE = "https://www.googleapis.com/auth/userinfo.profile"
GOOGLE_TOKENINFO_URI = "https://www.googleapis.com/oauth2/v1/tokeninfo"
GOOGLE_USERINFO_URI = "https://www.googleapis.com/oauth2/v1/userinfo"
def openidconnect_flow_from_clientsecrets(filename, scope = GOOGLE_OPENIDCONNECT_SCOPE, message=None):
    """Create OpenID Connect Flow from a clientsecrets file.
    Will create the right kind of Flow based on the contents of the clientsecrets
    file or will raise InvalidClientSecretsError for unknown types of Flows.
    Args:
        filename: string, File name of client secrets.
        scope: string or list of strings, scope(s) to request.
        message: string, A friendly string to display to the user if the
            clientsecrets file is missing or invalid. If message is provided then
            sys.exit will be called in the case of an error. If message in not
            provided then clientsecrets.InvalidClientSecretsError will be raised.
    Returns:
        A Flow object.
    Raises:
        UnknownClientSecretsFlowError if the file describes an unknown kind of Flow.
        clientsecrets.InvalidClientSecretsError if the clientsecrets file is
        invalid.
    """
    # NOTE(review): a check that the submitted scope contains the OpenID
    # Connect scope was apparently intended here but never implemented.
    # Reuse the stock OAuth2 clientsecrets parser, then rewrap its fields in
    # an OpenIDConnectFlow so the tokeninfo/userinfo steps become available.
    oauth_flow = flow_from_clientsecrets(filename,scope,message)
    return OpenIDConnectFlow(client_id = oauth_flow.client_id,
                             client_secret = oauth_flow.client_secret,
                             scope = oauth_flow.scope,
                             user_agent = oauth_flow.user_agent,
                             auth_uri = oauth_flow.auth_uri,
                             token_uri = oauth_flow.token_uri)
class VerifiedTokenCredentials(OAuth2Credentials):
    """Credentials verified with the TokenInfo endpoint."""
    def __init__(self, oauth_credentials, tokeninfo):
        """Copy an OAuth2Credentials field-by-field and attach its tokeninfo.

        Args:
            oauth_credentials: OAuth2Credentials returned by the token exchange.
            tokeninfo: TokenInfo, parsed response from the TokenInfo endpoint.
        """
        OAuth2Credentials.__init__(self,
            oauth_credentials.access_token,
            oauth_credentials.client_id,
            oauth_credentials.client_secret,
            oauth_credentials.refresh_token,
            oauth_credentials.token_expiry,
            oauth_credentials.token_uri,
            oauth_credentials.user_agent,
            oauth_credentials.id_token)
        self.tokeninfo = tokeninfo
class OpenIDConnectCredentials(VerifiedTokenCredentials):
    """OpenID Connect Credentials received from the UserInfo endpoint."""
    def __init__(self, verified_token_credentials, userinfo):
        """Extend verified credentials with the UserInfo payload.

        Args:
            verified_token_credentials: VerifiedTokenCredentials to extend.
            userinfo: UserInfo, parsed response from the UserInfo endpoint.
        """
        VerifiedTokenCredentials.__init__(self,
            verified_token_credentials,
            verified_token_credentials.tokeninfo)
        self.userinfo = userinfo
class OpenIDConnectFlow(OAuth2WebServerFlow):
    """Does the OpenID Connect flow.

    Extends the standard OAuth 2.0 web-server flow with token verification
    (TokenInfo) and user-profile retrieval (UserInfo) steps.  Python 2 era
    code (urllib.urlencode, oauth2client).
    """
    def __init__(self,
                 scope=GOOGLE_OPENIDCONNECT_SCOPE,
                 tokeninfo_uri=GOOGLE_TOKENINFO_URI,
                 userinfo_uri=GOOGLE_USERINFO_URI,
                 **kwargs):
        """Constructor for OpenIDConnectFlow.
        Args:
            tokeninfo_uri: string, URI for TokenInfo endpoint. For convenience
                defaults to Google's endpoints but any OAuth 2.0 provider can be
                used.
            userinfo_uri: string, URI for UserInfo endpoint. For convenience
                defaults to Google's endpoints but any OAuth 2.0 provider can be
                used.
            **kwargs: dict, The keyword arguments require the following parameters
                - client_id: string, client identifier.
                - client_secret: string client secret.
                - scope: string or list of strings, scope(s) of the
                  credentials being requested.
                - user_agent: string, HTTP User-Agent to provide for
                  this application.
                - auth_uri: string, URI for authorization endpoint.
                  For convenience defaults to Google's endpoints but
                  any OAuth 2.0 provider can be used.
                - token_uri: string, URI for token endpoint. For
                  convenience defaults to Google's endpoints but
                  any OAuth 2.0 provider can be used
                - any other optional parameters for OAuth 2.0
        """
        super(OpenIDConnectFlow, self).__init__(scope = scope, **kwargs)
        self.tokeninfo_uri = tokeninfo_uri
        self.userinfo_uri = userinfo_uri
    def step3_verify_access_token(self, credentials, http=None):
        """Verifies access token at the TokenInfo endpoint.
        Args:
            credentials
        Returns:
            VerifiedTokenCredentials
        Raises:
            FlowTokenInfoError
        """
        if http is None:
            http = httplib2.Http()
        resp, content = http.request(self.tokeninfo_uri,
            method="POST",
            body=urllib.urlencode({'access_token': credentials.access_token}),
            headers={'Content-Type': 'application/x-www-form-urlencoded'}
            )
        if resp.status == 200:
            # Process the response
            d = simplejson.loads(content)
            tokeninfo = TokenInfo(d)
            logging.debug('Successfully retrieved token info: %s' % tokeninfo)
            verified_token_credentials = VerifiedTokenCredentials(credentials,
                                                                  tokeninfo)
            # Perform checks on the token info: the token must have been
            # issued to this client and must not be expired.
            if verified_token_credentials.tokeninfo.audience \
                    != credentials.client_id:
                logging.error('token issued for a different client ' \
                              '- issued to %s, '
                              'expected %s.' %
                              (verified_token_credentials.tokeninfo.audience,
                               credentials.client_id))
                raise FlowTokenInfoError('invalid token')
            if int(verified_token_credentials.tokeninfo.expires_in) < 1:
                logging.error('token expired')
                raise FlowTokenInfoError('token expired')
            return verified_token_credentials
        else:
            logging.error('Failed to retrieve token info: %s' % content)
            error_msg = 'Invalid token info response %s.' % resp['status']
            # Prefer the provider's own error string when the body parses.
            try:
                data = simplejson.loads(content)
                if 'error' in data:
                    error_msg = data['error']
            except Exception:
                pass
            raise FlowTokenInfoError(error_msg)
    def step4_userinfo(self, credentials, http=None):
        """Obtains UserInfo from the UserInfo endpoint.
        Args:
            credentials
        Returns:
            OpenIDConnectCredentials
        Raises:
            FlowUserInfoError
        """
        if http is None:
            http = httplib2.Http()
        # Sign the request with the (already verified) access token.
        http = credentials.authorize(http)
        resp, content = http.request(self.userinfo_uri)
        if resp.status == 200:
            d = simplejson.loads(content)
            userinfo = UserInfo(d)
            logging.debug('Successfully retrieved user info: %s' % userinfo)
            return OpenIDConnectCredentials(credentials, userinfo)
        else:
            logging.error('Failed to retrieve user info: %s' % content)
            error_msg = 'Invalid user info response %s.' % resp['status']
            try:
                data = simplejson.loads(content)
                if 'error' in data:
                    error_msg = data['error']
            except Exception:
                pass
            raise FlowUserInfoError(error_msg)
    def step234_exchange_and_tokeninfo_and_userinfo(self, code, http=None):
        """Exchanges authorization for token, then validates the token and
        obtains UserInfo.
        Args:
            code
        Returns:
            OpenIDConnectCredentials
        Raises:
            FlowUserInfoError
        """
        if http is None:
            http = httplib2.Http()
        logging.debug('exchanging code for access token')
        credentials = self.step2_exchange(code, http)
        logging.debug('verifing access token received from the IDP')
        credentials = self.step3_verify_access_token(credentials, http)
        logging.debug('using access token to access user info from the IDP')
        return self.step4_userinfo(credentials, http)
|
20,024 | 8ad254656be7071d36148e74c4fca09fa1fd72ca | """
【程序12】
题目:判断101-200之间有多少个素数,并输出所有素数。
1.程序分析:判断素数的方法:用一个数分别去除2到sqrt(这个数),如果能被整除,
则表明此数不是素数,反之是素数。
2.程序源代码:
"""
from math import isqrt


def is_prime(value):
    """Return True iff *value* is prime.

    Uses trial division up to sqrt(value), as the module docstring above
    describes (the previous implementation divided by every j < i).
    """
    if value < 2:
        return False
    return all(value % d for d in range(2, isqrt(value) + 1))


if __name__ == "__main__":
    # Print every prime in [101, 200], then the total count.
    primes = [i for i in range(101, 201) if is_prime(i)]
    for p in primes:
        print(p)
    print('101到200之间有%d个素数' % len(primes))
|
20,025 | aa7df4649f7cfb2f9ff991a690503d9e4d7babf6 | from datetime import timedelta
import airflow
from airflow.contrib.hooks import SSHHook
from airflow.contrib.operators.ssh_execute_operator import SSHExecuteOperator
from airflow.models import DAG
# SSH connection used by both tasks; configured in Airflow as "delta-crypto".
sshHook = SSHHook(conn_id="delta-crypto")
args = {
    'owner': 'airflow',
    'start_date': airflow.utils.dates.days_ago(1)
}
# Runs at minute 30 of every hour; catchup=False disables backfilling.
dag = DAG(
    dag_id='newcomers_top50', default_args=args,
    schedule_interval="30 * * * *",
    dagrun_timeout=timedelta(minutes=60),
    catchup=False
)
# Step 1: build the top-50 newcomers report; the command's result is pushed
# to XCom (xcom_push=True) for the downstream task.
create_newcomers_top50 = SSHExecuteOperator(
    task_id="create_newcomers_top50",
    bash_command="""sudo python /home/ec2-user/projects/crypto-analysis/entry.py newcomers --rank 50 --no 10 --latest""",
    ssh_hook=sshHook,
    xcom_push=True,
    dag=dag)
# Step 2: tweet the report, pulling the id produced by step 1 via XCom.
tweet_newcomers_top50 = SSHExecuteOperator(
    task_id='tweet_newcomers_top50',
    provide_context=True,
    bash_command="""sudo python /home/ec2-user/projects/crypto-analysis/entry.py tweet --rank 50 --id {{ ti.xcom_pull(task_ids='create_newcomers_top50') }}""",
    ssh_hook=sshHook,
    dag=dag)
create_newcomers_top50.set_downstream(tweet_newcomers_top50)
|
20,026 | 902664c0821f8d0b5b531d3e596fa40d75d2b0db | from core.Model import Model
from core.UIFactory import UIFactory
class Suite(Model):
    """Persistence model for a test suite.

    Stores three CSV paths (benches, units, sequences) and maps them to/from
    the row-list format used by the base Model's save()/load().
    """
    ID = "suite"
    def __init__(self, path):
        """Create a suite bound to its backing file at *path*."""
        super().__init__(path)
        self.path = path
        # CSV paths for each step; None until set via setCollection() or load().
        self.benches = None
        self.units = None
        self.sequences = None
    def getCollection(self, key):
        """Return the stored CSV path for 'benches'/'units'/'sequences', else None."""
        if key == "benches":
            return self.benches
        if key == "units":
            return self.units
        if key == "sequences":
            return self.sequences
        return None
    def setCollection(self, key, value):
        """Store *value* (converted to a relative path) under the given key.

        Unknown keys are silently ignored.
        """
        value = UIFactory.RelativePath(value)
        if key == "benches":
            self.benches = value
        if key == "units":
            self.units = value
        if key == "sequences":
            self.sequences = value
    def save(self):
        """Serialize the three paths into the row format expected by Model.save()."""
        self.data = [
            { "step": "benches", "csv_path": self.benches },
            { "step": "units", "csv_path": self.units },
            { "step": "sequences", "csv_path": self.sequences }
        ]
        super().save()
    def load(self):
        """Load rows via Model.load() and pick out the three step paths.

        Rows missing any of the expected fields (self.fields, defined by the
        base Model) are skipped.
        """
        super().load()
        for row in self.data:
            if not set(self.fields).issubset(row):
                continue
            if row["step"] == "benches":
                self.benches = row["csv_path"]
            if row["step"] == "units" :
                self.units = row["csv_path"]
            if row["step"] == "sequences":
                self.sequences = row["csv_path"]
    def getPath(self):
        """Return the backing file path."""
        return self.path
20,027 | e11c5016e95e60b3ddf79621b3464aa8a8fb1778 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from . import models
from django.template import loader
# def index(request):
# if request.method == 'POST':
# # save new post
# first_name = request.POST['first_name']
# last_name = request.POST['last_name']
# username = request.POST['username']
# try:
# user = models.User(first_name=first_name, last_name= last_name,
# username= username)
# user.save()
# return HttpResponse("sign up Done successfully.")
# except:
# return HttpResponse("something went wrong!")
#
# # Get all posts from DB
# else:
# template = loader.get_template('index.html')
# return HttpResponse(template.render({}, request))
from rest_framework_mongoengine import viewsets
from rest_framework.views import APIView
from polls.serializers import UserModelSerializer, FileSerializer
from polls.models import UserModel, StoredFiles
import os
from werkzeug.utils import secure_filename
from django.core.files.storage import default_storage, FileSystemStorage
from django.core.files.base import ContentFile
import uuid
import project.settings as settings
from datetime import datetime
from rest_framework.response import Response
import mimetypes
import urllib
import json
import pickle
import base64
import PIL
from django.core.cache import cache
from django.shortcuts import get_object_or_404
from rest_framework.decorators import action
ALLOWED_EXTENSIONS = set([ 'png', 'jpg', 'jpeg', 'gif'])
class UserModelViewSet(viewsets.ModelViewSet):
    """CRUD viewset for UserModel documents with a 12-hour read-through cache."""

    serializer_class = UserModelSerializer
    # permission_classes = (permissions.IsAuthenticated)
    # authentication_classes = (TokenAuthentication,)

    def get_queryset(self):
        return UserModel.objects.all()

    def list(self, request):
        """Return all users, serving from the 'usermodel' cache when warm."""
        if cache.get('usermodel'):
            data = cache.get('usermodel')
            return Response(data)
        queryset = UserModel.objects.all()
        serializer = UserModelSerializer(queryset, many=True)
        response = Response(serializer.data)
        cache.set('usermodel', response.data, 43200)  # 43200 s == 12 h
        return response

    def retrieve(self, request, id=None):
        """Return one user by id, serving from the per-id cache when warm."""
        if cache.get(id):
            data = cache.get(id)
            return Response(data)
        # BUG FIX: a `queryset = UserModel.objects.all()` local was built here
        # and never used; removed (querysets are lazy, so no behavior change).
        user = UserModel.objects.get(id=id)
        serializer = UserModelSerializer(user)
        response = Response(serializer.data)
        cache.set(id, response.data, 43200)
        return response

    @action(methods=['get'], detail=False)
    def get_name(self, request):
        # NOTE(review): placeholder action — returns None (HTTP 500 under DRF);
        # confirm whether this endpoint is still needed.
        return

    def create(self, request):
        pass

    def update(self, request, id=None):
        pass

    def partial_update(self, request, id=None):
        pass

    def destroy(self, request, pk=None):
        pass
class FileViewSet(APIView):
    """Upload (POST) and download (GET) endpoint for stored files.

    Uploads are written under MEDIA_ROOT/<date>/<hour>/<uuid>.<ext> and
    recorded in StoredFiles; images additionally get a 200x200 JPEG thumbnail.
    """
    serializer_class = FileSerializer
    # permission_classes = (permissions.IsAuthenticated)
    # authentication_classes = (TokenAuthentication,)

    def allowed_file(self, filename):
        # True only for image extensions (ALLOWED_EXTENSIONS, module constant).
        return '.' in filename and \
            filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS

    def post(self, request, *args, **kwargs):
        # NOTE(review): this header read discards its result — dead statement?
        request.META.get('HTTP_FILENAME')
        file = request.FILES.get('newfile')
        if not file is None and file.name:
            filename = secure_filename(file.name)
            # Random stored name keeps the original extension but hides the
            # user-supplied name on disk.
            temp_name = str(uuid.uuid4()) + '.' + filename.split('.')[-1]
            date_folder = datetime.utcnow().date()
            date_folder = datetime.strftime(date_folder, "%Y-%m-%d")
            hour_folder = str(datetime.utcnow().hour)
            tmp_file = os.path.join(settings.MEDIA_ROOT, date_folder+'/'+hour_folder+'/'+temp_name)
            path = default_storage.save(tmp_file, ContentFile(file.read()))
            StoredFiles.objects.get_or_create(file_name=filename, upload_path=path, stored_name=temp_name)
            #FileSystemStorage(location=tmp_file, base_url="negar").save("test", ContentFile(file.read()))
            url = request.build_absolute_uri()
            if self.allowed_file(file.name):
                # NOTE(review): `path` is the name returned by default_storage.save,
                # which may differ from `tmp_file` if a collision was renamed —
                # confirm PIL.Image.open(path) resolves on this storage backend.
                im = PIL.Image.open(path)
                size = 200, 200
                im.thumbnail(size)
                im.save(tmp_file.split('.')[:-1][0] + "_thumbnail.jpg", "JPEG")
                StoredFiles.objects.get_or_create(file_name=filename, upload_path=(tmp_file.split('.')[:-1][0] +
                                                  "_thumbnail.jpg"), stored_name=
                                                  (tmp_file.split('/')[-1].split('.')[0] + "_thumbnail.jpg"))
                return Response({'url': url + temp_name,
                                 'thumbnail': url + tmp_file.split('/')[-1].split('.')[0] + "_thumbnail.jpg"})
            else:
                return Response({'url': url + temp_name})
        # NOTE(review): plain dict (not a DRF Response) returned on failure —
        # APIView will likely error on this; confirm intended behavior.
        return {'error': 'something went wrong!'}

    def get(self, request, pk, format=None):
        if pk:
            # NOTE(review): [0] raises IndexError for an unknown stored_name.
            selected_file = StoredFiles.objects(stored_name=pk)[0]
            file_path = selected_file.upload_path
            original_filename = selected_file.file_name
            if os.path.exists(file_path):
                with open(file_path, 'rb') as fh:
                    # `type` shadows the builtin here (kept as-is).
                    type, encoding = mimetypes.guess_type(original_filename)
                    if type is None:
                        type = 'application/octet-stream'
                    #encoded = base64.encodebytes(fh.read()).decode("ascii")
                    response = HttpResponse(fh.read())
                    response['Content-Type'] = type
                    response['Content-Length'] = str(os.stat(file_path).st_size)
                    if encoding is not None:
                        response['Content-Encoding'] = encoding
                    response['Content-Disposition'] = 'inline; filename=' + os.path.basename(file_path)
                    if u'WebKit' in request.META['HTTP_USER_AGENT']:
                        # Safari 3.0 and Chrome 2.0 accepts UTF-8 encoded string directly.
                        filename_header = 'filename=%s' % original_filename
                    elif u'MSIE' in request.META['HTTP_USER_AGENT']:
                        # IE does not support internationalized filename at all.
                        # It can only recognize internationalized URL, so we do the trick via routing rules.
                        filename_header = ''
                    else:
                        # For others like Firefox, we follow RFC2231 (encoding extension in HTTP headers).
                        filename_header = 'filename*=UTF-8\'\'%s' % urllib.parse.quote(original_filename)
                    # The inline disposition set above is overwritten on purpose.
                    response['Content-Disposition'] = 'attachment; ' + filename_header
                    return response
            else:
                # Record points at a missing file: drop the stale record.
                selected_file.delete()
        return Response({'error': 'not found'})
|
20,028 | 03a403c6ade030d80def63b1c8cf7c96dbd00a3b | # -*- coding: utf-8 -*-
import os
class CGroup:
    """Read-only accessor for the parameter files of a cgroup directory."""

    def __init__(self, path):
        # Path of the cgroup directory (e.g. /sys/fs/cgroup/memory/foo).
        self.path = path

    # BUG FIX: the original also defined `def path(self): return self.path`,
    # which was permanently shadowed by the instance attribute assigned in
    # __init__ and therefore unreachable dead code; it has been removed.

    def param_path(self, param):
        """Return the full path of the file backing *param*."""
        return os.path.join(self.path, param)

    def read_first_line(self, param):
        """Return the first line of *param*, stripped, or None for an empty file.

        Raises FileNotFoundError if the parameter file does not exist.
        """
        file_path = self.param_path(param)
        with open(file_path, "r") as reader:
            for row in reader:
                if row:
                    return row.strip()

    def read_int(self, param):
        """Return *param* parsed as int; 0 on a missing file, empty or
        non-numeric content."""
        try:
            first_row = self.read_first_line(param)
        except FileNotFoundError:
            return 0
        try:
            return int(first_row) if first_row else 0
        except ValueError:
            return 0
|
20,029 | e2c9324776678dfd6622bfecf913c095d6f76cd2 | import dataset
distance_table = dataset.generate_distance_table()
device_list = dataset.generate_device_list() |
20,030 | a52fef20ea3f9b998befa759e7b0bb69748bf627 | # -*- coding: utf8
#这个文件依赖node.py生成所需要的nodes_list,与主程序的关系仅仅通过nodes_list.
#我们需要让它生出正确的nodes_list树,nodes_list本质上描述了一个可组织的可输入的逻辑对象
from ..packs import *
class node:
    """One node of the parameter tree.

    ntype tags the kind of node, obj holds its payload (class, callable or
    value) and nexts lists the names of its child nodes.
    """
    def __init__(self, ntype, name, obj, nexts):
        self.name = name
        self.obj = obj
        self.ntype = ntype
        self.nexts = nexts
def update(name,ntype,obj=None,nexts=None):
    # Register (or overwrite) a node named *name* in the module-global
    # nodes_list.  NOTE(review): ntype meanings inferred from call sites:
    # 'f' = estimator/function, 's' = selector, 'nn' = fixed value,
    # 'na' = plain parameter leaf — confirm.
    if ntype=='f':
        if (name!='Pipeline.steps'):
            # Recursively create one 'na' leaf per hyper-parameter of the
            # prototype instance registered for this estimator.
            params=instance[name].get_params(False)
            for i in params:
                iname=name+'.'+i
                if iname not in nodes_list:
                    update(iname,'na')
                else:
                    print iname+' has corred'
                '''
                if iname in bad:
                    bad[iname].append(name)
                else:
                    bad[iname]=[name]
                '''
            # Default children: all of this estimator's parameter leaves.
            if nexts==None:
                nexts=list(name+'.'+i for i in params.keys())
    nodes_list[name]=node(ntype,name,obj,nexts)
# Prototype instance for every supported sklearn component, keyed by class
# name; update() introspects these to discover hyper-parameters.
instance={
    'GridSearchCV':GridSearchCV(LogisticRegression(),{}),
    'Pipeline':Pipeline([('clf',LogisticRegression())]),
    'CountVectorizer':CountVectorizer(),'HashingVectorizer':HashingVectorizer(),'TfidfVectorizer':TfidfVectorizer(),
    'SelectKBest':SelectKBest(),
    'GenericUnivariateSelect':GenericUnivariateSelect(),
    'RFE':RFE(LogisticRegression()),
    'RFECV':RFECV(LogisticRegression()),
    'VarianceThreshold':VarianceThreshold(),
    'LogisticRegression':LogisticRegression(),'SGDClassifier':SGDClassifier(),
    'SVC':SVC(),'NuSVC':NuSVC(),'LinearSVC':LinearSVC(),
    'DecisionTreeClassifier':DecisionTreeClassifier(),
    'BaggingClassifier':BaggingClassifier(),'AdaBoostClassifier':AdaBoostClassifier(),'RandomForestClassifier':RandomForestClassifier(),#,'GradientBoostingClassifier':GradientBoostingClassifier(),
    'MultinomialNB':MultinomialNB(),
    'KNeighborsClassifier':KNeighborsClassifier()
}
#functions F
functions=[
    GridSearchCV,
    Pipeline,
    CountVectorizer,HashingVectorizer,TfidfVectorizer,
    GenericUnivariateSelect,RFE,RFECV,VarianceThreshold,SelectKBest,
    LogisticRegression,SGDClassifier,
    SVC,NuSVC,LinearSVC,
    DecisionTreeClassifier,
    BaggingClassifier,AdaBoostClassifier,RandomForestClassifier,#GradientBoostingClassifier,
    MultinomialNB,
    KNeighborsClassifier
]
#make selector nodes S
# Selector nodes: each maps a choice point to the component names selectable there.
selectors={
    'GridSearchCV.estimator':['Pipeline'],
    'steps.ext':['CountVectorizer','HashingVectorizer','TfidfVectorizer'],#to add
    'steps.sel':['GenericUnivariateSelect','RFE','RFECV','VarianceThreshold','SelectKBest'],
    'steps.clf':['LogisticRegression','SGDClassifier',
        'SVC','NuSVC','LinearSVC',
        'DecisionTreeClassifier',
        'BaggingClassifier','AdaBoostClassifier','RandomForestClassifier',#'GradientBoostingClassifier',
        'MultinomialNB',
        'KNeighborsClassifier']
}
# Fixed (non-searchable) values.
NNs={
    'SelectKBest.score_func':chi2
}
#------------------------------------------------------
#make nodes_list
# Builder for a Pipeline's steps triple (extractor, selector, classifier).
def yyyy(ext=None,sel=None,clf=None):
    return[('ext',ext),('sel',sel),('clf',clf)]
nodes_list={}
#make function nodes
#by the way, make na nodes
for i in functions:
    update(i.__name__,'f',obj=i)
update('Pipeline.steps','f',obj=yyyy,nexts=['steps.ext','steps.sel','steps.clf'])
#make selector nodes
for i in selectors:
    update(i,'s',nexts=selectors[i])
#make nn nodes
for i in NNs:
    update(i,'nn',NNs[i],[])
'''
fp=open('.main_keys_values','w')
fp.write('MAIN_KEYS_VALUES\n'+'-'*20+'\n')
fp.write('Here show keys and values for model_params\n')
fp.write('\t1. Keys include following s(selector)(must provided) and \n\tall function leave params(not show following, refer to sklearn\n')
fp.write('\t2. selector key related values is the indent string;leave params ralated values refer to sklearn\n\n')
def print_tree(name,n):
    anode=nodes_list[name]
    if anode.ntype!='na':
        fp.write(n*'\t'+name+'\t'+str(anode.ntype)+'\n')
    if anode.nexts!=None:
        for i in anode.nexts:
            print_tree(i,n+1)
#print nodes_tree
print_tree('GridSearchCV',0)
fp.close()
'''
#pdump('nodes_list_s',nodes_list)
|
20,031 | 788a07a8c5533a83c365d82da80996e2820f95ea | import random
if __name__ == "__main__":
    # Interactive number-guessing game: loop until the user hits the target.
    random_number = random.randint(1, 100)
    is_guessed = False
    while not is_guessed:
        guessed_number_string = input("Guess a number between 1 and 100: ")
        guessed_number_int = int(guessed_number_string)
        # BUG FIX: compare values with ==, not identity with `is`.  The original
        # only worked by accident of CPython's small-int cache (-5..256).
        if guessed_number_int == random_number:
            print("correct! the number is: {}".format(random_number))
            is_guessed = True
        elif guessed_number_int > random_number:
            print("your guess is too high")
        else:
            print("your guess is too low")
|
20,032 | 4e6fffcb1f106fb10fd8a35b845e590bf5b5724f | #!/usr/bin/env python3
# Print a sideways diamond of asterisks: the padding walks from `limit`
# down to 2 and back up, so the star band widens then narrows.
limit = 10
my_range = list(range(limit, 1, -1)) + list(range(1, limit + 1))
for width in my_range:
    margin = ' ' * width
    stars = '*' * (2 * (limit - width))
    print(margin + stars + margin)
|
20,033 | 3b644136ee117f32764e6d0e7e1d05d06bb1b40d | '''
Given a parentheses string s containing only the characters '(' and ')'. A parentheses string is balanced if:
Any left parenthesis '(' must have a corresponding two consecutive right parenthesis '))'.
Left parenthesis '(' must go before the corresponding two consecutive right parenthesis '))'.
For example, "())", "())(())))" and "(())())))" are balanced, ")()", "()))" and "(()))" are not balanced.
You can insert the characters '(' and ')' at any position of the string to balance it if needed.
Return the minimum number of insertions needed to make s balanced.
Example 1:
Input: s = "(()))"
Output: 1
Explanation: The second '(' has two matching '))', but the first '(' has only ')' matching. We need to add one more ')' at the end of the string to be "(())))" which is balanced.
Example 2:
Input: s = "())"
Output: 0
Explanation: The string is already balanced.
Example 3:
Input: s = "))())("
Output: 3
Explanation: Add '(' to match the first '))', Add '))' to match the last '('.
Example 4:
Input: s = "(((((("
Output: 12
Explanation: Add 12 ')' to balance the string.
Example 5:
Input: s = ")))))))"
Output: 5
Explanation: Add 4 '(' at the beginning of the string and one ')' at the end. The string becomes "(((())))))))".
Constraints:
1 <= s.length <= 10^5
s consists of '(' and ')' only.
'''
class Solution:
    def minInsertions(self, s: str) -> int:
        """Minimum insertions so every '(' is matched by two consecutive ')'.

        Greedy single pass: `owed` tracks how many ')' characters are still
        required by the '(' seen so far; `added` counts insertions made to
        repair odd/negative states on the fly.
        """
        added = 0
        owed = 0
        for ch in s:
            if ch == '(':
                # A '(' must start a fresh '))' pair; if we currently owe an
                # odd number of ')', insert one to close the half-open pair.
                if owed % 2:
                    added += 1
                    owed -= 1
                owed += 2
            else:
                owed -= 1
                # More ')' than owed: insert a '(' (which owes two, one of
                # which is this very character).
                if owed < 0:
                    added += 1
                    owed += 2
        # Whatever is still owed must be appended at the end.
        return added + owed
|
20,034 | 2ca1408497902f134bd4cf4f51965a2fa4201561 | from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()

# URL routes for the fpmonitor app (legacy Django `patterns()` style with
# string view paths).
urlpatterns = patterns(
    '',
    url(r'^admin/', include(admin.site.urls)),
    url(r'^$', 'fpmonitor.views.home', name='home'),
    url(r'^index$', 'fpmonitor.views.index', name='index'),
    url(r'^password_change/$', 'django.contrib.auth.views.password_change', {'template_name': 'password_change.html', 'post_change_redirect': '/index'}),
    url(r'^login$', 'fpmonitor.views.user_login', name='login'),
    url(r'^logout$', 'fpmonitor.views.user_logout', name='logout'),
    url(r'^api/v1/node/maintenance_mode$', 'fpmonitor.views.api_node_maintenance', name='api_node_maintenance'),
    url(r'^receive_data$', 'fpmonitor.views.receive_data', name='receive_data'),
    url(r'^node/(?P<node_id>[0-9]+)$', 'fpmonitor.views.show_node', name='show_node'),
    url(r'^delete_node/(?P<node_id>[0-9]+)$', 'fpmonitor.views.delete_node', name='delete_node'),
    url(r'^delete_address/(?P<address_id>[0-9]+)$', 'fpmonitor.views.delete_address', name='delete_address'),
    url(r'^alert_logs', 'fpmonitor.views.show_alert_logs', name='show_alert_logs'),
)
# Test-only API endpoints mounted at the root.
urlpatterns += patterns(
    '',
    (r'^', include('fpmonitor.test_api.urls')),
)
|
20,035 | 93cca15ea93eee4b0ab0f9ba18ec3f7a144b3266 | '''
Python script to test the CNN-LSTM Audio Emotion
Detection Model.
Outputs the predicted emotion and the prediction
probability for "./Audios/test_audio.wav" file.
'''
from tensorflow.keras.models import load_model
import tensorflow as tf
import numpy as np
from scipy.stats import zscore
import librosa
import datetime
# Load the pre-trained CNN-LSTM emotion model once at import time.
try:
    model = load_model('Models/[CNN-LSTM]Model.h5')
    # NOTE(review): TF1-era Keras call, presumably to pre-build the predict
    # graph for thread-safe inference — confirm against the installed Keras.
    model._make_predict_function()
except IOError:
    raise IOError("Could not find Voice Analysis model. Ensure model is present in: ./Models")
def mel_spectrogram(y, sr=16000, n_fft=512, win_length=256, hop_length=128, window='hamming', n_mels=128, fmax=4000):
    '''
    Compute the log-scaled mel spectrogram of signal *y*:
    STFT power -> mel filterbank -> dB (relative to the peak).
    '''
    stft = librosa.stft(y, n_fft=n_fft, window=window, win_length=win_length, hop_length=hop_length)
    power_spect = np.abs(stft) ** 2
    mel_bands = librosa.feature.melspectrogram(S=power_spect, sr=sr, n_mels=n_mels, fmax=fmax)
    log_mel = librosa.power_to_db(mel_bands, ref=np.max)
    return np.asarray(log_mel)
def frame(y, win_step=64, win_size=128):
    '''
    Slice *y* (shape: batch x features x time) along its last axis into
    overlapping windows of length win_size every win_step samples.
    Returns float16 frames of shape (batch, n_frames, features, win_size).
    '''
    n_frames = 1 + int((y.shape[2] - win_size) / win_step)
    frames = np.zeros((y.shape[0], n_frames, y.shape[1], win_size)).astype(np.float16)
    for idx in range(n_frames):
        start = idx * win_step
        frames[:, idx, :, :] = np.copy(y[:, :, start:start + win_size]).astype(np.float16)
    return frames
def predict_audio(chunk_step=16000, chunk_size=49100, predict_proba=True, sample_rate=16000):
    '''
    Method that loads a test audio file from the ./Audios directory
    and predicts emotion using the trained model.

    Prints a dict with the predicted emotion and its probability.
    '''
    _emotion = {0:'Angry', 1:'Disgust', 2:'Fear', 3:'Happy', 4:'Neutral', 5:'Sad', 6:'Surprise'}
    # NOTE(review): unused RAVDESS label map — kept for reference?
    label_dict_ravdess = {'02': 'NEU', '03':'HAP', '04':'SAD', '05':'ANG', '06':'FEA', '07':'DIS', '08':'SUR'}
    # Retrieve file from request
    filepath = "./Audios/test_audio.wav"
    max_pad_len = 49100
    # Read audio file
    y, sr = librosa.core.load(filepath, sr=sample_rate, offset=0.5)
    # Z-normalization
    y = zscore(y)
    # Padding or truncated signal to exactly max_pad_len samples
    if len(y) < max_pad_len:
        y_padded = np.zeros(max_pad_len)
        y_padded[:len(y)] = y
        y = y_padded
    elif len(y) > max_pad_len:
        y = np.asarray(y[:max_pad_len])
    # Split audio signals into chunks
    chunks = frame(y.reshape(1, 1, -1), chunk_step, chunk_size)
    # Reshape chunks
    chunks = chunks.reshape(chunks.shape[1],chunks.shape[-1])
    # Z-normalization (per chunk this time)
    y = np.asarray(list(map(zscore, chunks)))
    # Compute mel spectrogram
    mel_spect = np.asarray(list(map(mel_spectrogram, y)))
    # Time distributed Framing
    mel_spect_ts = frame(mel_spect)
    # Build X for time distributed CNN: (chunks, frames, mels, width, 1)
    X = mel_spect_ts.reshape(mel_spect_ts.shape[0],
                             mel_spect_ts.shape[1],
                             mel_spect_ts.shape[2],
                             mel_spect_ts.shape[3],
                             1)
    # Predict emotion: raw class probabilities, or label names when
    # predict_proba is False.
    if predict_proba is True:
        predict = model.predict(X)
    else:
        predict = np.argmax(model.predict(X), axis=1)
        predict = [_emotion.get(emotion) for emotion in predict]
    # Predict timestamp (end time in seconds of each chunk)
    timestamp = np.concatenate([[chunk_size], np.ones((len(predict) - 1)) * chunk_step]).cumsum()
    timestamp = np.round(timestamp / sample_rate)
    result = [predict,timestamp]
    # Only the first chunk's probabilities drive the reported emotion.
    result_np = np.array(result[0][0])
    probability = result_np.max()
    emotion = _emotion.get(result_np.argmax())
    # NOTE(review): this wall-clock timestamp is computed but never used.
    timestamp = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
    emo_dict = {"emotion_audio": str(emotion), "prediction_probability": str(probability)}
    print(emo_dict)

if __name__ == "__main__":
    predict_audio()
|
20,036 | 5c06128ca37a18ae3f00c52ee9b5e1c31e30dd86 | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#
# Author: Takahiro Oshima <tarotora51@gmail.com>
# License: MIT License
# Created: 2017-10-01
#
import unittest
from tutorial import countGridGraphSet
class TutorialTest(unittest.TestCase):
    """Tests for the tutorial's grid-graph counting helper."""

    def setUp(self):
        # BUG FIX: unittest only calls `setUp` (capital U); the original
        # `setup` was silently never invoked.
        print('setup Tutorial Test')

    def test_countGridGraphSet(self):
        # Known value for the 8x8 grid (assertEquals is a deprecated alias).
        self.assertEqual(countGridGraphSet(8), 3266598486981642)
def suite():
    """Build and return the TestSuite containing every TutorialTest case."""
    tests = unittest.TestSuite()
    tests.addTests(unittest.makeSuite(TutorialTest))
    return tests
|
20,037 | e5b82dc51b146a8e6dd2756df8ecb49c35ce62f5 | """
Palo Alto Networks Assignement - Kamal Qarain
Basic unit tests
"""
import unittest
import sys
import os.path
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from solution.scanner import request_data
from solution.helpers import is_valid_hash
SUCCESS_MSG = 'Scan finished, information embedded'
class TestValidation(unittest.TestCase):
"""
Test case for testing the method that checks for valid file hash
"""
def test_valid_md5(self):
self.assertTrue(is_valid_hash('4371a61227f8b7a4536e91aeff4f9af9'))
def test_valid_sha1(self):
self.assertTrue(is_valid_hash('6E0B782A9B06834290B24C91C80B12D7AD3C3133'))
def test_valid_sha256(self):
self.assertTrue(is_valid_hash('E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855'))
def test_invalid_hash(self):
self.assertFalse(is_valid_hash('randomstring123456789'))
class TestScannnerAPI(unittest.TestCase):
    """
    Test case for testing API response with a known malicious file
    """

    def test_known_file(self):
        # MD5 of a well-known malicious sample (WannaCry); the scan must
        # complete without raising.
        try:
            request_data('84c82835a5d21bbcf75a61706d8ab549')
        except Exception:
            # BUG FIX: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit; Exception is broad enough here.
            self.fail('Known file should return result with no errors, check logging file')


if __name__ == '__main__':
    unittest.main()
|
20,038 | ad1e184ca78ee663ce0633e2bebf1f787d287ed7 | # Problem 15 - Lattice paths
import math
import time as T
start=T.time()
def getRoutes(n):
    # Enumerate every n-bit string containing exactly n/2 ones; each such
    # string encodes one monotone lattice path (bit value = step direction).
    # NOTE(review): Python 2 semantics assumed — `n / 2` is integer division
    # and map() returns a list; under Python 3 this function would misbehave.
    # `max` shadows the builtin (kept as-is).
    data = list()
    i = 0
    half = n / 2
    max = (1 << n)
    while i < max:
        # Zero-pad the binary representation of i to n characters.
        s=bin(i)[2:]
        s='0'*(n-len(s))+s
        o = map(int,list(s))
        if (sum(o) == half):
            data.append(o)
        i += 1
    return data
def getLightRoutes(n):
    # Count n-bit strings with exactly n/2 ones without materializing them:
    # only scans i in [n, 2^n / 2) and doubles the count by symmetry.
    # NOTE(review): the start offset `i = n` skips small i values — verify the
    # doubling trick is exact by comparing against len(getRoutes(n)).
    # Python 2 semantics assumed (integer `/`, list-returning map()).
    ss = 0
    i = n
    half = n / 2
    max = (1 << n)
    while i < (max/2):
        s=bin(i)[2:]
        s='0'*(n-len(s))+s
        o = map(int,list(s))
        if (sum(o) == half):
            ss += 1
        i += 1
    return ss * 2
def makeBitWord(i, n):
    """Return i rendered in binary, zero-padded on the left to n characters."""
    word = bin(i)[2:]
    return word.rjust(n, '0')
DIM = 20
# print (DIM, getLightRoutes(DIM + DIM))
# https://math.stackexchange.com/questions/400041/number-of-equivalent-rectangular-paths-between-two-points
# Closed form: C(2*DIM, DIM) monotone paths across a DIM x DIM grid (Python 2 print).
print math.factorial(DIM + DIM) / (math.factorial(DIM)*math.factorial(DIM))
print("Executed in {0:.2f} sec").format(T.time()-start) |
20,039 | cc38d7a829a013e191cdf55b3e680e342ee51787 | #importing the libraries we need
import torch
import torchvision
from torch.utils.data import Dataset, DataLoader
from torch import nn
from layers.RAFT.raft import RAFT
import argparse
from layers import efficient_x3d_xs, r2plus1d_18, r2plus1d_50
class Stream_b (nn.Module):
    """Optical-flow stream: frozen RAFT flow estimation followed by a trainable
    3D-CNN video backbone ('x3d', 'r2plus1d' or 'r2plus1d_50').

    forward() expects clips shaped B x C x T x H x W (H, W divisible by 8)
    and returns backbone features of size out_dim per clip.
    """
    def __init__(self, model, out_dim, raft_parameters_path, device, raft_iters = 12 , trainable = True, ckpt_path: str = None):
        super().__init__()
        assert model in ['x3d', 'r2plus1d','r2plus1d_50'] , "models suported for stream B are 'x3d', 'r2plus1d', 'r2plus1d_50'"
        self.trainable = trainable
        self.ckpt_path = ckpt_path
        args = self.get_args()  # default RAFT hyper-parameters
        self.device = device
        self.out_dim = out_dim
        self.raft_iters = raft_iters
        # RAFT is loaded from raft_parameters_path and frozen below.
        self.raft_model = RAFT(args[0])
        self.raft_model = torch.nn.DataParallel(self.raft_model)
        self.raft_model.load_state_dict(torch.load(raft_parameters_path, map_location = device))
        self.raft_model = self.raft_model.module.to(self.device)
        for param in self.raft_model.parameters():
            param.requires_grad = False
        # BUG FIX: the original assigned nn.BatchNorm3d(2) to self.batch and
        # then immediately overwrote it with nn.BatchNorm3d(3); the first
        # assignment was dead and has been removed (final module state — and
        # its state_dict — are unchanged).
        # Maps the 2-channel flow field to 3 channels for the RGB backbones.
        self.conv3di = nn.Conv3d(2 , 3 , (3,3,3) , padding = 1)
        self.batch = nn.BatchNorm3d(3)
        if model == 'x3d':
            self.model = efficient_x3d_xs.E_x3d_xs(out_dim)
        elif model == 'r2plus1d':
            self.model = r2plus1d_18.R2plus1d(out_dim)
        elif model == 'r2plus1d_50':
            self.model = r2plus1d_50.R2plus1d_50(out_dim)

    def forward(self , images_batch):
        """Run RAFT on consecutive frame pairs, then classify the flow volume.

        images_batch: B x C x T x H x W (H and W must be divisible by 8).
        Returns: B x out_dim backbone output.
        """
        images_batch = images_batch.permute(0,2,1,3,4).to(self.device) #shape B x T x C x H x W
        raftout = []
        for images in images_batch:
            self.raft_model.eval()
            with torch.no_grad():
                # Flow between every pair of consecutive frames -> T-1 fields.
                _, raft_out = self.raft_model(images[:-1], images[1:], iters=self.raft_iters, test_mode=True)
            raftout.append(raft_out)
        raftout = torch.stack(raftout) #B , T-1 , C , H , W >>>> c = 2
        raftout = raftout.permute(0,2,1,3,4).to(self.device) #shape B x C x T-1 x H x W >>>> c = 2
        out = self.conv3di(raftout) #shape B x C x T-1 x H x W >>>>> C = 3
        out = self.model(out) #shape B x 512
        return out

    @staticmethod
    def get_args(argv=None):
        """Return (namespace, unknown) of RAFT options parsed from *argv*.

        argv=None (the default, preserving the original behavior) parses
        sys.argv; passing a list makes the method usable programmatically.
        """
        parser = argparse.ArgumentParser()
        parser.add_argument('--name', default='raft', help="name your experiment")
        parser.add_argument('--image_size', type=int, nargs='+', default=[384, 512])
        parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
        parser.add_argument('--small', action='store_true', help='use small model')
        parser.add_argument('--iters', type=int, default=12)
        parser.add_argument('--wdecay', type=float, default=.00005)
        parser.add_argument('--epsilon', type=float, default=1e-8)
        parser.add_argument('--clip', type=float, default=1.0)
        parser.add_argument('--dropout', type=float, default=0.0)
        parser.add_argument('--gamma', type=float, default=0.8, help='exponential weighting')
        parser.add_argument('--add_noise', action='store_true')
        args = parser.parse_known_args(argv)
        return args
|
20,040 | fc617c6ceba05d1ec015abb951056ec9d2f6d1c3 | import os
import json
import numpy as np
import argparse
import datetime
import chainer
from chainer import training
from chainer.training import extensions
from qanta.preprocess import preprocess_dataset
from qanta.datasets.quiz_bowl import QuizBowlDataset
from qanta.experimental.nn_guesser import nets
from qanta.experimental.nn_guesser.nlp_utils import convert_seq, transform_to_array
def get_quizbowl():
    """Load the Quiz Bowl guesser data and build the vocab/answer mappings.

    Returns (train, dev, word_to_i, i_to_class) where train/dev pair word-id
    sequences with class ids.
    """
    qb = QuizBowlDataset(guesser_train=True, buzzer_train=False)
    (
        train_x,
        train_y,
        dev_x,
        dev_y,
        i_to_word,
        class_to_i,
        i_to_class,
    ) = preprocess_dataset(qb.training_data())
    # Reserve ids 0 and 1 for the unknown and end-of-sequence tokens.
    i_to_word = ["<unk>", "<eos>"] + sorted(i_to_word)
    word_to_i = {word: index for index, word in enumerate(i_to_word)}
    train = transform_to_array(zip(train_x, train_y), word_to_i)
    dev = transform_to_array(zip(dev_x, dev_y), word_to_i)
    return train, dev, word_to_i, i_to_class
def main():
    """Train the NN guesser: parse CLI args, build the encoder/classifier,
    run the Chainer trainer and persist vocab/answers/model metadata."""
    current_datetime = "{}".format(datetime.datetime.today())
    parser = argparse.ArgumentParser(description="Chainer NN guesser.")
    parser.add_argument(
        "--batchsize",
        type=int,
        default=64,
        help="Number of examples in each mini-batch",
    )
    parser.add_argument(
        "--epoch",
        type=int,
        default=30,
        help="Number of sweeps over the dataset to train",
    )
    parser.add_argument(
        "--gpu", type=int, default=0, help="GPU ID (negative value indicates CPU)"
    )
    parser.add_argument(
        "--out", default="result/nn_guesser", help="Directory to output the result"
    )
    parser.add_argument(
        "--model",
        default="dan",
        choices=["cnn", "rnn", "dan"],
        help="Name of encoder model type.",
    )
    parser.add_argument("--resume", action="store_true", help="Resume training.")
    parser.add_argument(
        "--glove",
        default="data/external/deep/glove.6B.300d.txt",
        help="Path to glove embedding file.",
    )
    parser.set_defaults(resume=False)
    args = parser.parse_args()
    # On resume, reload the args saved by the previous run (then re-mark resume).
    if args.resume:
        with open(os.path.join(args.out, "args.json")) as f:
            args.__dict__ = json.loads(f.read())
        args.resume = True
    print(json.dumps(args.__dict__, indent=2))
    train, dev, vocab, answers = get_quizbowl()
    n_vocab = len(vocab)
    n_class = len(set([int(d[1]) for d in train]))
    # Fixed hyper-parameters (not exposed on the CLI).
    embed_size = 300
    hidden_size = 512
    hidden_dropout = 0.3
    output_dropout = 0.2
    gradient_clipping = 0.25
    print("# train data: {}".format(len(train)))
    print("# dev data: {}".format(len(dev)))
    print("# vocab: {}".format(len(vocab)))
    print("# class: {}".format(n_class))
    print("embedding size: {}".format(embed_size))
    print("hidden size: {}".format(hidden_size))
    print("hidden dropout: {}".format(hidden_dropout))
    print("output dropout: {}".format(output_dropout))
    print("gradient clipping: {}".format(gradient_clipping))
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    dev_iter = chainer.iterators.SerialIterator(
        dev, args.batchsize, repeat=False, shuffle=False
    )
    # Setup a model
    # NOTE(review): --model accepts "cnn" but no branch builds a CNN encoder,
    # so `encoder` would be unbound (NameError) for that choice — confirm.
    if args.model == "dan":
        encoder = nets.DANEncoder(
            n_vocab, embed_size, hidden_size, dropout=hidden_dropout
        )
    elif args.model == "rnn":
        encoder = nets.RNNEncoder(1, n_vocab, embed_size, hidden_size)
    model = nets.NNGuesser(encoder, n_class, dropout=output_dropout)
    if not args.resume:
        model.load_glove(args.glove, vocab, (n_vocab, embed_size))
    if args.gpu >= 0:
        # Make a specified GPU current
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()  # Copy the model to the GPU
    # Setup an optimizer
    optimizer = chainer.optimizers.Adam(alpha=0.001)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(1e-4))
    optimizer.add_hook(chainer.optimizer.GradientClipping(gradient_clipping))
    # Set up a trainer
    updater = training.StandardUpdater(
        train_iter, optimizer, converter=convert_seq, device=args.gpu
    )
    trainer = training.Trainer(updater, (args.epoch, "epoch"), out=args.out)
    # Evaluate the model with the dev dataset for each epoch
    trainer.extend(
        extensions.Evaluator(dev_iter, model, converter=convert_seq, device=args.gpu)
    )
    # Take a best snapshot
    record_trigger = training.triggers.MaxValueTrigger(
        "validation/main/accuracy", (1, "epoch")
    )
    trainer.extend(
        extensions.snapshot_object(model, "best_model.npz"), trigger=record_trigger
    )
    # Exponential decay of learning rate
    # trainer.extend(extensions.ExponentialShift('alpha', 0.5))
    # Write a log of evaluation statistics for each epoch
    trainer.extend(extensions.LogReport())
    trainer.extend(
        extensions.PrintReport(
            [
                "epoch",
                "main/loss",
                "validation/main/loss",
                "main/accuracy",
                "validation/main/accuracy",
                "elapsed_time",
            ]
        )
    )
    # Print a progress bar to stdout
    trainer.extend(extensions.ProgressBar())
    # Save vocabulary and model's setting
    if not os.path.isdir(args.out):
        os.mkdir(args.out)
    # current = os.path.dirname(os.path.abspath(__file__))
    vocab_path = os.path.join(args.out, "vocab.json")
    answers_path = os.path.join(args.out, "answers.json")
    with open(vocab_path, "w") as f:
        json.dump(vocab, f)
    with open(answers_path, "w") as f:
        json.dump(answers, f)
    model_path = os.path.join(args.out, "best_model.npz")
    model_setup = args.__dict__
    model_setup["vocab_path"] = vocab_path
    model_setup["answers_path"] = answers_path
    model_setup["model_path"] = model_path
    model_setup["n_class"] = n_class
    model_setup["datetime"] = current_datetime
    with open(os.path.join(args.out, "args.json"), "w") as f:
        json.dump(model_setup, f)
    if args.resume:
        print("loading model {}".format(model_path))
        chainer.serializers.load_npz(model_path, model)
    # Run the training
    trainer.run()


if __name__ == "__main__":
    main()
|
20,041 | 35967139a35564413f811ef802558ad38d617ade | import stormpy as st
from numpy import zeros, array, newaxis, reshape, vstack, concatenate, hstack, newaxis, nan, full
from ..mc.MC import MC
from ..mdp.MDP import MDP
from ..ctmc.CTMC import CTMC
from ..pctmc.PCTMC import PCTMC
from copy import deepcopy
import os
from sympy import symbols, sympify
def stormpyModeltoJajapy(h, actions_name: list = None, from_prism=False):
	"""
	Given a stormpy.SparseCtmc, stormpy.SparseDtmc, stormpy.SparseMdp, or
	stormpy.SparseParametricCtmc, it returns the equivalent jajapy model.
	The output object will be a jajapy.MC, jajapy.CTMC, jajapy.MDP or
	jajapy.PCTMC depending on the input.

	Parameters
	----------
	h : stormpy.SparseCtmc, stormpy.SparseDtmc, stormpy.SparseMdp or stormpy.SparseParametricCtmc
		The model to convert.
	actions_name : list of str, optional.
		If the model is an MDP, the name of the actions in the output
		model will be the one in this list. Otherwise they will be
		`a0,a1,a2,...`.
	from_prism : bool, optional.
		If True, CTMC rows are not rescaled by the exit rates.

	Returns
	-------
	jajapy.MC, jajapy.CTMC, jajapy.MDP or jajapy.PCTMC
		The same model in jajapy format.
	"""
	# BUG FIX: the original used a mutable default argument (`= []`); it was
	# never mutated here, but the None-sentinel form is the safe idiom.
	if actions_name is None:
		actions_name = []
	# ty encodes the model family: 0=DTMC, 1=CTMC, 2=MDP, 4=parametric CTMC.
	if type(h) == st.SparseDtmc:
		ty = 0
	elif type(h) == st.SparseCtmc:
		ty = 1
	elif type(h) == st.SparseMdp:
		ty = 2
	#elif type(h) == st.SparseParametricDtmc:
	#	ty = 3
	elif type(h) == st.SparseParametricCtmc:
		ty = 4
	else:
		raise TypeError(str(type(h))+' cannot be translated to Jajapy model.')
	labelling = [None for _ in range(len(h.states))]
	if ty == 2:
		# Collect the set of action names used anywhere in the MDP.
		actions = []
		for s in h.states:
			for a in s.actions:
				if len(actions_name) <= int(str(a)):
					actions.append('a'+str(a))
				else:
					actions.append(actions_name[int(str(a))])
		actions = list(set(actions))
		matrix = zeros((len(h.states),len(actions),len(h.states)))
	elif ty == 0 or ty == 1:
		matrix = zeros((len(h.states),len(h.states)))
	elif ty == 3 or ty == 4:
		# Parametric case: matrix cells index into t_expr (sympy expressions);
		# index 0 is reserved for the constant 0.0.
		matrix = zeros((len(h.states),len(h.states)),dtype='uint16')
		p_str = []
		p_v = {}
		p_i = []
		t_expr = [sympify(0.0)]
	add_init_state = None
	for si,s in enumerate(h.states):
		c = si
		temp = list(s.labels)
		if "deadlock" in temp:
			temp.remove("deadlock")
		temp.sort()
		if len(temp) == 0:
			labelling[si] = "empty"
		elif 'init' in temp and len(temp) > 1:
			# 'init' shared with other labels: jajapy needs a dedicated extra
			# initial state, appended after the loop (see add_init_state).
			temp.remove("init")
			labelling.append("init")
			labelling[si] = '_'.join(list(temp))
			add_init_state = c
		else:
			labelling[si] = '_'.join(list(temp))
		for a in s.actions:
			for t in a.transitions:
				dest = t.column
				t_val = t.value()
				if ty == 2:
					matrix[c][int(str(a))][dest] = t_val
				elif ty == 1 or ty == 0:
					matrix[c][dest] = t_val
				else:
					# Register every parameter occurring in this transition and
					# remember where (row, column) it appears.
					ps = [i.name for i in list(t_val.gather_variables())]
					if len(ps) == 1:
						ps = [symbols(ps[0])]
					elif len(ps) > 1:
						ps = list(symbols(" ".join(ps)))
					for v in ps:
						v = v.name
						if not v in p_str:
							p_str.append(v)
							p_i.append([])
							p_v[v] = nan
						p_i[p_str.index(v)].append([c,dest])
					t_val = sympify(str(t_val))
					# Constants are always appended; symbolic expressions are
					# deduplicated through t_expr.
					if t_val.is_real or not t_val in t_expr:
						matrix[c][dest] = len(t_expr)
						t_expr.append(t_val)
					else:
						matrix[c][dest] = t_expr.index(t_val)
		if ty == 1 and not from_prism:
			# stormpy stores CTMC rows as probabilities; rescale to rates.
			matrix[c] *= h.exit_rates[si]
	if add_init_state != None:
		# Append the dedicated initial state: a new row (and column) that moves
		# to the original init state with probability/rate 1.
		#matrix = vstack((matrix,matrix[add_init_state]))
		if ty == 2:
			matrix = vstack((matrix,zeros((1,matrix.shape[1],matrix.shape[2]))))
			matrix[-1].T[add_init_state] = full(matrix.shape[1],1.0)
			matrix = concatenate((matrix,zeros((matrix.shape[0],matrix.shape[1],1))),axis=2)
		elif ty == 1 or ty == 0:
			matrix = vstack((matrix,zeros((matrix.shape[0]))))
			matrix[-1][add_init_state] = 1.0
			matrix = hstack((matrix,zeros(len(matrix))[:,newaxis]))
		else:
			matrix = vstack((matrix,zeros((matrix.shape[0]),dtype='uint16')))
			t_val = sympify('1.0')
			matrix[-1][add_init_state] = len(t_expr)
			t_expr.append(t_val)
			matrix = hstack((matrix,zeros(len(matrix),dtype=('uint16'))[:,newaxis]))
	if ty == 0:
		return MC(matrix, labelling)
	elif ty == 1:
		return CTMC(matrix, labelling)
	elif ty == 2:
		return MDP(matrix,labelling,actions)
	#elif ty == 3:
	#	return PMC(matrix,labelling,p_v,p_i,p_str)
	elif ty == 4:
		return PCTMC(matrix,labelling,t_expr,p_v,p_i,p_str)
def jajapyModeltoStormpy(h):
	"""
	Convert a jajapy model (MC, CTMC, MDP or instantiated PCTMC) into the
	equivalent stormpy sparse model.

	Parameters
	----------
	h : jajapy.MC, jajapy.CTMC, jajapy.MDP or instantiated jajapy.PCTMC
		The model to convert.

	Returns
	-------
	stormpy.SparseDtmc, stormpy.SparseCtmc or stormpy.SparseMdp
		The same model in stormpy format. An instantiated PCTMC is first
		reduced to a CTMC and exported as a stormpy.SparseCtmc.

	Raises
	------
	ValueError
		If `h` is a non-instantiated PCTMC.
	TypeError
		If `h` is not one of the supported jajapy model classes.
	"""
	model_type = type(h)
	if model_type == MDP:
		return MDPtoStormpy(h)
	if model_type == CTMC:
		return CTMCtoStormpy(h)
	if model_type == MC:
		return MCtoStormpy(h)
	if model_type == PCTMC:
		# A PCTMC can only be exported once fully instantiated: reduce it
		# to a plain CTMC first, then export that.
		try:
			as_ctmc = PCTMCtoCTMC(h)
		except ValueError:
			raise ValueError("Cannot convert non-instantiated PCTMC to Stormpy.")
		return CTMCtoStormpy(as_ctmc)
	raise TypeError(str(type(h))+' cannot be translated to a stormpy sparse model.')
def _buildStateLabeling(h):
	"""Build a stormpy StateLabeling from the labelling of jajapy model `h`."""
	labeling = st.storage.StateLabeling(h.nb_states)
	# Declare every label used by the model, then attach each state's label.
	for label in h.getAlphabet():
		labeling.add_label(label)
	for state in range(h.nb_states):
		labeling.add_label_to_state(h.labelling[state], state)
	return labeling
def MDPtoStormpy(h):
	"""
	Given a jajapy.MDP, it returns the equivalent stormpy sparse model.
	The output object will be a stormpy.SparseMdp.

	Parameters
	----------
	h : jajapy.MDP
		The model to convert.

	Returns
	-------
	stormpy.SparseMdp
		The same model in stormpy format.
	"""
	labeling = _buildStateLabeling(h)
	n_actions = len(h.getActions())
	# Flatten the (state, action, state) tensor into a (state*action, state)
	# matrix: stormpy expects one row per state/action pair, grouped by state.
	flat = reshape(h.matrix.flatten(), (h.nb_states * n_actions, h.nb_states))
	transition_matrix = st.build_sparse_matrix(flat, [n_actions * i for i in range(h.nb_states)])
	choice_labeling = st.storage.ChoiceLabeling(h.nb_states * n_actions)
	for idx, action in enumerate(h.getActions()):
		choice_labeling.add_label(action)
		choice_labeling.add_label_to_choice(action, idx)
	# Reward of -1.0 for every state/action pair so that the number of
	# executed actions can be queried as an expected reward.
	action_reward = [-1.0 for _ in range(len(transition_matrix))]
	reward_models = {"nb_executed_actions": st.SparseRewardModel(optional_state_action_reward_vector=action_reward)}
	components = st.SparseModelComponents(transition_matrix=transition_matrix,
										  state_labeling=labeling,
										  reward_models=reward_models)
	components.choice_labeling = choice_labeling
	return st.storage.SparseMdp(components)
def MCtoStormpy(h):
	"""
	Given a jajapy.MC, it returns the equivalent stormpy sparse model.
	The output object will be a stormpy.SparseDtmc.

	Parameters
	----------
	h : jajapy.MC
		The model to convert.

	Returns
	-------
	stormpy.SparseDtmc
		The same model in stormpy format.
	"""
	# A discrete-time chain maps directly: probability matrix plus labels.
	components = st.SparseModelComponents(
		transition_matrix=st.build_sparse_matrix(h.matrix),
		state_labeling=_buildStateLabeling(h))
	return st.storage.SparseDtmc(components)
def CTMCtoStormpy(h):
	"""
	Given a jajapy.CTMC, it returns the equivalent stormpy sparse model.
	The output object will be a stormpy.SparseCtmc.

	Parameters
	----------
	h : jajapy.CTMC
		The model to convert.

	Returns
	-------
	stormpy.SparseCtmc
		The same model in stormpy format.
	"""
	labeling = _buildStateLabeling(h)
	exit_rates = array([h.e(s) for s in range(h.nb_states)])
	# stormpy stores the embedded probability matrix plus exit rates, so
	# normalise each row of the rate matrix by that state's exit rate.
	probs = deepcopy(h.matrix)
	probs /= exit_rates[:, newaxis]
	components = st.SparseModelComponents(transition_matrix=st.build_sparse_matrix(probs),
										  state_labeling=labeling,
										  rate_transitions=True)
	components.exit_rates = exit_rates
	return st.storage.SparseCtmc(components)
def PCTMCtoCTMC(h: PCTMC) -> CTMC:
	"""
	Translates a given instantiated PCTMC to an equivalent CTMC.

	Parameters
	----------
	h : PCTMC
		An instantiated PCTMC.

	Returns
	-------
	CTMC
		The equivalent CTMC.

	Raises
	------
	ValueError
		If `h` is a non-instantiated PCTMC.
	"""
	if not h.isInstantiated():
		raise ValueError("Cannot convert non-instantiated PCTMC to CTMC.")
	# Evaluate every (possibly symbolic) transition to its numeric rate.
	rates = zeros(h.matrix.shape)
	for src in range(h.nb_states):
		for dst in range(h.nb_states):
			rates[src, dst] = h.transitionValue(src, dst)
	return CTMC(rates, h.labelling, h.name)
def loadPrism(path: str):
	"""
	Load the model described in file `path` under Prism format.
	Remark: this function uses the stormpy parser for Prism files.

	Remarks
	-------
	For technical reasons, this function clears the terminal on usage.

	Parameters
	----------
	path : str
		Path to the Prism model to load.

	Returns
	-------
	jajapy.MC, jajapy.CTMC, jajapy.MDP or jajapy.PCTMC
		A jajapy model equivalent to the model described in `path`.
	"""
	# Try a strict parse first, then retry in prism-compat mode.
	try:
		program = st.parse_prism_program(path, False)
	except RuntimeError:
		program = st.parse_prism_program(path, True)
	# Non-parametric models build directly; parametric ones need the
	# dedicated builder.
	try:
		sparse_model = st.build_model(program)
	except RuntimeError:
		sparse_model = st.build_parametric_model(program)
	# Wipe the terminal (stormpy may leave progress output behind).
	os.system('clear' if os.name != "nt" else 'cls')
	return stormpyModeltoJajapy(sparse_model, from_prism=True)
20,042 | 3a5a9434840410ed4cbfe4da74bf43debe555c8f | COST_PER_WIDGET = 7.49
# COST_PER_WIDGET above is the price of a single widget.
# Ask for a quantity and convert the answer straight to an integer.
quantity = int(input('How many widgets do you want to buy? '))
if quantity == 1:
    print('One widget will cost you $' , COST_PER_WIDGET)
else:
    print(quantity, 'widgets will cost you $', quantity * COST_PER_WIDGET)
|
20,043 | 71278ee5efe7d702f4c06fa0fb960be60201efd3 | import datetime
import json
import math
import random
import re
from typing import Any, Dict, List
from chaoslib.exceptions import ActivityFailed
from chaoslib.types import Secrets
from kubernetes import client, stream
from kubernetes.client.models.v1_pod import V1Pod
from kubernetes.stream.ws_client import ERROR_CHANNEL, STDOUT_CHANNEL
from logzero import logger
from chaosk8s import _log_deprecated, create_k8s_api_client
__all__ = ["terminate_pods", "exec_in_pods"]
def terminate_pods(
    label_selector: str = None,
    name_pattern: str = None,
    all: bool = False,
    rand: bool = False,
    mode: str = "fixed",
    qty: int = 1,
    grace_period: int = -1,
    ns: str = "default",
    order: str = "alphabetic",
    secrets: Secrets = None,
):
    """
    Terminate a pod gracefully.

    Pods are selected by `label_selector` and/or `name_pattern`; when a name
    pattern is given, pods whose names do not match it are filtered out. With
    neither provided, every pod in the namespace is a candidate.

    If `all` is `True`, every matching pod is terminated. Otherwise `qty`
    pods are terminated: with `mode` "fixed" `qty` is an absolute count, with
    `mode` "percentage" it is a percentage (1-100) of the matching pods.
    Defaults are `mode="fixed"` and `qty=1`.

    With `order` set to "oldest" the candidates are sorted by creation
    timestamp (oldest first). With `rand` set to `True` the terminated pods
    are picked at random; otherwise the first n candidates are used.

    A non-negative `grace_period` (seconds) overrides each pod's default
    termination grace period.
    """
    v1 = client.CoreV1Api(create_k8s_api_client(secrets))
    selected = _select_pods(
        v1, label_selector, name_pattern, all, rand, mode, qty, ns, order
    )
    # Only a non-negative grace period overrides the pod's own default.
    if grace_period >= 0:
        delete_options = client.V1DeleteOptions(grace_period_seconds=grace_period)
    else:
        delete_options = client.V1DeleteOptions()
    terminated = []
    for pod in selected:
        v1.delete_namespaced_pod(pod.metadata.name, ns, body=delete_options)
        terminated.append(pod.metadata.name)
    return terminated
def exec_in_pods(
    cmd: str,
    label_selector: str = None,
    name_pattern: str = None,
    all: bool = False,
    rand: bool = False,
    mode: str = "fixed",
    qty: int = 1,
    ns: str = "default",
    order: str = "alphabetic",
    container_name: str = None,
    request_timeout: int = 60,
    secrets: Secrets = None,
) -> List[Dict[str, Any]]:
    """
    Execute the command `cmd` in the specified pod's container.
    Select the appropriate pods by label and/or name patterns.
    Whenever a pattern is provided for the name, all pods retrieved will be
    filtered out if their name do not match the given pattern.
    If neither `label_selector` nor `name_pattern` are provided, all pods
    in the namespace will be selected for termination.
    If `all` is set to `True`, all matching pods will be affected.
    Value of `qty` varies based on `mode`.
    If `mode` is set to `fixed`, then `qty` refers to number of pods affected.
    If `mode` is set to `percentage`, then `qty` refers to
    percentage of pods, from 1 to 100, to be affected.
    Default `mode` is `fixed` and default `qty` is `1`.
    If `order` is set to `oldest`, the retrieved pods will be ordered
    by the pods creation_timestamp, with the oldest pod first in list.
    If `rand` is set to `True`, n random pods will be affected
    Otherwise, the first retrieved n pods will be used

    :returns: one dict per pod the command ran in, with keys
        pod_name, exit_code, cmd, stdout and stderr.
    """
    if not cmd:
        raise ActivityFailed("A command must be set to run a container")
    api = create_k8s_api_client(secrets)
    v1 = client.CoreV1Api(api)
    pods = _select_pods(
        v1, label_selector, name_pattern, all, rand, mode, qty, ns, order
    )
    # The command is tokenized on whitespace only (no shell quoting rules).
    exec_command = cmd.strip().split()
    results = []
    for po in pods:
        logger.debug(
            f"Picked pods '{po.metadata.name}' for command execution {exec_command}"
        )
        # Skip pods that do not carry the requested container.
        # NOTE(review): when container_name is None, no container can match,
        # so every pod is skipped — confirm callers always pass container_name.
        if not any(c.name == container_name for c in po.spec.containers):
            logger.debug(
                f"Pod {po.metadata.name} do not have container named '{container_name}'"
            )
            continue
        # Use _preload_content to get back the raw JSON response.
        resp = stream.stream(
            v1.connect_get_namespaced_pod_exec,
            po.metadata.name,
            ns,
            container=container_name,
            command=exec_command,
            stderr=True,
            stdin=False,
            stdout=True,
            tty=False,
            _preload_content=False,
        )
        # Drive the exec websocket until the command finishes or the timeout hits.
        resp.run_forever(timeout=request_timeout)
        # The error channel carries a JSON status object describing the outcome;
        # stdout is read from its own channel.
        err = json.loads(resp.read_channel(ERROR_CHANNEL))
        out = resp.read_channel(STDOUT_CHANNEL)
        if err["status"] != "Success":
            error_code = err["details"]["causes"][0]["message"]
            error_message = err["message"]
        else:
            error_code = 0
            error_message = ""
        results.append(
            dict(
                pod_name=po.metadata.name,
                exit_code=error_code,
                cmd=cmd,
                stdout=out,
                stderr=error_message,
            )
        )
    return results
###############################################################################
# Internals
###############################################################################
def _sort_by_pod_creation_timestamp(pod: V1Pod) -> datetime.datetime:
    """Sort key: order pods by their creation timestamp (oldest first)."""
    return pod.metadata.creation_timestamp
def _select_pods(
    v1: client.CoreV1Api = None,
    label_selector: str = None,
    name_pattern: str = None,
    all: bool = False,
    rand: bool = False,
    mode: str = "fixed",
    qty: int = 1,
    ns: str = "default",
    order: str = "alphabetic",
) -> List[V1Pod]:
    """List pods in `ns`, filter them, order them, and trim to the requested
    quantity. Shared selection logic for terminate_pods/exec_in_pods."""
    # Validate the arguments up front so callers get a precise failure.
    if v1 is None:
        raise ActivityFailed("Cannot select pods. Client API is None")
    if qty < 0:
        raise ActivityFailed(f"Cannot select pods. Quantity '{qty}' is negative.")
    if mode not in ["fixed", "percentage"]:
        raise ActivityFailed(f"Cannot select pods. Mode '{mode}' is invalid.")
    if order not in ["alphabetic", "oldest"]:
        raise ActivityFailed(f"Cannot select pods. Order '{order}' is invalid.")
    # Fetch candidates, optionally restricted by a label selector.
    if label_selector:
        ret = v1.list_namespaced_pod(ns, label_selector=label_selector)
        logger.debug(
            f"Found {len(ret.items)} pods labelled '{label_selector}' in ns {ns}"
        )
    else:
        ret = v1.list_namespaced_pod(ns)
        logger.debug(f"Found {len(ret.items)} pods in ns '{ns}'")
    # Keep only pods whose names match the given pattern, when one is set.
    if name_pattern:
        pattern = re.compile(name_pattern)
        pods = []
        for candidate in ret.items:
            if pattern.search(candidate.metadata.name):
                pods.append(candidate)
                logger.debug(f"Pod '{candidate.metadata.name}' match pattern")
    else:
        pods = ret.items
    if order == "oldest":
        pods.sort(key=_sort_by_pod_creation_timestamp)
    if all:
        return pods
    count = qty
    if mode == "percentage":
        count = math.ceil((count * len(pods)) / 100)
    # Never ask for more pods than are actually available.
    count = min(count, len(pods))
    return random.sample(pods, count) if rand else pods[:count]
def delete_pods(
    name: str = None,
    ns: str = "default",
    label_selector: str = None,
    secrets: Secrets = None,
):
    """
    Delete pods by `name` or `label_selector` in the namespace `ns`.

    Deprecated: thin wrapper kept for backward compatibility, use
    `terminate_pods` instead.
    """
    _log_deprecated("delete_pods", "terminate_pods")
    # `name` used to be an exact name; terminate_pods treats it as a pattern.
    return terminate_pods(
        name_pattern=name, label_selector=label_selector, ns=ns, secrets=secrets
    )
|
20,044 | e6b211963fceb0797a33d860cee7dfd682a5c01e | """Este programa simula um robô de serviços, num restaurante com uma mesa de forma, tamanho e posição
aleatórios. Quando o utilizador clica na área da mesa, o robô inicia o serviço para essa mesa,
consistindo numa ida à mesa para receber um pedido, regresso ao balcão para preparar o pedido,
entrega do pedido à mesa, e regresso ao balcão. O robô tem uma bateria, pelo que tem que
ir a uma Docstation carregar, quando deteta que não vai conseguir finalizar o serviço."""
from graphics import*
import random
import time
import math
import menu
n=0
class Balcao:
    """Restaurant counter (bar), drawn as a brown rectangle."""
    def __init__(self, win, ponto1, ponto2): #Defines the counter
        self.ponto1=ponto1
        self.ponto2=ponto2
        self.balcao=Rectangle(ponto1, ponto2)
        self.balcao.setFill('brown')
        self.balcao.draw(win)
class Mesa:
    """Table with a random shape (circle or rectangle), size and position."""
    def __init__(self): #Defines the table
        self.centroX=[] #List with the X coordinates of the table centre
        self.centroY=[] #List with the Y coordinates of the table centre
        self.semilado=[] #List with the radius/half-side length of the table
    def desenhar(self, win):
        """Draw the table at a random position, with a random shape and size."""
        self.forma=random.randint(0,1) #0 means the table is circular, 1 rectangular
        self.centroX.append(random.randint(30, 350)) #Centre X varies between 30 and 350
        self.centroY.append(random.randint(30, 350)) #Centre Y varies between 30 and 350
        for i in range (2):
            self.semilado.append(random.randint(18, 40))
        if self.forma==0: #Circular table
            self.mesa=Circle(Point(self.centroX[0], self.centroY[0]), self.semilado[1])
            self.mesa.setFill('tan')
            self.mesa.draw(win)
        elif self.forma==1: #Rectangular table
            self.mesa=Rectangle(Point(self.centroX[0]-self.semilado[0], self.centroY[0]-self.semilado[1]),\
                Point(self.centroX[0]+self.semilado[0], self.centroY[0]+self.semilado[1]))
            self.mesa.setFill('tan')
            self.mesa.draw(win)
class Robot:
    """Service robot: a black circle with a battery indicator at its centre."""
    def __init__(self, win, centro, Robotraio): #Defines the robot
        self.centro=centro
        self.Robotraio=Robotraio
        self.robot=Circle(centro, Robotraio)
        self.robot.setFill('black')
        self.robot.draw(win)
        self.contador=contador=0 #Battery-usage counter, initialised to 0
        self.bateria=Circle(centro, Robotraio/3)
        self.bateria.setFill('lime green')
        self.bateria.draw(win)
    def Carregar(self, lc, hc, cor, contador): #Movement to/from the charger (lc along x, hc along y)
        self.bateria.setFill(cor)
        for i in range(1000):
            self.robot.move(lc,hc)
            self.bateria.move(lc,hc)
            update(200)
        self.contador=self.contador+math.fabs(lc)+math.fabs(hc)
    def Servico(self, lm, hm, contador): #One leg of the service movement [lm along x, hm along y]
        for i in range(1000):
            self.robot.move(lm, hm)
            self.bateria.move(lm, hm)
            update(200)
        self.contador=self.contador+math.fabs(lm*1000)+math.fabs(hm*1000)
    def Deslocacao(self, Mesa): #Movement
        # If the distance already travelled plus the four legs of the service
        # would reach the battery budget (3585), go charge first.
        if self.contador+4*(math.sqrt((self.dx*1000)**2+(self.dy*1000)**2))>=3585:
            self.Carregar(-375/1000, 0, 'red', self.contador) #Changes colour while going to charge
            self.Carregar(0, 40/1000, 'red', self.contador)
            self.bateria.setFill('blue') #Changes colour while charging
            self.contador=0
            time.sleep(2)
            self.Carregar(0, -40/1000, 'lime green', self.contador) #Back to the original colour
            self.Carregar(375/1000, 0, 'lime green', self.contador)
        for i in range (2):
            self.Servico(self.dx, self.dy, self.contador)
            time.sleep(2)
            self.Servico(-self.dx, -self.dy, self.contador)
            time.sleep(2)
    def Move (self, win, Mesa): #Defines the movement vectors and waits for clicks
        mesa=Mesa
        self.dx=(mesa.centroX[0]-self.centro.getX())/1000
        self.dy=(mesa.centroY[0]-self.centro.getY()+mesa.semilado[1]+15)/1000
        while n==0:
            self.posicao=win.getMouse()
            if mesa.forma==1: #Rectangular table
                if mesa.centroX[0]-mesa.semilado[0]<=self.posicao.getX()<=mesa.centroX[0]+mesa.semilado[0] and\
                   mesa.centroY[0]-mesa.semilado[1]<=self.posicao.getY()<=mesa.centroY[0]+mesa.semilado[1]: #Robot's path
                    self.Deslocacao(Mesa)
            if mesa.forma==0: #Circular table
                if math.sqrt((self.posicao.getX()-int(mesa.centroX[0]))**2+(self.posicao.getY()-int(mesa.centroY[0]))**2)<=int(mesa.semilado[1]):
                    self.Deslocacao(Mesa)
            if 450<=self.posicao.getX()<=500 and 0<=self.posicao.getY()<=50: #Back to the menu
                win.close()
                menu.menu()
class Docstation:
    """Charging station, drawn as a labelled red rectangle."""
    def __init__(self, win, vertice): #Draws the Docstation
        self.vertice=vertice
        self.docstation=Rectangle(Point(0,500), vertice)
        self.docstation.setFill('red')
        self.docstation.draw(win)
        Text(Point(50, 485), "Docstation").draw(win)
class Voltar:
    """Button that takes the user back to the menu."""
    def __init__(self, win): #Draws the back-to-menu button
        self.botao=Rectangle(Point(450, 0), Point(500, 50))
        self.botao.draw(win)
        Text(Point(475, 25), "Voltar").draw(win)
def terceiraA():
    """Set up the restaurant window and start the robot's click-event loop."""
    win = GraphWin("Restaurante", 750, 750)
    win.setCoords(0, 0, 500, 500)
    balcaoObj=Balcao(win, Point(350, 440), Point(500, 500))
    docs=Docstation(win, Point(100, 450))
    mesaObj=Mesa()
    mesaObj.desenhar(win)
    Voltar(win)
    robotObj=Robot(win, Point(425, 425), 10)
    robotObj.Move(win,mesaObj)
20,045 | 34a46515211d3f8d53e14243effc70b798e8406a | """Internal Certbot display utilities."""
import sys
import textwrap
from typing import List
from typing import Optional
from acme import messages as acme_messages
from certbot.compat import misc
def wrap_lines(msg: str) -> str:
    """Format lines nicely to 80 chars.

    :param str msg: Original message
    :returns: Formatted message respecting newlines in message
    :rtype: str
    """
    # Wrap each existing line independently so the caller's newlines survive.
    wrapped = [
        textwrap.fill(line, 80, break_long_words=False, break_on_hyphens=False)
        for line in msg.splitlines()
    ]
    return '\n'.join(wrapped)
def parens_around_char(label: str) -> str:
    """Place parens around first character of label.

    :param str label: Must contain at least one character
    """
    return f"({label[0]}){label[1:]}"
def input_with_timeout(prompt: Optional[str] = None, timeout: float = 36000.0) -> str:
    """Get user input with a timeout.

    Behaves the same as the builtin input, however, an error is raised if
    a user doesn't answer after timeout seconds. The default timeout
    value was chosen to place it just under 12 hours for users following
    our advice and running Certbot twice a day.

    :param str prompt: prompt to provide for input
    :param float timeout: maximum number of seconds to wait for input

    :returns: user response
    :rtype: str

    :raises errors.Error if no answer is given before the timeout
    """
    # Mimic the builtin input(): echo the prompt ourselves, unbuffered, as in
    # https://github.com/python/cpython/blob/baf7bb30a02aabde260143136bdf5b3738a1d409/Lib/getpass.py#L129
    if prompt:
        sys.stdout.write(prompt)
        sys.stdout.flush()
    response = misc.readline_with_timeout(timeout, prompt)
    # An empty read means EOF (readline always keeps the trailing newline).
    if not response:
        raise EOFError
    return response.rstrip('\n')
def separate_list_input(input_: str) -> List[str]:
    """Separate a comma or space separated list.

    :param str input_: input from the user
    :returns: strings
    :rtype: list
    """
    # Treat commas exactly like whitespace, then split on whitespace runs.
    # Each string is naturally unicode, this causes problems with M2Crypto SANs
    # TODO: check if above is still true when M2Crypto is gone ^
    return [str(token) for token in input_.replace(",", " ").split()]
def summarize_domain_list(domains: List[str]) -> str:
    """Summarize a list of domains for display.

    One domain       -> "example.com"
    Two domains      -> "example.com and www.example.com"
    N (> 2) domains  -> "example.com and N-1 more domains"

    :param list domains: `str` list of domains
    :returns: the domain list summary
    :rtype: str
    """
    if not domains:
        return ""
    # join() handles both the one- and two-domain cases.
    if len(domains) <= 2:
        return " and ".join(domains)
    return "{0} and {1} more domains".format(domains[0], len(domains) - 1)
def describe_acme_error(error: acme_messages.Error) -> str:
    """Returns a human-readable description of an RFC7807 error.

    :param error: The ACME error
    :returns: a string describing the error, suitable for human consumption.
    :rtype: str
    """
    # Prefer the RFC7807 title/detail pair, then the error type's
    # description, and finally fall back to the raw type identifier.
    title_detail = (error.title, error.detail)
    if any(title_detail):
        return ' :: '.join(part for part in title_detail if part is not None)
    if error.description:
        return error.description
    return error.typ
|
20,046 | 43d81c7b9482dbd9860275dead5083399d0636bd |
# coding: utf-8
# In[133]:
#!/usr/bin/env python
# import sys
# import os
import numpy as np
from PIL import Image
np.set_printoptions(suppress=True)
# In[134]:
# A) =Input=
# 1) the 2 image files of the stereo pair (in some folder img/):
# 20160824-174253-406-1.jpg
# 20160824-174253-406-2.jpg
date = "20160909-141139-078"
# for ipynb
fname_left = 'img/' + date + '-1.jpg'
fname_right = 'img/' + date + '-2.jpg'
# for cmd line run
# fname_left = os.path.abspath(sys.argv[0])
# fname_right = os.path.abspath(sys.argv[1])
# In[135]:
img_left = Image.open(fname_left).convert(mode='L')
img_right = Image.open(fname_right).convert(mode='L')
print """Images loaded as grayscale:
%s
%s""" % (fname_left, fname_right)
# In[136]:
# 2) Experiment configuration
# Txt files (in the config folder)
# * Affine + distortion coefficients for digital alignment of the stereo pair:
# file aff_dist.txt: a, b, c, d, e, f, eps1, eps2 -- 8 coefficients
# rX = a*lX + b*lY + e - eps1*z_x(lX, lY) + eps2*z_x(rX, rY)
# rY = c*lX + d*lY + f - eps1*z_y(lX, lY) + eps2*z_y(rX, rY),
# where approximately(!):
# z_x = (x-x0)*[ (x-x0)^2 +(y-y0)^2 ] = z_x(rX, rY) = z_x(lX, lY)
# z_y = (y-y0)*[ (x-x0)^2 +(y-y0)^2 ] = z_y(rX, rY) = z_y(lY, lY)
align_coeffs = np.loadtxt('config/aff_dist.txt')
print 'Align coeeficients:\n', align_coeffs
# In[137]:
# B) Algorithm for automated analysis of the stereo pair
# a) Preparation for the analysis:
# -- Alignment ("normalization") of the images so that they can be analysed.
a = align_coeffs[0];
b = align_coeffs[1];
c = align_coeffs[2];
d = align_coeffs[3];
e = align_coeffs[4];
f = align_coeffs[5];
eps1 = align_coeffs[6];
eps2 = align_coeffs[7];
# In[138]:
# Inverse of the 2x2 affine matrix [[a, b], [c, d]], used to map aligned
# coordinates back into the original left image.
det = a * d - b * c;
inv_a = d / det;
inv_b = -b / det;
inv_c = -c / det;
inv_d = a / det;
# In[139]:
def affine_transform_point(x, y):
    # Affine part of the calibration: (x, y) -> (a*x + b*y + e, c*x + d*y + f).
    return [a * x + b * y + e, c * x + d * y + f]
# In[140]:
def apply_affine(img_left, img_right):
    """Align the stereo pair using the module-level affine coefficients.

    The left image is resampled through the inverse affine transform while
    the right image is cropped; both outputs cover only the rectangle where
    the transformed left image and the original frame overlap, so they have
    identical sizes.

    Returns a (new_left_img, new_right_img) pair of grayscale PIL images.
    """
    width = img_left.width
    height = img_left.height
    aff_coord = np.zeros((4, 2))
    # affine transformation of the corner points
    aff_coord[0] = affine_transform_point(0, 0)
    aff_coord[1] = affine_transform_point(width, 0)
    aff_coord[2] = affine_transform_point(0, height)
    aff_coord[3] = affine_transform_point(width, height)
    # the rightmost (biggest by value) x-coordinate of the transformed
    # left-top and left-bottom x-coordinates
    x0 = int( max(aff_coord[0, 0], aff_coord[2, 0]) )
    # the lowermost (biggest by value) y-coordinate of the transformed
    # left-top and right-top y-coordinates
    y0 = int( max(aff_coord[0, 1], aff_coord[1, 1]) )
    # the leftmost (smallest by value) x-coordinate of the transformed
    # right-top and right-bottom x-coordinates
    x1 = int( min(aff_coord[1, 0], aff_coord[3, 0]) )
    # the uppermost (smallest by value) y-coordinate of the transformed
    # left-bottom and right-bottom y-coordinates
    y1 = int( min(aff_coord[2, 1], aff_coord[3, 1]) )
    # Clamp the overlap rectangle to the original image bounds.
    # n_x0 -- x-coordinate of the new left-bot point
    n_x0 = int( max(0, x0) )
    # n_y0 -- y-coordinate of the new left-bot point
    n_y0 = int( max(0, y0) )
    # n_x1 -- x-coordinate of the new right-top point
    n_x1 = int( min(width, x1) )
    # n_y1 -- y-coordinate of the new right-top point
    n_y1 = int( min(height, y1) )
    nw = n_x1 - n_x0 # new width
    nh = n_y1 - n_y0 # new height
    new_left_img = Image.new(mode='L', size=(nw, nh))
    new_right_img = Image.new(mode='L', size=(nw, nh))
    # Load pixmaps
    l_pix = img_left.load()
    r_pix = img_right.load()
    nl_pix = new_left_img.load()
    nr_pix = new_right_img.load()
    for y in xrange(n_y0, n_y1):
        for x in xrange(n_x0, n_x1):
            # Let's calculate backwards our original coordinates of the left image
            orig_x = int( (x - e) * inv_a + (y - f) * inv_b )
            orig_y = int( (x - e) * inv_c + (y - f) * inv_d )
            # assert(0 <= orig_x <= width)
            # assert(0 <= orig_y <= height)
            # paint new images with coordinates from (0,0) to (nw - 1, nh - 1)
            nl_pix[x - n_x0, y - n_y0] = l_pix[orig_x, orig_y]
            nr_pix[x - n_x0, y - n_y0] = r_pix[x, y]
    return (new_left_img, new_right_img)
# In[141]:
# Apply the alignment and replace the working images with the aligned pair.
img_left_n, img_right_n = apply_affine(img_left, img_right)
img_left = img_left_n
img_right = img_right_n
# In[142]:
# NOTE(review): bare expression with no effect -- leftover notebook-cell output.
fname_left[:-4]+"_aff_applied.png"
# In[143]:
# Persist the aligned images next to the originals.
img_left.save(fname_left[: -4] + "_aff_applied.png")
img_right.save(fname_right[: -4] + "_aff_applied.png")
|
20,047 | e4bd1f1ec981ce3a4ac67cadac8e09a4d6e17598 | class Flitter:
    def post(self, author, message):
        """
        Post a message
        :param author: The user name of the author
        :type author: string
        :param message: The message to be posted
        :type message: string
        :return: nothing
        :rtype: void
        """
        pass  # not implemented yet
    def get_feed_for(self, user):
        """
        Get messages in a users feed.
        :param user: The user to get messages for
        :type user: string
        :return: All the messages as a list of dicts with author and message
        :rtype: list(dict(author=string, message=string))
        """
        pass  # not implemented yet
    def follow(self, follower, followee):
        """
        Make one user follow another
        :param follower: The user who is following
        :type follower: string
        :param followee: The user being followed
        :type followee: string
        :return: nothing
        :rtype: void
        """
        pass  # not implemented yet
|
20,048 | cda529f15f7213dcecc3d82b1114cc3e95e2dc4d | """Data file for Period 4"""
import model # projects definitions are placed in different file
from flask import url_for
def setup():
    """Build and return the Period model describing Period 4's projects."""
    # Template kept as a reference for adding new project entries:
    """EXAMPLE = model.Project("Example", url_for('teacher_bp.index'), "/static/img/teacher.png", "Team Teacher",
                             ["John Mortensen", "Classroom of 40"],
                             "Visit a VANTA birds experience and see how it is made.")"""
    p4_slackbots = model.Project("Merch Website", "http://76.176.109.127:6969/", "/static/img/p4_slackbots.png",
                                 "P4Slackbots",
                                 ["Abhijay Deevi", "Kevin Do", "Travis Medley", "Paul Bokelman", "Gavin Theriault"],
                                 "This project is a merch website that we created for our Youtube channels, "
                                 "GodlyGoats and "
                                 "Albertpani Compani. We have a lot of merch you can buy and other information.")
    p4_hangman = model.Project("Music Website", url_for('p4_hangman_bp.index'), "/static/img/p4hangman.png",
                               "P4 Hangman",
                               ["Charlie Zhu", "Rohan Nallapati", "Rivan Nayak", "Sarah Xie", "Noah Pidding"],
                               "This website includes a portfolio of our projects we worked on this trimester as well "
                               "as a music section including three different genres of music with multiple examples "
                               "and descriptions of each.")
    # NOTE(review): "google.com" looks like a placeholder link -- confirm.
    p4_fruitycoders = model.Project("Photography Website", "google.com", "/static/img/p4_fruitycoders.png",
                                    "P4 fruitycoders",
                                    ["Sophie Lee", "Linda Long", "Maggie Killada", "Adam Holbel", "Wenshi Bao"],
                                    "Our website (Fruity Photos) features the history of photography, as well as the "
                                    "works "
                                    "and biographies of several famous photographers, such as Ansel Adams and Annie "
                                    "Leibovitz.")
    # Disabled project entry, deliberately kept for reference:
    """p4_coderjoes = model.Project("CoderJoes Store", url_for('p4_coderjoes_bp.index'), "/static/img/p4_coderjoes.png",
                                 "P4 Guessers",
                                 ["Lola Bulkin", "Grace Le", "Ryan Moghaddas", "William Cherres", "Brayden Basinger"],
                                 "CoderJoes is a virtual store where you can find recipes, ideas, and descriptions, "
                                 "as well as a group portfolio of our work over the trimester.")"""
    projects = [p4_slackbots, p4_hangman, p4_fruitycoders]#, p4_coderjoes
    period = model.Period("Period 4", "AP Principles of Computer Science - Python", projects)
    return period
|
20,049 | fd0b5eb9c303335168e4ed3ec8b76ee5b24369fe | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# 函数
# 无返回值函数 void
def showName(name):
print name
showName("fuck")
# 有返回值函数 return
def getName(name,age):
return str(age)+name
print getName("fuck",12)
# 缺省参数 最少得赋值一个参数,且缺省参数必须初始化,否则抛异常
def getParamters(var1,var2=12):
print var1,var2
getParamters(var1="sdf")
getParamters(var2=123,var1="666")
# 不定长参数
"""
你可能需要一个函数能处理比当初声明时更多的参数。这些参数叫做不定长参数,
和上述2种参数不同,声明时不会命名。基本语法如下:
def functionname([formal_args,] *var_args_tuple ):
"函数_文档字符串"
function_suite
return [expression]
"""
def printInfo(arg0,*args):
print "输出:"
print arg0
for item in args:
print item
return;
printInfo(10)
printInfo(10,11,12,13)
# 匿名函数
"""
lambda函数的语法只包含一个语句,如下:
lambda [arg1 [,arg2,.....argn]]:expression
"""
sum = lambda arg0,arg1:arg0+arg1;
print sum(10,10)
print sum(20,20)
# 全局变量
index = 0;
def setIndex():
global index
index = 1
def getIndex():
return index;
setIndex()
print getIndex() |
20,050 | 3de01a933b92cfbcf9c15167711d2b5cddfbd841 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
#%% LOAD DATA
# Read the data files.
ys1 = np.genfromtxt(fname='data/ys.csv', delimiter=',')
ts1 = np.genfromtxt(fname='data/ts.csv', delimiter=',')
# Convert numpy array to pandas DataFrame.
ys = pd.DataFrame(ys1)
ts = pd.DataFrame(ts1)
#%% MODEL FIT AND PREDICTION
# First order polynomial model.
# Parameters of the predictive model. ph is Prediction horizon, mu is Forgetting factor.
ph = 30
mu = 0.9
n_s = len(ys)
# Arrays that will contain predicted values.
tp_pred = np.zeros(n_s-1)
yp_pred = np.zeros(n_s-1)
# Real time data acquisition is here simulated and a prediction of ph minutes forward is estimated.
# At every iteration of the for cycle a new sample from CGM is acquired.
for i in range(2, n_s+1):
    # Only the samples acquired "so far" are visible to the model.
    ts_tmp = ts[0:i]
    ys_tmp = ys[0:i]
    ns = len(ys_tmp)
    # The mu**k coefficient represents the weight of the blood glucose sample
    # at k instants before the current sampling time. Last acquired sample's
    # weight is mu**k where k == 0, it has the greatest weight.
    weights = np.ones(ns)*mu
    for k in range(ns):
        weights[k] = weights[k]**k
    weights = np.flip(weights, 0)
    # MODEL
    # Perform an Ordinary least squares Linear Regression.
    lm_tmp = linear_model.LinearRegression()
    model_tmp = lm_tmp.fit(ts_tmp, ys_tmp, sample_weight=weights)
    # Coefficients of the linear model, y = mx + q
    m_tmp = model_tmp.coef_
    q_tmp = model_tmp.intercept_
    # PREDICTION
    # Extrapolate the fitted line ph minutes past the last acquired sample.
    tp = ts.iloc[ns-1,0] + ph
    yp = m_tmp*tp + q_tmp
    tp_pred[i-2] = tp
    yp_pred[i-2] = yp
print("mean square error : ")
print(mean_squared_error(ys1[1:],yp_pred))
#%% PLOT
# Hypoglycemia threshold vector.
t_tot = [l for l in range(int(ts.min()), int(tp_pred.max())+1)]
hypo = 70*np.ones(len(t_tot))
fig, ax = plt.subplots()
fig.suptitle('Glucose prediction', fontsize=14, fontweight='bold')
ax.set_title('mu = %g, ph=%g ' %(mu, ph))
ax.plot(tp_pred, yp_pred, '--', label='Prediction')
ax.plot(ts.iloc[:,0], ys.iloc[:,0], label='CGM data')
ax.plot(t_tot, hypo, label='Hypoglycemia threshold')
ax.set_xlabel('time (min)')
ax.set_ylabel('glucose (mg/dl)')
ax.legend()
20,051 | 6a5c55f1749833a23895b62345b23376d6a6ee57 | import time
import NeuralNetwork
import numpy as np
from lib import DisplayNetwork
from lib import Histogram
from lib import ImageFunctions
import matplotlib.pyplot as plt
from lib.TransferFunctions import sigmoid, linear
import commands
import os
__author__ = "Natasza Szczypien"
"""
This code is loading face and nonface datas from the 'data' folder.
The images are grey.
The images are transformed to matrixes 19x19 pixel and then to a vector 1x361 [361 = 19x19]
Next the faces and nonfaces are stacked in one list.
This list is pushed to the backpropagation network.
The calculation takes ~30 minutes.
"""
"""
This variables describes the folder with the Imagage data and the names of the images
"""
input_nodes = 361 # The images are transformed to matrixes 19x19 pixel and then to an array 1x361 [361 = 19x19]
hidden_nodes = 1600
output_nodes = 1 # true/false or face/nonface
positives_path = 'data/LFaceData1600'  # 1600 face images for training
positive_name = 'face'
positives_amound = 1600
positives_test_path = 'data/LFaceData400'  # 400 face images for testing
positive_test_name = 'face'
positives_test_amound = 400
negatives_path = 'data/LNonfaceData1600'
negative_name = 'B'
negatives_amound = 10  # NOTE(review): only 10 non-faces used for training vs 1600 faces -- confirm intended
negatives_test_path = 'data/LNonfaceData400'
negatives_test_amound = 400
file_name = '.jpg.jpg'  # the images were accidentally saved with a doubled extension
dir_path = os.path.dirname(os.path.realpath(__file__))
os.chdir(dir_path)  # run relative to this script's folder so data/ paths resolve
def prepare_target_list(how_many, target_value):
    """
    Build the training-target array: one single-element row per sample.

    :param how_many: number of target rows (one per training sample)
    :param target_value: the value every row should hold
    :return: numpy array of shape (how_many, 1); e.g. how_many=4,
             target_value=1 => [[1],[1],[1],[1]]
    """
    return np.array([[target_value] for _ in range(how_many)])
def check_output(output):
    """
    Print a face/non-face verdict for every network output value and
    collect the raw outputs.

    :param output: iterable of scalar network outputs (one per sample)
    :return: list with the same output values, in input order
    """
    y = []
    for i in output:
        print "outuput", i
        # positive rounded output -> classified as a face
        if round(i) > 0:
            print "face"
        else:
            print "non-face"
        y.append(i)
    return y
def test_network(bpn, test_data):
    """
    Run the trained network on a set of samples.

    :param bpn: the trained backpropagation network
    :param test_data: list of 1x361 image vectors to classify
    :return: the network output for every sample
    """
    DisplayNetwork.display_green("[INFO] Started to test the network")
    output = bpn.Run(np.array(test_data))
    return output
def prepare_image_list(path, image_name, i_range):
    """
    Load grey-scale 19x19 face/non-face images from *path* and flatten
    each one to a 1x361 vector [361 = 19x19].

    :param path: folder (relative to this script) containing the .jpg images
    :param image_name: unused -- immediately overwritten by the directory
        listing inside the loop below
    :param i_range: iterable of indices into the directory listing to load
    :return: numpy array of shape (len(i_range), 361)
    """
    DisplayNetwork.display_green("[INFO] Loading the images to train the network")
    positives = []
    # NOTE(review): relies on the unix `ls`/`xargs` tools via the Python 2
    # only `commands` module.
    file_list = commands.getoutput('ls ./' + path + '/*.jpg | xargs -n 1 basename').split("\n")
    for i in i_range:
        image_name = path + '/' + file_list[i]
        DisplayNetwork.display_yellow("[INFO] Loading image" + image_name)
        image_matrix = ImageFunctions.turnImageToGray(image_name) # Load image as gray
        reshaped = np.reshape(image_matrix, 361) # makes 19x19 matrix to 1x361 vector
        positives.append(reshaped.tolist())
    return np.array(positives)
def getPreZero(i):
    """
    Return the zero prefix needed to build a 5-digit image number,
    because unfortunately the images were saved as image_0000x.jpg.jpg.

    Relies on Python 2 integer division: ``(i % 100) / 10`` etc. truncate,
    so each successive ``if`` overrides the previous one once *i* reaches
    the next decade (e.g. i=7 -> '0000', i=42 -> '000', i=123 -> '00').

    :param i: image index (0 .. 99999)
    :return: the string of leading zeros to prepend to str(i)
    """
    pre_zero = ''
    # True for any i that is not a multiple of 10, and also for i == 0
    # (nonzero multiples of 10 are handled by the decade checks below).
    if bool(i % 10) != bool(i == 0):
        pre_zero = '0000'
    if round(((i % 100) / 10), 1):
        pre_zero = '000'
    if round(((i % 1000) / 100), 1):
        pre_zero = '00'
    if round(((i % 10000) / 1000), 1):
        pre_zero = '0'
    return pre_zero
def build_and_display_network():
    """
    Build the backpropagation network (361 -> 1600 -> 1, sigmoid hidden
    layer, linear output) with NeuralNetwork.py and display its layer
    dimensions with DisplayNetwork.py.

    :return: the freshly created backpropagation network
    """
    bpn = NeuralNetwork.BackPropagationNetwork((input_nodes, hidden_nodes, output_nodes),[None, sigmoid, linear])
    DisplayNetwork.displayLayers(bpn.matrixDimension)
    return bpn
def start_face_recognition():
    """
    Full pipeline: build the network, load face and non-face images, train,
    then plot output histograms for training and test sets.
    The whole run takes ~30 minutes.
    """
    start_time = time.time()
    print dir_path
    bpn = build_and_display_network()
    #-------------------------------------------------------------------------------
    """ Prepare list of images """
    # NOTE(review): only `negatives_amound` (=10) non-faces are loaded for
    # training versus 1600 faces -- confirm this imbalance is intended.
    faces = prepare_image_list(positives_path, positive_name, range(0, positives_amound))
    non_faces = prepare_image_list(negatives_path, negative_name, range(0, negatives_amound))
    """ Prepare the target """
    # Faces are trained towards +1.0, non-faces towards -1.0.
    target_faces = prepare_target_list(len(faces), 1.0)
    target_non_faces = prepare_target_list(len(non_faces), -1.0)
    target = np.concatenate((target_faces, target_non_faces), axis=0)
    #-------------------------------------------------------------------------------
    """ Train the network """
    trainning_data = np.concatenate((faces,non_faces),axis=0)
    y = NeuralNetwork.trainNetwork(bpn, trainning_data, target)
    #-------------------------------------------------------------------------------
    """ Testing the network """
    """ TRAINING DATA """
    result_traning_faces = test_network(bpn, faces)
    Histogram.plot('Identification threshold', 'Output value for NN', 'Density', 'Face - training data', result_traning_faces)
    result_traning_nonfaces = test_network(bpn, non_faces)
    Histogram.plot('Identification threshold', 'Output value for NN', 'Density', 'Nonface - training data',
                   result_traning_nonfaces)
    """ TEST DATA"""
    #FACES
    test_faces = prepare_image_list(positives_test_path, positive_test_name,
                                    range(0, positives_test_amound))
    result_faces = test_network(bpn, test_faces)
    Histogram.plot('Identification threshold', 'Output value for NN','Density','Face - test data', result_faces)
    # NONFACES
    test_non_faces = prepare_image_list(negatives_test_path, negative_name,
                                        range(0, negatives_test_amound))
    result_nonfaces = test_network(bpn, test_non_faces)
    Histogram.plot('Identification threshold', 'Output value for NN', 'Density', 'Nonface - test data', result_nonfaces)
    print "Execution time in seconds", time.time() - start_time
|
20,052 | 4b41407e477a4c8eba62e006a5d50a4fe06eef5a | #!/usr/bin/python
import sys,os,re
from inputArgs.inputArgs import inpHand,deglobb,isglobbed
from utilities.codedir import projectsdir,codedir,nodecodedir,scratchdir
from utilities.small_utilities import chomp,todayDate,Bye
from jobs.job import job,pastry,genJobList
#check if there's a logfile with succesful outcome
def jobcompleted(logfilename):
    """Return 1 when *logfilename* exists and its last line records a
    successful outcome ("exit mode = 0"), 0 otherwise."""
    if not os.path.exists(logfilename):
        return 0
    cmd = 'tail -1 ' + logfilename + '|grep "exit mode = 0" > /dev/null'
    # os.system returns 0 only when grep matched the success marker
    if os.system(cmd) == 0:
        return 1
    return 0
#generate log file name
def logname(identifier,header,outd):
    """
    Build the full path of the log file for one header.

    :param identifier: one-word job identifier; the header is appended
    :param header: target header (header[1] names the subdirectory branch)
    :param outd: possibly globbed output directory (/x/xxxxx style)
    :return: deglobbed outd + '/' + identifier + header + '.log'
    """
    branch='/'+header[1]+'/'+header  # NOTE(review): computed but unused here
    outd2=deglobb(outd,header)
    logfile=outd2+'/'+identifier+header+'.log'
    return logfile
#check input files exists. It is assume an input line of the form
#[/bin/mv] [-f] input1 input2 inputN targetDir
def check_input_files(inpcmd,logfile):
    """
    Check that every input file of an input command line exists.

    *inpcmd* is assumed to be of the form
        [/bin/mv] [-f] input1 input2 ... inputN targetDir
    i.e. the first token (the command) and the last token (the target
    directory) are not input files, and tokens starting with '-' are flags.

    When an input file is missing, a logfile is written reporting it --
    unless a pre-existing logfile already records a successful previous run
    ("exit mode = 0"), in which case we return success so the job can be
    submitted anyway (job.qsub is smart enough to NOT re-queue it).

    :return: 1 when all inputs exist (or a previous run succeeded), else 0
    """
    inputfine=1
    tokens=inpcmd.split() ;del tokens[0] ;del tokens[len(tokens)-1] #list of input files
    #cycle through all input files
    for token in tokens:
        if token[0]=='-': continue #it's a flag, not an input file
        #if the input file does not exist, report in the logfile.
        #We shell out to `ls` instead of os.path.exists because the input
        #may contain unix globbing.
        if os.system('ls '+token+' &>/dev/null')!=0:
            inputfine=0
            if os.path.exists(logfile): #pre-existing logfile with same name
                # BUGFIX: os.system returns 0 when grep DOES find the success
                # marker; the original tested the opposite and deleted the
                # logfile of a successful previous run.
                if os.system('grep "exit mode = 0" '+logfile+' > /dev/null')==0:
                    return 1 #previous job succeeded; missing input is harmless
                else:
                    pastry('/bin/rm '+logfile)
            pastry('touch '+logfile)
            pastry('echo "input file '+token+' missing. Can not submit job" >> '+logfile)
    return inputfine
# Command-line interface: inpHand declares every option once and
# ih.parse() below injects the parsed values directly into locals().
ih=inpHand('Usage: preditc_struct_list.py',
           ' -a _A_list (list of headers)',
           ' -b __header (single header, in place of -a option)',
           ' -c _AR_outd output directory where logfile will go (will substitute /x/xxxxx by appropriate)',
           ' -d __wallt walltime, in days (default: 7.0)',
           ' -e __xflags qsub may need these extra flags. Enclose them within double quotes',
           ' -f __inpcmd input command line (in double quotes, x/xxxxx globbing)',
           ' -g __execcmd executable command line, run from the temp dir (in double quotes)',
           ' -h __help outputs a help message (no arguments needed)',
           ' -i __outcmd output command line (in double quotes)',
           ' -j __submode submission mode of each job (sub,bsub,rsub) def=qsub',
           ' -k __memlimit minimum memory reservation (def=500 (Mb))',
           ' -l __filesize minimum disk space reservation (def=1000 (Mb))',
           ' -m __shared0 libline "dir1:tbz21,dir2:tbz22", globbing allowed (def: None)',
           ' -n _R_identifier one word for jobname and logfile name. Will append xxxxx',
           ' -o __fake do we create qsub script but do not send the job? (def:no)',
           ' -p __ngroup send N jobs to a single node to be run concurrently (def:1)',
           ' -q __joblist generate a current list of jobs in queing server (def:yes)'
           )
ih.parse(locals(),sys.argv)
# Fill in defaults for every optional flag that was not supplied.
if not submode: submode='qsub'
if not wallt: wallt='7.0'
if not memlimit: memlimit='500'
# NOTE(review): the help text above says filesize def=1000 (Mb) but the
# code default is '100' -- confirm which one is intended.
if not filesize: filesize='100'
if not header: header=None
if not shared0: shared0=None
if not ngroup:ngroup=1
else: ngroup=int(ngroup)
#joblist avoids calling qstat for every single job that we qsub
if not joblist or joblist[0] in ('y','Y'):
    joblist=genJobList()
else: joblist=[]
#print 'list=',list ; print 'outd=',outd ; print 'inpcmd=',inpcmd ; print 'outcmd=',outcmd
# j submits individual jobs; jj submits bundles of ngroup jobs at once.
j=job(exe='generic_job.py', exed=nodecodedir+'/python/combo_jobs', args='' )
jj=job(exe='generic_job.py', exed=nodecodedir+'/python/combo_jobs', args='' )
if not list and not header: ih.abort() #we need at least one of the two inputs
if header: listl=[header,] #create a single item list
else: listl=chomp(open(list,'r').readlines())
#We need escaping of ",`,$ when inside double quotes, because unfortunately
#these characters will be interpreted by the shell:
#example: junk.py -a "echo "Hello" "   #confusion with the "
#example: junk.py -a "echo $wd"        #$wd will be substituted with whatever value
#example: junk.py -a "wd=`cat junk`"   #wd will be initialized, i.e. the argument
#                                      #within quotes is interpreted, not passed literally
execcmd=execcmd.replace('\\','\\\\') #this substitution MUST come in the first place!
execcmd=execcmd.replace('"','\\"')
execcmd=execcmd.replace('`','\`')
execcmd=execcmd.replace('$','\$')
#cicle through all headers in list, prepare a job for each header
ithgroup=0
ninthebunch=0 #current number of jobs assigned to the current group of jobs
groupexeccmd='' #line containing ngroup scripts, one for each of the ngroup jobs
bunchlist=''
for header in listl:
branch='/'+header[1]+'/'+header
jobname=identifier+header
logname=jobname+'.log'
unixcommands=''
if inpcmd : unixcommands=' -a "'+inpcmd +'" '
if execcmd: unixcommands=unixcommands+' -b "'+execcmd+'" '
if outcmd : unixcommands=unixcommands+' -c "'+outcmd +'" '
j.args=unixcommands
j.args=deglobb(j.args,header) #deglobb switches xxxxx to header, and yyyyy to xxxxx
if shared0 and isglobbed(shared0): j.shared=deglobb(shared,header)
outdH=deglobb(outd,header) #directory where logname will go
logf=outdH+'/'+logname #full file name of log file
if jobcompleted(logf): #there's a log file with a successful outcome
sys.stdout.write('COMPLETED: '+jobname+'\n')
continue
if not os.path.exists(outdH): pastry('/bin/mkdir -p '+outdH)
allclear=True #flag input files are not missing
if inpcmd: #check that input files do exist
inpcmdH=deglobb(inpcmd,header)
if not check_input_files(inpcmdH,logf): allclear=False
if allclear:
if ngroup>1: #we're bundling jobs in bundles of ngroup jobs
#create a script for this single job and add to the list
groupexeccmd+=j.scriptGen(jobname,outdH,mem_limit=memlimit,libRev='',submode='sub')+' ; '
ninthebunch+=1
bunchlist+=jobname+' '
if ninthebunch==ngroup or header==listl[-1]: #bundle is full or last header
ithgroup+=1
jj.args=' -b "'+groupexeccmd+'" '
jobname=identifier+'G'+`ithgroup`
outdir=scratchdir+'/qsub/'+todayDate()
if shared0 and not isglobbed(shared0): jj.shared=shared0 #same lib for all jobs
sys.stdout.write('\nlist of jobnames included in '+jobname+':\n'+bunchlist+'\n')
getattr(jj,submode)(jobname,outdir,wallt=wallt,mem_limit=memlimit,
file_size=filesize,extraflags=xflags,fake=fake)
ninthebunch=0 #begin another bundle
groupexeccmd=''
bunchlist=''
else:
getattr(j,submode)(jobname,outdH,wallt=wallt,mem_limit=memlimit,
file_size=filesize,extraflags=xflags,fake=fake,
joblist=joblist)
sys.exit(0)
|
20,053 | 5d98b980786154389ff263b35eb982077c50fe85 | # En este ejercicio solo se presenta "mi primer proyecto"
print ("mi primer proyecto") |
20,054 | 48d203b77eba1f3dd8b14c2bb4b68cc970e9c55a | from django.shortcuts import render,redirect,reverse
from django.views.generic import View
#导入只接受GET请求和POST请求的装饰器
from django.views.decorators.http import require_GET,require_POST
#导入form验证用的表单
from .forms import Alterform,EditAlterform,Reviewform
#导入Alter_manage的模型
from Apps.Alter_management.models import Alter_managment,Alter_managment_checked
#导入我们重构的resful文件,用于返回结果代码和消息,详细可以看resful.py文件
from utils import resful
#导入分页用的类
from django.core.paginator import Paginator
#导入时间分类
from datetime import datetime,timedelta
#将时间标记为清醒的时间
from django.utils.timezone import make_aware
#用于模糊查询
from django.db.models import Q
#用于拼接url
from urllib import parse
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import permission_required
from django.contrib.admin.views.decorators import staff_member_required
from django.http import HttpResponse,JsonResponse
from .admin import Alter_managment_resources
from Apps.Alterauth.decorators import Alter_login_required
#导入数据库字典和变更类型字典
from Apps.Alter_Dict.models import Alt_Database,Alt_Type
# Create your views here.
def login(request):
    """Render the change-management login page."""
    return render(request,'Alter_management/login.html')
def index_manage(request):
    """Render the change-management index page."""
    return render(request,"Alter_management/index.html")
# @require_GET#只接受GET请求
# # class Alter_manager_view(View):#变更管理页面,返回数据
# def Alter_manager_view (request):#变更管理页面,返回数据
# Alterd_datas=Alter_managment.objects.all()
# context={
# 'Alterd_datas':Alterd_datas
# }
# return render(request,"Alter_management/Alter.html",context=context)
# * @函数名: Alter_manager_newview
# * @功能描述: 变更管理页面视图
# * @作者: 郭军
# * @时间: 2019-6-30 15:28:19
# * @最后编辑时间: 2019-9-9 16:57:39
# * @最后编辑者: 郭军
#@staff_member_required(login_url='login')
@method_decorator(Alter_login_required,name='dispatch')
# @method_decorator(permission_required('Alter_management.change_alter_managment',login_url='/alter/index/'),name="dispatch")
class Alter_manager_newview(View):# Change-management list page: filters + pagination
    def get(self,request):
        """
        Render the change list, applying the optional date-range, keyword,
        database-type and review-status filters from the query string,
        paginated at 2 records per page.
        """
        # Everything fetched with request.GET.get is a string.
        page = int(request.GET.get('p',1))# current page number as int; defaults to 1
        start=request.GET.get('start') # start date from the date-picker widget
        end =request.GET.get('end') # end date from the date-picker widget
        cxtj =request.GET.get('cxtj') # free-text query entered by the user
        # request.GET.get(param, default): the default is used only when the
        # parameter was not sent at all; an empty string is returned as-is,
        # which is why a pattern like ('ReviewStatus',0) or 0 can be needed.
        reviewStatus = int(request.GET.get('ReviewStatus',0)) # review-status filter; cast to int so the template can compare numerically
        DatabaseType = int(request.GET.get('DatabaseType',0))
        Alterd_datas = Alter_managment.objects.all().order_by('-modifytime')# all change records, newest first
        Databases = Alt_Database.objects.all()
        AltTypes=Alt_Type.objects.all()
        if start or end:# date-range filter
            if start:
                start_time=datetime.strptime(start,'%Y/%m/%d')
            else:
                start_time = datetime(year=2019,month=5,day=1)# fixed lower bound when start is empty
            if end:
                #end_time = datetime.strptime(end, "%Y/%m/%d")
                # +23:59:59 so the whole end day is included in the range
                end_time = datetime.strptime(end, "%Y/%m/%d")+timedelta(hours=23,minutes=59,seconds=59)
            else:
                end_time=datetime.today()
            #Alterd_datas=Alterd_datas.filter(modifytime__range=(make_aware(start_time), make_aware(end_time)))
            Alterd_datas=Alterd_datas.filter(modifytime__range=(start_time, end_time))
        if cxtj:# keyword filter
            # Fuzzy multi-field match: any one of these fields matching is
            # enough (Q objects OR-ed together).
            Alterd_datas=Alterd_datas.filter(Q(databaseid=cxtj)|Q(id=cxtj)|Q(altercontent__icontains=cxtj)|Q(altertypeid=cxtj)|Q(modifier__icontains=cxtj)|Q(associatedid__icontains=cxtj))
        if DatabaseType:# database-type filter
            Alterd_datas=Alterd_datas.filter(databaseid=DatabaseType)
        if reviewStatus:# review-status filter
            Alterd_datas =Alterd_datas.filter(reviewstatus=reviewStatus)
        paginator = Paginator(Alterd_datas, 2) # paginate: 2 records per page
        if paginator.num_pages < page:
            page= paginator.num_pages # clamp out-of-range page numbers
        page_obj= paginator.page(page)# Page object for the requested page
        context_date =self.get_pagination_data(paginator,page_obj)# compute the page-number window
        context = {
            'Alterd_datas': page_obj.object_list,
            'page_obj':page_obj,# full pagination object for the template
            'paginator':page,# current page number
            'start':start,
            'end':end,
            'cxtj':cxtj,
            'reviewStatus':reviewStatus,
            'DatabaseType':DatabaseType,
            'Databases':Databases,
            'AltTypes':AltTypes,
            'url_query': '&'+parse.urlencode({
                'start': start or '',
                'end':end or '',
                'cxtj':cxtj or '',
                'reviewStatus':reviewStatus or 0,
                'DatabaseType':DatabaseType or 0,
            })# query-string suffix so the filters survive page navigation
        }# template context including the filter state
        context.update(context_date)# merge the pagination window into the context
        return render(request, "Alter_management/Alter.html", context=context)
    # Build the sliding window of page-number links around the current page.
    def get_pagination_data(self, paginator, page_obj, around_count=2):
        """
        Compute which page numbers to display on each side of the current
        page, plus "has more" flags for ellipses.

        :param paginator: the Paginator over the full queryset
        :param page_obj: the current Page
        :param around_count: how many page links to show on each side
        :return: dict with left/right page ranges and has-more flags
        """
        current_page = page_obj.number
        num_pages = paginator.num_pages
        left_has_more = False
        right_has_more = False
        if current_page <= around_count + 2:
            left_pages = range(1, current_page)
        else:
            left_has_more = True
            left_pages = range(current_page - around_count, current_page)
        if current_page >= num_pages - around_count - 1:
            right_pages = range(current_page + 1, num_pages + 1)
        else:
            right_has_more = True
            right_pages = range(current_page + 1, current_page + around_count + 1)
        # current_page is the current page number, around_count the window size
        #strat = (current_page - 1) * count_page
        start_num = (current_page - 1) * around_count
        return {
            # left_pages: page numbers shown to the left of the current page
            'left_pages': left_pages,
            # right_pages: page numbers shown to the right of the current page
            'right_pages': right_pages,
            'current_page': current_page,
            'left_has_more': left_has_more,
            'right_has_more': right_has_more,
            'num_pages': num_pages,
            'start_num':start_num
        }
# * @函数名: edit_Alter_manager
# * @功能描述: 编辑变更内容
# * @作者: 郭军
# * @时间: 2019-6-30 15:28:19
# * @最后编辑时间: 2019-9-9 17:00:18
# * @最后编辑者: 郭军
@require_POST
@Alter_login_required
#@method_decorator(permission_required(perm='Alter_management.change_alter_managment',login_url='/'),name="dispatch")
def edit_Alter_manager(request):# edit an existing change record
    """
    POST endpoint: update a change record's type, associated number,
    database and content. Only the record's owner may edit it; editing
    resets the review status to '0' (unreviewed).
    """
    if request.user.has_perm('Alter_management.change_alter_managment'):
        form =EditAlterform(request.POST)
        if form.is_valid():
            id=form.cleaned_data.get("id")# change record id
            AltType = form.cleaned_data.get("AltType") # change type
            AssociatedNumber =form.cleaned_data.get("AssociatedNumber") # associated ticket number
            Database = form.cleaned_data.get("Database") # database
            AlterContent =form.cleaned_data.get("AlterContent") # change content
            # only the user who created the record may edit it
            if request.user.pk ==Alter_managment.objects.get(id=id).userid:
                Alter_managment.objects.filter(id=id).update(altertypeid=AltType, associatedid=AssociatedNumber, databaseid=Database, altercontent=AlterContent, modifier=request.user.username
                ,modifytime=datetime.now(),reviewstatus='0',userid=request.user.pk)
                return resful.OK()
            else:
                return resful.unauth(message='您不能编辑别人的数据!')
        else:
            return resful.params_error(message=form.get_error())
    else:
        return resful.unauth(message='您没有编辑的权限!')
# * @函数名: delete_Alter_manager
# * @功能描述: 删除变更内容
# * @作者: 郭军
# * @时间: 2019-6-30 15:28:19
# * @最后编辑时间: 2019-9-9 17:01:02
# * @最后编辑者: 郭军
@require_POST
@Alter_login_required
def delete_Alter_manager(request):# delete a change record
    """
    POST endpoint: delete a change record and its copy in the reviewed
    ("checked") table. Requires the change_alter_managment permission.
    """
    if request.user.has_perm('Alter_management.change_alter_managment'):
        id=request.POST.get("id")
        try:
            # remove both the master record and its reviewed mirror row
            Alter_managment.objects.filter(id=id).delete()
            Alter_managment_checked.objects.filter(alterid=id).delete()
            return resful.OK()
        except:
            # NOTE(review): bare except -- deleting an empty queryset does not
            # raise, so this mainly hides unexpected database errors.
            return resful.params_error(message="该变更不存在")
    else:
        return resful.unauth(message='您没有删除的权限!')
# * @函数名: add_Alter_managerView
# # * @功能描述: 添加变更内容
# # * @作者: 郭军
# # * @时间: 2019-6-30 15:28:19
# # * @最后编辑时间: 2019-9-3 10:00:36
# # * @最后编辑者: 郭军
class add_Alter_managerView(View):# add a new change record
    def get(self,request):
        """Render the change page with the database list for the add form."""
        Databases=Alt_Database.objects.all()
        context={
            'Databases':Databases
        }
        return render(request,'Alter_management/Alter.html',context=context)
    def post(self,request):# create the change record
        """
        Validate the submitted form and create a new change record, unless an
        identical change content already exists. Requires the
        change_alter_managment permission.
        """
        if request.user.has_perm('Alter_management.change_alter_managment'):
            form = Alterform(request.POST)
            # form validation succeeded
            if form.is_valid():
                AltType_id=form.cleaned_data.get('AltType')
                AltTypes = Alt_Type.objects.get(pk=AltType_id)
                AssociatedNumber = form.cleaned_data.get('AssociatedNumber')
                Database_id = form.cleaned_data.get('Database')
                Database= Alt_Database.objects.get(pk=Database_id)
                AlterContent=form.cleaned_data.get('AlterContent')
                # duplicate check: is this change content already recorded?
                exists=Alter_managment.objects.filter(altercontent=AlterContent).exists()
                if not exists:
                    Alter_managment.objects.create(altertypeid=AltTypes.pk, associatedid=AssociatedNumber, databaseid=Database.pk,altercontent=AlterContent,
                                                   modifier=request.user.username,userid=request.user.pk)
                    return resful.OK()
                else:
                    return resful.params_error(message="该变更内容已经存在!")
            else:
                error = form.get_error()
                print(error)
                return resful.params_error(message=form.get_error())
        else:
            return resful.unauth(message='您没有添加变更的权限!')
# * @函数名: Review_Alter_manager
# * @功能描述: 变更审核
# * @作者: 郭军
# * @时间: 2019-6-30 09:39:03
# * @最后编辑时间: 2019-8-30 14:41:00
# * @最后编辑者: 郭军
@require_POST
@Alter_login_required
# @permission_required(perm= 'Alter_management.review_alter_managment',login_url='alter/Alter_manager/')
def Review_Alter_manager(request):
    """
    POST endpoint: review a change record.

    Updates the review status/content on the master table, then keeps the
    "checked" mirror table in sync:
      * status '2' (rejected): remove the mirror row, if any;
      * status '1' (approved): update the existing mirror row;
      * no mirror row yet: copy the master record into the mirror table
        (NOTE(review): as in the original, this also happens for status
        '2' when no mirror row exists -- confirm intended).

    Requires the review_alter_managment permission.
    """
    if not request.user.has_perm('Alter_management.review_alter_managment'):
        return resful.unauth(message='您没有审核的权限!')
    form = Reviewform(request.POST)
    if not form.is_valid():
        return resful.params_error(message=form.get_error())
    id = form.cleaned_data.get('id')
    ReviewStatus = form.cleaned_data.get('ReviewStatus')  # review status
    ReviewContent = form.cleaned_data.get('ReviewContent')  # review comment
    # Update the review fields on the master record; .update() returns the
    # number of rows touched, so 0 means the record does not exist.
    Review = Alter_managment.objects.filter(id=id).update(
        reviewstatus=ReviewStatus, reviewcontent=ReviewContent,
        reviewer=request.user.username, reviewtime=datetime.now())
    if not Review:
        return resful.params_error(message='审核失败!')
    # Fetch the freshly updated master record and its mirror row(s).
    alter_data = Alter_managment.objects.get(id=id)
    alter_data_checked = Alter_managment_checked.objects.filter(alterid=id)
    if alter_data_checked and ReviewStatus == '2':
        # Rejected: drop the mirror row.
        successdelete = alter_data_checked.delete()
        if successdelete:
            return resful.OK()
        return resful.params_error(message='分数据删除失败')
    if alter_data_checked and ReviewStatus == '1':
        # BUGFIX: the original called Alter_managment_checked.objects.update(...)
        # without any filter, which rewrote EVERY row of the mirror table;
        # only the row(s) for this change id must be updated.
        alter_data_checked.update(
            userid=alter_data.userid, alterid=alter_data.pk,
            associatedid=alter_data.associatedid,
            altercontent=alter_data.altercontent,
            modifier=alter_data.modifier,
            modifytime=alter_data.modifytime,
            reviewer=alter_data.reviewer,
            reviewstatus=alter_data.reviewstatus,
            reviewcontent=alter_data.reviewcontent,
            reviewtime=alter_data.reviewtime,
            altertypeid=alter_data.altertypeid,
            databaseid=alter_data.databaseid)
        return resful.OK()
    # No mirror row yet: copy the master record into the mirror table.
    Alter_managment_checked.objects.create(
        userid=alter_data.userid, alterid=alter_data.pk,
        associatedid=alter_data.associatedid,
        altercontent=alter_data.altercontent, modifier=alter_data.modifier,
        modifytime=alter_data.modifytime, reviewer=alter_data.reviewer,
        reviewstatus=alter_data.reviewstatus,
        reviewcontent=alter_data.reviewcontent,
        reviewtime=alter_data.reviewtime, altertypeid=alter_data.altertypeid,
        databaseid=alter_data.databaseid)
    return resful.OK()
# * @函数名: Alter_detail
# * @功能描述: 变更内容详情
# * @作者: 郭军
# * @时间: 2019-6-30 09:39:03
# * @最后编辑时间: 2019-8-30 14:41:00
# * @最后编辑者: 郭军
@Alter_login_required
def Alter_detail(request,id):# change detail page
    """
    Render the detail page for one change record, or an error payload when
    the record does not exist.
    """
    # BUGFIX: .get() raises DoesNotExist (-> HTTP 500) for an unknown id,
    # so the original error branch was unreachable; .filter().first()
    # returns None instead.
    Alterdeatil = Alter_managment.objects.filter(id=id).first()
    if Alterdeatil:
        context = {
            'Alterdeatil': Alterdeatil
        }
        return render(request,"Alter_management/Alter_detail.html",context=context)
    else:
        return resful.params_error(message='没有找到详情数据')
def test_review(request):
    """
    Debug/AJAX endpoint: return the review status and content of one change
    record as JSON, shaped {'code': 200, 'data': [...]}.
    """
    id=request.GET.get('id')
    print('获取到的id是:',id)
    datas=Alter_managment.objects.values('pk','reviewstatus','reviewcontent').filter(pk=id)
    datas =list(datas)  # ValuesQuerySet is not JSON-serializable directly
    data ={'code':200,'data':datas}
    return JsonResponse(data,safe=False)
|
20,055 | dd97e9108e4800668ea0fc3bb1eb42a86a21bd10 | #!env python
import sys
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
from Bio.SeqRecord import SeqRecord
# Keep only the fasta records whose nucleotide sequence is strictly longer
# than `seuil` codons (i.e. seuil * 3 bases) and write them to a new file
# whose name carries the threshold.
seuil = int(sys.argv[1])  # minimum length threshold, in codons
fname = '/home/raphael/Documents/UPMC_BIM/Semestre02/TME/BimProjet/CAIJava/source/AT_arc_metatrans.filtered.fasta.cleanup'
oname = '/home/raphael/Documents/UPMC_BIM/Semestre02/TME/BimProjet/CAIJava/source/AT_arc_metatrans.filtered.fasta.cleanup.len'+str(seuil)
ihandle = open(fname)
ohandle = open(oname,'w')
for record in SeqIO.parse(ihandle, 'fasta'):
    if len(record.seq) > seuil * 3 :
        SeqIO.write([record], ohandle, "fasta")
ohandle.close()
ihandle.close()
|
20,056 | c08e1ee9d91ed9a6b6a5494c0575a2a707ec8759 | """
You and your K-1 friends want to buy N flowers. Flower number i has cost ci. Unfortunately the seller does not want just
one customer to buy a lot of flowers, so he tries to change the price of flowers for customers who have already bought
some flowers. More precisely, if a customer has already bought x flowers, he should pay (x+1)*ci dollars to buy flower
number i.
You and your K-1 friends want to buy all N flowers in such a way that you spend the least amount of money. You can buy
the flowers in any order.
Input:
The first line of input contains two integers N and K (K <= N). The next line contains N space separated positive
integers c1,c2,...,cN.
Output:
Print the minimum amount of money you (and your friends) have to pay in order to buy all N flowers.
"""
__author__ = 'Danyang'
class Solution(object):
    def solve(self, cipher):
        """
        Greedy: sort costs in descending order and deal them out K at a time.
        The j-th batch of K flowers (0-based) is each friend's (j+1)-th
        purchase, so each of its costs is multiplied by (j+1); placing the
        most expensive flowers in the earliest (cheapest) batches minimises
        the total.

        Relies on Python 2 integer division in ``N / K``; the "+ 1" covers
        the final partial batch (when K divides N the extra batch is an
        empty slice and contributes 0).

        :param cipher: tuple (N, K, C) -- flower count, friend count, costs
        :return: minimum total amount of money to buy all N flowers
        """
        N, K, C = cipher
        C.sort(reverse=True)
        group_cnt = N / K + 1  # 1 is the last remaining group
        total_cost = 0
        for i in xrange(group_cnt):
            unit_cost = i + 1
            total_cost += unit_cost * sum(C[i * K:(i + 1) * K])
        return total_cost
if __name__ == "__main__":
    import sys
    f = open("1.in", "r")
    # f = sys.stdin
    # Input format: first line "N K", second line the N space-separated costs.
    N, K = map(int, f.readline().strip().split(' '))
    C = map(int, f.readline().strip().split(' '))
    cipher = N, K, C
    # solve
    s = "%s\n" % (Solution().solve(cipher))
    print s,
|
20,057 | 93af1afd8c71a0b5651c7610cdfa2f41075df662 | a = [10, 20, 5, 30, 15]
d = [30, 35, 15, 5, 10, 20, 25]
c = [10,20,5,30]
def chopchop(temp):
    """
    Recursive matrix-chain-multiplication cost search.

    *temp* is a list of dimensions: matrix i is temp[i] x temp[i+1].
    Returns [cost, [rows, cols]] -- presumably the minimal scalar
    multiplication count found and the shape of the final product.
    NOTE(review): for len(temp) > 4 the search enumerates split points via
    back()/chopchop recursion and finally takes min() over [cost, shape]
    pairs (lists compare element-wise) -- confirm this explores all
    parenthesisations and that the tie-break on shape is intended.

    :param temp: dimension list, len >= 2
    """
    # Consecutive dimension pairs: a[i] is the shape of matrix i.
    a = [[temp[i], temp[i+1]] for i in range(len(temp)-1)]
    if len(temp) ==2:
        # A single matrix: no multiplication needed.
        return [0, temp]
    def cal(a, b):
        # Scalar multiplications to multiply shape a by shape b
        # (a-rows x shared-dim x b-cols).
        return a[0] * b[0] * b[1]
    def res(a, b):
        # Shape of the product of shapes a and b.
        return [a[0], b[1]]
    def back(a):
        # Rebuild the flat dimension list from a list of shape pairs.
        s = []
        for i in a:
            s.extend(i)
        d = [s[1:len(s)-1][2*i] for i in range(int((len(s)-2)/2))]
        d.insert(0, a[0][0])
        d.append(a[-1][1])
        return d
    if len(a) == 2:
        # Exactly two matrices: only one way to multiply them.
        c = cal(a[0], a[1])
        return [c, res(a[0], a[1])]
    elif len(a) == 3:
        # Three matrices: compare (AB)C against A(BC).
        sd = a[:-1]
        temp0 = cal(sd[0], sd[1]) + cal(res(sd[0], sd[1]), a[-1])
        sd = a[1:]
        temp1 = cal(sd[0], sd[1]) + cal(a[0], res(sd[0], sd[1]))
        return [min([temp0, temp1]), res(a[0], a[2])]
    else:
        # More matrices: try every split point, solve both halves
        # recursively, then combine.
        mem = []
        result = []
        final = []
        for i in range(len(a)-1):
            t = back(a[:i+1])
            f = back(a[i+1:])
            mem.extend([t,f])
        for i in mem:
            result.append(chopchop(i))
        for i in range(int(len(result)/2)):
            final.append([result[2*i][0] + result[2*i + 1][0] + cal(result[2*i][1], result[2*i + 1][1]),
                          res(result[2*i][1], result[2*i + 1][1])])
        return min(final)
chopchop(a)
chopchop(d)
chopchop(c) |
20,058 | 61f80e6a48af70dec50d40c35b5fd8fffdf143b6 | import warnings
warnings.warn("twisted.protocols.xmlstream is DEPRECATED. import twisted.words.xish.xmlstream instead.",
DeprecationWarning, stacklevel=2)
from twisted.words.xish.xmlstream import *
|
20,059 | d4e9c509a5da7e9b5b302abc29850edd3a18fca6 | from scipy.constants import speed_of_light
import lmfit
import numpy as np
import matplotlib.pyplot as plt
def calculate_index_and_derivative(wl):
    """
    Refractive index of fused silica and its wavelength derivative, from
    the three-term Sellmeier equation with fixed fused-silica coefficients.

    :param wl: wavelength in micrometres (scalar or numpy array)
    :return: (index, index_derivative) -- n and dn/dwl at *wl*
    """
    index = np.sqrt(1 + (0.6961663 * wl * wl) / (wl * wl - 0.0684043 * 0.0684043)
                    + (0.4079426 * wl * wl) / (wl * wl - 0.1162414 * 0.1162414)
                    + (0.8974794 * wl * wl) / (wl * wl - 9.896161 * 9.896161)
                    )
    # Closed-form derivative of the Sellmeier expression above; the
    # constants here are the same C_i coefficients already squared
    # (0.00467915 = 0.0684043^2, 0.0135121 = 0.1162414^2, 97.934 = 9.896161^2).
    index_derivative = \
        (
                - (1.79496 * wl * wl * wl) / (pow(-97.934 + wl * wl, 2))
                + (1.79496 * wl) / (-97.934 + wl * wl)
                - (0.815885 * wl * wl * wl) / (pow(-0.0135121 + wl * wl, 2))
                + (0.815885 * wl) / (-0.0135121 + wl * wl)
                - (1.39233 * wl * wl * wl) / (pow(-0.00467915 + wl * wl, 2))
                + (1.39233 * wl) / (-0.00467915 + wl * wl)
        ) \
        / \
        (2 * np.sqrt(
            1
            + (0.897479 * wl * wl) / (-97.934 + wl * wl)
            + (0.407943 * wl * wl) / (-0.0135121 + wl * wl)
            + (0.696166 * wl * wl) / (-0.00467915 + wl * wl)
        )
         )
    return index, index_derivative
def calculate_index_and_derivative_sellmeier(wl, B1, C1, B2, C2, B3, C3):
    """
    Refractive index and its wavelength derivative from the Sellmeier
    equation with caller-supplied coefficients (so the dispersion model can
    itself be fitted). Fused-silica reference values:

    B1 : 0.6961663
    B2 : 0.4079426
    B3 : 0.8974794
    C1 : 0.0684043**2= 0.00467914825849
    C2 : 0.1162414**2 = 0.01351206307396
    C3 : 9.896161**2 = 97.934002537921

    :param wl: wavelength in micrometres (scalar or numpy array)
    :param B1,B2,B3: Sellmeier strengths (dimensionless)
    :param C1,C2,C3: Sellmeier resonances, already squared (um^2)
    :return: (index, index_derivative) -- n and dn/dwl at *wl*
    """
    index = np.sqrt(1 + (B1 *wl**2) / (wl**2 - C1)
                    + (B2 * wl**2) / (wl**2 - C2)
                    + (B3 * wl**2) / (wl**2 - C3)
                    )
    # Analytic derivative of the expression above, divided by 2n.
    index_derivative = \
        (
                (-2 * B1 * wl**3) / ((-C1 + wl**2)**2) + (2*B1 * wl) / (-C1 + wl**2)
                + (-2 * B2 * wl**3) / ((-C2 + wl**2)**2) + (2 * B2 * wl) / (-C2 + wl**2)
                + (-2 * B3 * wl**3) / ((-C3 + wl**2)**2) + (2 * B3 * wl) / (-C3 + wl**2) )/(2*index)
    return index, index_derivative
def calculate_transit_time(wl, fiber_length):
    """
    Time (seconds) for light of wavelength *wl* (um) to traverse
    *fiber_length* metres of fused silica, using the group velocity
    v_g = c / (n - wl * dn/dwl).
    """
    n, dn_dwl = calculate_index_and_derivative(wl)
    v_group = speed_of_light / (n - wl * dn_dwl)
    return fiber_length / v_group
def calculate_transit_time_s(wl, fiber_length, B1, C1, B2, C2, B3, C3):
    """
    Same as calculate_transit_time but with explicit Sellmeier coefficients,
    so the dispersion model can be a fit parameter.

    BUGFIX: the original called calculate_index_and_derivative(wl, B1, ...),
    which only accepts a single argument (TypeError at runtime); the
    coefficient-aware variant is calculate_index_and_derivative_sellmeier.

    :param wl: wavelength in micrometres
    :param fiber_length: fibre length in metres
    :param B1..C3: Sellmeier coefficients (C_i already squared, in um^2)
    :return: transit time in seconds
    """
    index, index_derivative = calculate_index_and_derivative_sellmeier(wl, B1, C1, B2, C2, B3, C3)
    group_velocity = speed_of_light / (index - wl * index_derivative)
    return fiber_length / group_velocity
def get_diff_btw_delay_and_transit_time(wl, transit_time_calibration, delay, fiber_length):
    """Root function for bisection: zero when the transit-time difference to
    the calibration wavelength equals *delay* (all times in seconds)."""
    return (calculate_transit_time(wl, fiber_length) - transit_time_calibration) - delay
# Calibration point: the 532 nm photon arrives in microtime channel 1227.
wl_calib = 0.532  # calibration wavelength, micrometres
microtime_calib = 1227
fiber_length = 100  # metres
microtimes_x = np.linspace(0, 4095)
microtime_to_wl_tab = np.zeros(microtimes_x.size)
transit_time_calibration = calculate_transit_time(wl_calib, fiber_length)
print("transit_time_calibration : ", transit_time_calibration)
# TODO from exp_param.
micro_channel_time_duration = 25E-9
# IR photon arrive first.
# NB : we don't take into account a possible wrapping of the spectra.
# Photon with a shorter microtime than the calibration one are more red
# (assuming that there is no fluorescence decay)
# delay_with_calib = (microtimes_x - microtime_calib) * micro_channel_time_duration
#
# i = 0
# for microtime in microtimes_x:
# delay_with_calib = (microtime - microtime_calib) * micro_channel_time_duration
# wl, r = bisect(f=get_diff_btw_delay_and_transit_time, a=0.38, b=1, args=(transit_time_calibration, delay_with_calib, fiber_length))
# microtime_to_wl_tab[i] = wl
# i += 1
#
# plt.plot(microtimes_x, microtime_to_wl_tab)
# plt.show()
# Tabulate the index, its derivative (both formulations, as a cross-check)
# and the transit time relative to the calibration wavelength over the
# visible/NIR range.
wls = np.linspace(0.35, 1, 100)
index = np.zeros(wls.size)
index_derivative = np.zeros(wls.size)
index_s = np.zeros(wls.size)
index_derivative_s = np.zeros(wls.size)
transit_time = np.zeros(wls.size)
i=0
for wl in wls:
    index[i], index_derivative[i] = calculate_index_and_derivative(wl)
    index_s[i], index_derivative_s[i] = calculate_index_and_derivative_sellmeier(wl, B1=0.6961663, C1=0.00467914825849, B2=0.4079426, C2=0.01351206307396, B3=0.8974794, C3=97.934002537921)
    transit_time[i] = (calculate_transit_time(wl, fiber_length) - transit_time_calibration) * 1E9  # delay in ns vs calibration
    i+=1
def get_polynom(fiber_length, wl_calib, micro_calib, micro_time_duration_ns, deg=9):
    """
    Fit a polynomial mapping delay (in microtime channels, relative to the
    calibration wavelength) -> wavelength (um) for a given fibre length.

    :param fiber_length: fibre length in metres
    :param wl_calib: calibration wavelength in micrometres
    :param micro_calib: calibration microtime channel -- currently unused,
        since the delays are already expressed relative to wl_calib
    :param micro_time_duration_ns: duration of one microtime channel in ns
    :param deg: degree of the fitted polynomial
    :return: np.poly1d mapping relative delay (channels) to wavelength (um)
    """
    wls = np.linspace(0.35, 1, 100)
    delays_with_calib = np.zeros(wls.size)
    transit_time_calibration = calculate_transit_time(wl_calib, fiber_length)
    # BUGFIX: the original loop indexed with a stale module-level `i`
    # (never advanced inside the loop), raising IndexError or silently
    # overwriting a single slot.
    for idx, wl in enumerate(wls):
        delays_with_calib[idx] = (calculate_transit_time(wl, fiber_length) - transit_time_calibration)
    # seconds -> microtime channels
    delays_with_calib_microtime = delays_with_calib * 1E9/micro_time_duration_ns
    p_fit_inv = np.polyfit(delays_with_calib_microtime, wls, deg)
    print(p_fit_inv)
    polynomial_interpolation_inverse = np.poly1d(p_fit_inv)
    # BUGFIX: the original built the polynomial but never returned it.
    return polynomial_interpolation_inverse
# Interpolation of the theoretic data: delay as a function of wavelength.
p_fit = np.polyfit(wls, transit_time, deg=6)
print(p_fit)
polynomial_interpolation = np.poly1d(p_fit)
# Interpolation of the inverse: wavelength as a function of delay.
p_fit_inv = np.polyfit(transit_time, wls, deg=9)
print(p_fit_inv)
polynomial_interpolation_inverse = np.poly1d(p_fit_inv)
# plt.plot(wls, index)
# plt.plot(wls, index_s)
# plt.show()
#
# plt.plot(wls, index_derivative)
# plt.plot(wls, index_derivative_s)
# plt.show()
# plt.plot(wls*1E3, transit_time)
# plt.plot(wls*1E3, polynomial_interpolation(wls))
# plt.xlabel("wavelength in nm")
# plt.ylabel("delay in ns")
# plt.show()
#
# plt.plot(transit_time, wls)
# plt.plot(transit_time, polynomial_interpolation_inverse(transit_time))
# plt.ylabel("wavelength in nm")
# plt.xlabel("delay in ns")
# plt.show()
# Fit from experimental data
# NOTE(review): this overrides the 0.532 um calibration wavelength set above.
wl_calib = 0.531
# Measured arrival delays (ns) and wavelengths (nm) per spectral line.
y = [38.256,37.21,35.72,31.284,30.475,27.837,27.393,27.028,26.557,26.244,25.879,25.644,25.06,24.53,24.25,23.7,23.48,23.21,22.87,22.66,22.35,22.04,21.8,21.23,21.048,20.839,20.682,18.619]
x = [413.0,423.0,435,483,495,531,539,548,557,567,574,580,591,606,618,633,647,659,669,681,695,709,725,752,762,780,795,1072]
y = np.array(y) - 27.837  # reference all delays to the 531 nm line
x = np.array(x)
x /= 1000.0  # nm -> micrometres
x -= 0.007  # 7 nm wavelength calibration offset -- TODO confirm origin
def delay_function(x, fiber_length, calib_error):
    """lmfit model: relative transit delay (ns) at wavelength *x* (µm).

    Reads the module-level ``transit_time_calibration`` reference delay.
    NOTE(review): ``calib_error`` is unused — the correction is commented out.
    """
    global transit_time_calibration
    # x -= calib_error
    transit_time = (calculate_transit_time(x, fiber_length) - transit_time_calibration) * 1E9
    return transit_time
def delay_function_w_sellmeir(x, fiber_length, calib_error, B1, C1, B2, C2, B3, C3):
    """lmfit model variant intended to also fit the Sellmeier coefficients.

    NOTE(review): B1..C3 are accepted but never forwarded to
    calculate_transit_time, so lmfit cannot actually vary the glass model —
    confirm the intended signature of calculate_transit_time.
    ``calib_error`` is likewise unused (correction commented out).
    """
    global wl_calib
    # Recompute the reference delay from the calibration wavelength.
    transit_time_calibration = calculate_transit_time(wl_calib, fiber_length)
    # x -= calib_error
    transit_time = (calculate_transit_time(x, fiber_length) - transit_time_calibration) * 1E9
    return transit_time
# Fit the measured delays with the simple model, then with the Sellmeier model.
# NOTE(review): the first fit is immediately overwritten by the second; only
# the Sellmeier `result` is reported and plotted — confirm that is intended.
gmodel = lmfit.Model(delay_function)
result = gmodel.fit(y, x=x, fiber_length=100, calib_error=0.000)
gmodel = lmfit.Model(delay_function_w_sellmeir)
# Initial B/C values are the standard fused-silica Sellmeier coefficients.
result = gmodel.fit(y, x=x, fiber_length=100, calib_error=0.000, B1=0.6961663, C1=0.00467914825849, B2=0.4079426, C2=0.01351206307396, B3=0.8974794, C3=97.934002537921)
print(result.fit_report())
plt.style.use('ggplot')
plt.rcParams['lines.linewidth'] = 4
plt.plot(x, y, 'bo', label="exp data")
plt.plot(x, result.best_fit, 'r-', label="Fit with L=121m", alpha=0.7)
plt.xlabel("wavelength (µm)")
plt.ylabel("Delay (ns)")
plt.title("Calibration of the GI50 Sedi Fibre (100m), effective length=125m")
plt.legend()
plt.savefig("Calib_fiber.png")
plt.show()
|
20,060 | 5e701bef51a5531e196aa6c69f905a2af689532e | # This script delivers the trimmed tweets back to rethink
import rethinkdb as r
import sys
import json
from conn import conn
# Read the trimmed tweets (a JSON array) from stdin in one blob.
data = sys.stdin.readlines()
data = ''.join(data)
tweets = json.loads(data)
# Insert each tweet document into the aggregator.trimmed table.
for tweet in tweets:
    # NOTE(review): .run() is called with no connection argument; rethinkdb
    # normally needs .run(conn) or a prior conn.repl() — confirm the `conn`
    # module installs a default/repl connection.
    r.db('aggregator').table('trimmed').insert(tweet).run()
|
20,061 | 11c00fcb70018a8f5b03c4c6c61af0a4037d454f | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def findTarget(self, root: TreeNode, k: int) -> bool:
        """Return True if two distinct BST nodes sum to k.

        An iterative in-order traversal yields the values in sorted order;
        a two-pointer scan then looks for a pair summing to k.
        (The original also carried an unused recursive traversal helper.)
        """
        ordered = []
        stack = []
        current = root
        # Stack-based in-order traversal: left subtree, node, right subtree.
        while stack or current:
            while current:
                stack.append(current)
                current = current.left
            current = stack.pop()
            ordered.append(current.val)
            current = current.right
        lo, hi = 0, len(ordered) - 1
        while lo < hi:
            pair_sum = ordered[lo] + ordered[hi]
            if pair_sum == k:
                return True
            if pair_sum < k:
                lo += 1
            else:
                hi -= 1
        return False
|
20,062 | 4207819969b40119d158e9a357ca2344ab580ee1 | # You are playing a Flip Game with your friend.
#
# You are given a string currentState that contains only '+' and '-'. You and
# your friend take turns to flip two consecutive "++" into "--". The game ends
# when a person can no longer make a move, and therefore the other person will be the
# winner.
#
# Return all possible states of the string currentState after one valid move.
# You may return the answer in any order. If there is no valid move, return an
# empty list [].
#
#
# Example 1:
#
#
# Input: currentState = "++++"
# Output: ["--++","+--+","++--"]
#
#
# Example 2:
#
#
# Input: currentState = "+"
# Output: []
#
#
#
# Constraints:
#
#
# 1 <= currentState.length <= 500
# currentState[i] is either '+' or '-'.
#
# Related Topics String 👍 150 👎 354
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
    def generatePossibleNextMoves(self, currentState: str) -> List[str]:
        """Return every state reachable by flipping one "++" into "--".

        An empty list is returned when no move exists (including states
        shorter than two characters, since the scan range is then empty).
        """
        states = []
        # Slide a two-character window; each "++" yields one successor state.
        for start in range(len(currentState) - 1):
            if currentState[start] == '+' and currentState[start + 1] == '+':
                states.append(currentState[:start] + '--' + currentState[start + 2:])
        return states
# leetcode submit region end(Prohibit modification and deletion)
|
20,063 | 4da0db1f80e27bd02f3a30ac0a1a409c1124344e | x=input("please enter your string :")
y= x[::-1]
if(x == y):
print("palindrome string")
else:
print("not palindrme string") |
20,064 | 079720a13477ddfd3ef4507ea43bddad0ce9c90e | import socket
from diffie_hellman import generate_q, generate_a, generate_public_key, generate_symmetric_key
def server(host='localhost', port=8082):
    """Run a single-client, menu-driven Diffie-Hellman demo server.

    Accepts one TCP client, then loops over a Portuguese menu: options 1-4
    call the imported key-generation helpers, 5 sends a message to the
    client, 6 receives one, 7 closes the client socket and exits.
    NOTE(review): the listening socket itself is never closed, and
    ``data_payload`` is unused (recv uses a literal 2048).
    """
    data_payload = 2048 # The maximum amount of data to be received at once
    # Create a TCP socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Enable reuse address/port
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # Bind the socket to the port
    server_address = (host, port)
    print("Starting up echo server on %s port %s" % server_address)
    sock.bind(server_address)
    # Listen to clients, argument specifies the max no. of queued connections
    sock.listen(5)
    print("Waiting to receive message from client")
    client, address = sock.accept()
    # Menu entries 1-4 dispatch straight to the Diffie-Hellman helpers.
    options = {
        1: generate_q,
        2: generate_a,
        3: generate_public_key,
        4: generate_symmetric_key,
    }
    while True:
        option_input = int(input("Escolha a ação desejada: \n"
                                 "1 - Gerar q \n"
                                 "2 - Gerar a \n"
                                 "3 - Gerar chave pública \n"
                                 "4 - Gerar chave simétrica \n"
                                 "5 - Enviar mensagem \n"
                                 "6 - Receber mensagem \n"
                                 "7 - Sair \n"))
        if option_input in options.keys():
            result = options.get(option_input)()
            print(result)
        if option_input == 5:
            message = input("Digite a mensagem a ser enviada: \t")
            print("Sending %s" % message)
            client.sendall(message.encode('utf-8'))
        if option_input == 6:
            # Blocks until the client sends something.
            data = client.recv(2048)
            print("Received: %s" % data)
        if option_input == 7:
            client.close()
            break
server()
|
20,065 | c9695651db9852a523997fdfd03da30490675037 | import socket, time
import select
class tcp_ip_connection():
    '''One-shot TCP exchange with a LabVIEW listener (Python 2).

    Builds a tab-separated "phase<TAB>tree_loc<TAB>shot\r\n" command and,
    in send_receive(), sends it and interprets the first returned character
    as a status code ('0' success, '1' LabVIEW failure).
    '''
    def __init__(self, ip, port, phase, tree_loc, shot):
        self.ip = ip
        self.port = port
        self.phase = phase
        self.tree_loc = tree_loc
        self.shot = shot
        # LabVIEW protocol framing: CRLF terminator, TAB field separator.
        self.termination_character = '\r\n'
        self.sep_char = chr(0x09)
        print ' Building string to send to Labview:'
        self.send_string = phase + self.sep_char + str(self.tree_loc) + self.sep_char + str(self.shot) + self.termination_character
        print ' ' + self.send_string.rstrip(self.termination_character)
    def send_receive(self):
        '''Send the command, and get the return status
        SH:20Mar2013
        '''
        print ' Connecting to host: %s:%s'%(self.ip, self.port)
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.setblocking(0)
        self.s.settimeout(3)
        self.s.connect((str(self.ip), int(self.port)))
        print ' Connected, sending string:%s'%(self.send_string.rstrip(self.termination_character))
        self.s.send(self.send_string)
        print ' Waiting for return message'
        data = ''
        count=0
        # Poll up to 3 times (2 s each) until the reply ends in CRLF.
        # NOTE(review): if select never reports the socket readable,
        # `ends_with_term` is never assigned (NameError below) and `count`
        # is not advanced — confirm the indentation of `count += 1`.
        while (not data.endswith(self.termination_character)) and count<3:
            ready = select.select([self.s], [], [], 2)
            if ready[0]:
                data += self.s.recv(4096)
                print ' ' + data
                ends_with_term = data.endswith(self.termination_character)
                print ' data ends with term character:', ends_with_term
                count += 1
        print ' Finished receiving data, returned string :%s'%(data.rstrip(self.termination_character))
        print ' Closing socket'
        self.s.shutdown(socket.SHUT_RDWR)
        self.s.close()
        print ' Checking data'
        if not ends_with_term:
            raise RuntimeError(' Received message does not end in termination character')
        self.data = data.rstrip(self.termination_character)
        # First character is the status code; the rest is the message.
        # NOTE(review): `return_value` is unbound if the stripped reply is empty.
        if len(self.data)>0:
            self.return_value = self.data[0]
        if len(self.data)>2:
            self.return_message = self.data[1:]
        else:
            self.return_message = ''
        time.sleep(0.1)
        if self.return_value=='0':
            print ' Success, returned message:%s'%(self.return_message)
        elif self.return_value=='1':
            raise RuntimeError(' Labview failed, returned message:%s'%(self.return_message))
        else:
            raise RuntimeError(' Labview failed unknown return value - not 1 or 0, returned message:%s'%(self.data))
|
20,066 | aea2904de86414281c0745cb4dae62f6a7100695 | # -*- coding: utf-8 -*-
"""
SQL Server Connect
April 2019 - Greg Wilson, Slalom
"""
import pyodbc
from pandas import read_sql
from sqlalchemy import create_engine
import urllib
class SqlServerConnect:
    '''
    This is a class for interfacing with SQL Server.
    Please do not change this code. This is meant to serve as a quick way to
    experiment with data in your SQL Server.
    Copy any of the code you would like to use into a new file.
    A production implementation of this class would require the following:
    - Implementing this code in a python package / module
    - Adding some sort of version control
    - Adding tests (e.g. pytest)
    '''
    def __init__(self, server='', database=''):
        '''
        Returns a SqlServerConnect object.
        Args:
            :server (str): server name; Default is ''
            :database (str): database name; Default is ''
        '''
        self.server = server
        self.database = database
    def get_connection(self):
        '''Returns a pyodbc connection object'''
        # NOTE(review): this uses the '{SQL Server}' driver while
        # get_sql_engine() uses 'SQL Server Native Client 10.0' — confirm
        # which driver is actually installed and unify.
        return pyodbc.connect(driver='{SQL Server}',
                              server=self.server,
                              database=self.database,
                              trusted_connection=True)
    def get_sql_engine(self):
        '''Returns a SQLAlchemy engine'''
        # The ODBC connection string must be URL-quoted before being embedded
        # in the SQLAlchemy URL.
        template = 'DRIVER={{SQL Server Native Client 10.0}};SERVER={};DATABASE={};trusted_connection=yes'
        params = urllib.parse.quote_plus(template.format(self.server, self.database))
        return create_engine("mssql+pyodbc:///?odbc_connect=%s" % params)
    def read_sql(self, sql):
        '''Takes a SQL SELECT statement and returns a pandas DataFrame'''
        # Calls the pandas read_sql imported at module level (not this method).
        with self.get_connection() as conn:
            return read_sql(sql, conn)
    def write_df(self, df, tablename, schema=None, if_exists='fail'):
        '''
        Replicates the pandas.DataFrame.to_sql function.
        :df (pandas.DataFrame): DataFrame to be written
        :tablename (str): The name of the target table
        :schema (str): The schema of the target table
        :if_exists: How to behave if the table already exists.
            * fail (default): raise a ValueError
            * replace: drop the table before inserting new values
            * append: insert new values to the existing table
        '''
        df.to_sql(name=tablename,
                  con=self.get_sql_engine(),
                  schema=schema,
                  if_exists=if_exists,
                  index=False)
    def execute_sql(self, sql):
        '''
        Executes a SQL command.
        '''
        # NOTE(review): failures are only printed, so callers cannot detect
        # them; consider re-raising after logging.
        with self.get_connection() as conn:
            try:
                conn.execute(sql)
            except Exception as e:
                print('SQL Execution failed!\n', e)
|
20,067 | df95d0aebcf4fdf9d858e53f830179a2819037c0 | from .signup import SignupView
from .login import LoginView
from .mypage import MyPageView
from .logout import LogoutView
|
20,068 | aa764ad21e576645e8440f3bfb6d3de40a3ba0e4 | import inspect
from abc import abstractmethod
from dataclasses import dataclass
from optimization.min_cost_flow.models.problem import FlowProblem
@dataclass
class BaseTransformer:
    """
    Base class for transformers — helpers that rewrite a problem and then
    map the answer of the rewritten problem back, producing the answer to
    the original problem.  They let us reduce one problem class to another.
    """
    @abstractmethod
    def transform(self, p: FlowProblem):
        # Rewrite *p* into an equivalent (easier) problem instance.
        pass
    @abstractmethod
    def restore(self, p: FlowProblem):
        # Map a solution of the transformed problem back onto the original.
        pass
|
20,069 | cbf1fc9b81028b2be316522309b32dc0f2ac1187 | a = int(input())
b = int(input())
c = int(input())
if((a+b+c)!=180):
print("Error")
elif(a==60 and b==60 and c==60):
print("Equilateral")
elif((a+b+c)==180 and (a==b or b==c or a==c)):
print("Isosceles")
elif((a+b+c)==180)and (a!=b and b!=c and a!=c):
print("Scalene")
|
20,070 | a9e588649aa29e3fcd0431156491eddf64123241 | cont3 = 0
cont2 = 0
cont1 = 0
codigo = 1
while codigo != 4:
codigo = int(input())
if codigo == 1:
cont1 += 1
elif codigo == 2:
cont2 += 1
elif codigo == 3:
cont3 += 1
print('MUITO OBRIGADO')
print('Alcool: %d' %(cont1))
print('Gasolina: %d' %(cont2))
print('Diesel: %d' %(cont3))
|
20,071 | 362eecc9ec0a2d1e8df850bd24f406e33dfabb09 | #coding:utf-8
from __future__ import division
__author__ = 'bater.makhabel'
import os
import re
import sys
import json
import time
import pickle
import builtins
import itertools
from datetime import datetime
# Deal with bibliography file in the format same as
# ./bibliography/KDD/2019/Research_Track_Papers_Full_List.txt
def check_line_format_kdd_2019(lines=None):
    """Validate a KDD-2019 listing: two lines per record, the second
    prefixed "Authors: ".  Prints diagnostics and stops at the first bad
    record; exit(0) when the trailing sanity check trips.
    NOTE(review): the final check compares the record counter `line_no`
    against the raw line count len(lines) (they differ by ~2x) — confirm
    the intended condition.
    """
    line_no = 1
    for idx in range(len(lines)//2):
        if( re.match('Authors: ', lines[2*idx:(2*idx+2)][1]) == None):
            print(f"line_no = {line_no}")
            print(lines[2 * idx:(2 * idx + 2)][1])
            print(re.match('Authors: ', lines[2 * idx:(2 * idx + 2)][1]))
            print(re.split('^Authors: ', lines[2 * idx:(2 * idx + 2)][1]))
            break
        line_no = line_no + 1
    if line_no == len(lines):
        print(f"line_no = {line_no} != the lines of input file!")
        exit(0)
    return(None)
# Deal with bibliography file in the format same as
# ./bibliography/KDD/2017/Research_Track_Papers_Full_List.txt
def check_line_format_kdd_2017(lines=None):
    """Validate a KDD-2017-style listing: two lines per record, the second
    prefixed "Author(s):".  Unlike the 2019 checker this exits immediately
    on the first malformed record.
    NOTE(review): the trailing check compares the record counter to the raw
    line count (differ by ~2x) — confirm the intended condition.
    """
    line_no = 1
    for idx in range(len(lines)//2):
        if( re.match('Author\(s\):', lines[2*idx:(2*idx+2)][1]) == None):
            print(f"line_no = {line_no}")
            print(lines[2 * idx:(2 * idx + 2)])
            print(lines[2 * idx:(2 * idx + 2)][1])
            print(re.match('Author\(s\):', lines[2 * idx:(2 * idx + 2)][1]))
            print(re.split('^Author\(s\):', lines[2 * idx:(2 * idx + 2)][1]))
            exit(0)
        line_no = line_no + 1
    if line_no == len(lines):
        print(f"line_no = {line_no} != the lines of input file!")
        exit(0)
    return(None)
# Deal with bibliography file in the format same as
# ./bibliography/KDD/2012/Research_Track_Full_lisit.txt
def check_line_format_kdd_2012(lines=None):
    """Validate a KDD-2012-style listing: four lines per record, with
    "Title:" on the second line and "Author(s):" on the... (the condition
    tests index [1] for both prefixes — NOTE(review): the prints below use
    index [2] for "Author(s):", so the `&`-combined check looks off).
    """
    line_no = 1
    for idx in range(len(lines)//4):
        if( (re.match('Title:', lines[4*idx:(4*idx+4)][1]) == None) & (re.match('Author\(s\):', lines[4*idx:(4*idx+4)][1]) == None) ):
            print(f"line_no = {line_no}")
            print(lines[4*idx:(4*idx+4)])
            print(lines[4*idx:(4*idx+4)][1])
            print(re.match('Title:', lines[4*idx:(4*idx+4)][1]))
            print(re.match('Author\(s\):', lines[4*idx:(4*idx+4)][2]))
            print(re.split('^Title:', lines[4*idx:(4*idx+4)][1]))
            print(re.split('^Author\(s\):', lines[4*idx:(4*idx+4)][2]))
            exit(0)
        line_no = line_no + 1
    if line_no*4 != len(lines)+1:
        print(f"line_no = {line_no} != the lines of input file, {len(lines)}!")
        exit(0)
    return(None)
# Deal with bibliography file in the format same as ./bibliography/bibliography.py
def check_line_format(line, line_no):
    """Report a malformed merged bibliography line (expected "+ [..] ..." form).

    Bug fix: the original body referenced an undefined name ``x`` instead of
    the ``line`` parameter, so every call raised NameError.  Behavior as
    intended: prints the offending line and its number when *line* is not
    None and does not start with '+'; otherwise does nothing.
    """
    if line is None:
        # Nothing to validate (the original also bumped a throwaway local here).
        pass
    elif line[0] != '+':
        print(line)
        print(f"line_no = {line_no}")
def sort_bibliography(input_file_path, output_file_path):
    """Group "+ [YY], ..." bibliography lines by year, newest first, and
    append them to *output_file_path*.

    The two-digit year inside the first [...] is windowed: values < 20
    become 20YY, otherwise 19YY.
    """
    f = open(input_file_path)
    lines = f.readlines()
    f.close()
    line_no = 1
    x_dict = {}
    # Each element becomes (text-between-first-brackets, original-line).
    for x in map(lambda x: (re.split('[\[\]]',x)[1],x), lines):
        #print(x)
        #print(re.findall('\d+', str(x[0])))
        # Optional per-line validation, disabled by default.
        check_format = False
        if check_format:
            check_line_format(line=x,line_no=line_no)
        line_no = line_no + 1
        # First run of digits inside the brackets is the two-digit year.
        new_year_key = re.findall('\d+', str(x[0]))[0]
        if int(new_year_key)<20:
            new_year_key = "20" + new_year_key
        else:
            new_year_key = "19" + new_year_key
        if new_year_key in x_dict.keys():
            x_dict[new_year_key].append(x[1])
        else:
            x_dict[new_year_key] = [x[1]]
    print(f"item count = {len(x_dict)}")
    # Sort (year, lines) pairs descending, i.e. newest year first.
    x_dict_sorted = sorted(x_dict.items(),key=lambda item:item,reverse=True)
    print(f"item count = {len(x_dict_sorted)}")
    for x in x_dict_sorted:
        print(x[0],x)
    # Append (not overwrite) the grouped lines to the output file.
    with open(output_file_path, "a") as result_file:
        for books_per_year in x_dict_sorted:
            print(f"Year: {books_per_year[0]}")
            for book in books_per_year[1]:
                #print(book)
                result_file.write(book)
def sort_bibliography_kdd_2019(input_file_path, output_file_path, pub_year=None):
    """Convert a KDD-2019 title/"Authors: ..." listing (two lines per record)
    into merged "+ [YY], authors, title, KDDYYYY" lines appended to
    *output_file_path*.

    Bug fix: the original called ``check_line_format_kdd``, which does not
    exist anywhere in this module; the validator for this two-line
    "Authors: "-prefixed layout is ``check_line_format_kdd_2019``.
    """
    f = open(input_file_path)
    lines = f.readlines()
    f.close()
    check_line_format_kdd_2019(lines=lines)
    with open(output_file_path, "a") as result_file:
        line_no = 1
        for idx in range(len(lines)//2):
            print(f"line_no = {line_no}")
            print(lines[2*idx:(2*idx+2)])
            print(re.match('Authors: ', lines[2*idx:(2*idx+2)][1]))
            print(re.split('^Authors: ', lines[2*idx:(2*idx+2)][1]))
            # "+ [19], <authors>, <title>, KDD2019\n"
            merged_line = f"+ [{pub_year[2:]}], " + re.split('^Authors: ', lines[2*idx:(2*idx+2)][1])[1].replace("\n","") + ", " + lines[2*idx:(2*idx+2)][0].replace("\n",f", KDD{pub_year}\n")
            result_file.write(merged_line)
            line_no = line_no + 1
def sort_bibliography_kdd_2018(input_file_path, output_file_path, pub_year=None):
    """Merge two-line (title, authors) KDD-2018 records into
    "+ [YY], authors, title, KDDYYYY" lines appended to *output_file_path*.

    NOTE(review): ``check_line_format_kdd`` is not defined anywhere in this
    module, so this function raises NameError as written — it likely should
    call whichever check_line_format_kdd_* validator matches the 2018 layout
    (this format has no "Authors:" prefix stripping below).
    """
    f = open(input_file_path)
    lines = f.readlines()
    f.close()
    check_line_format_kdd(lines=lines)
    with open(output_file_path, "a") as result_file:
        line_no = 1
        for idx in range(len(lines)//2):
            print(f"line_no = {line_no}")
            print(lines[2*idx:(2*idx+2)])
            merged_line = f"+ [{pub_year[2:]}], " + lines[2*idx:(2*idx+2)][1].replace("\n","") + ", " + lines[2*idx:(2*idx+2)][0].replace("\n",f", KDD{pub_year}\n")
            result_file.write(merged_line)
            line_no = line_no + 1
# uses for 2017, 2016
def sort_bibliography_kdd_2017(input_file_path, output_file_path, pub_year=None):
    """Merge two-line (title, "Author(s):...") KDD records into
    "+ [YY], authors, title, KDDYYYY" lines (used for 2017 and 2016)."""
    f = open(input_file_path)
    lines = f.readlines()
    f.close()
    check_line_format_kdd_2017(lines=lines)
    with open(output_file_path, "a") as result_file:
        line_no = 1
        for idx in range(len(lines)//2):
            print(f"line_no = {line_no}")
            print(lines[2*idx:(2*idx+2)])
            print(re.match('Author\(s\):', lines[2*idx:(2*idx+2)][1]))
            print(re.split('^Author\(s\):', lines[2*idx:(2*idx+2)][1]))
            # Strip the "Author(s):" prefix before merging.
            merged_line = f"+ [{pub_year[2:]}], " + re.split('^Author\(s\):', lines[2*idx:(2*idx+2)][1])[1].replace("\n","") + ", " + lines[2*idx:(2*idx+2)][0].replace("\n",f", KDD{pub_year}\n")
            result_file.write(merged_line)
            line_no = line_no + 1
# uses for 2012
def sort_bibliography_kdd_2012(input_file_path, output_file_path, pub_year=None):
    """Merge four-line KDD-2012 records ("Title:" on line 2, "Author(s):"
    on line 3 of each record) into "+ [YY], authors, title, KDDYYYY" lines."""
    f = open(input_file_path)
    lines = f.readlines()
    f.close()
    check_line_format_kdd_2012(lines=lines)
    with open(output_file_path, "a") as result_file:
        line_no = 1
        for idx in range(len(lines)//4):
            print(f"line_no = {line_no}")
            print(lines[4*idx:(4*idx+4)])
            print(re.match('Title:', lines[4*idx:(4*idx+4)][1]))
            print(re.match('Author\(s\):', lines[4*idx:(4*idx+4)][2]))
            print(re.split('^Title:', lines[4*idx:(4*idx+4)][1]))
            print(re.split('^Author\(s\):', lines[4*idx:(4*idx+4)][2]))
            # Strip the "Title:"/"Author(s):" prefixes before merging.
            merged_line = f"+ [{pub_year[2:]}], " + re.split('^Author\(s\):', lines[4*idx:(4*idx+4)][2])[1].replace("\n","") + ", " + re.split('^Title:', lines[4*idx:(4*idx+4)][1])[1].replace("\n",f", KDD{pub_year}\n")
            result_file.write(merged_line)
            line_no = line_no + 1
# uses for 2015
def sort_bibliography_kdd_2015(input_file_path, output_file_path, pub_year=None):
    """Merge two-line (title, authors) KDD-2015 records into
    "+ [YY], authors, title, KDDYYYY" lines appended to *output_file_path*.

    No format validation is performed for this year's layout.
    """
    with open(input_file_path) as source:
        raw_lines = source.readlines()
    with open(output_file_path, "a") as result_file:
        # Walk the file two lines at a time: (title, authors) per record.
        for entry_no, pair_start in enumerate(range(0, 2 * (len(raw_lines) // 2), 2), start=1):
            pair = raw_lines[pair_start:pair_start + 2]
            print(f"line_no = {entry_no}")
            print(pair)
            title, authors = pair[0], pair[1]
            merged_line = (
                f"+ [{pub_year[2:]}], "
                + authors.replace("\n", "")
                + ", "
                + title.replace("\n", f", KDD{pub_year}\n")
            )
            result_file.write(merged_line)
# uses for 2019
def sort_bibliography_cikm_2019(input_file_path, output_file_path, pub_year=None):
    """Merge two-line (title, authors) CIKM-2019 records into
    "+ [YY], authors, title, CIKMYYYY" lines appended to *output_file_path*."""
    f = open(input_file_path)
    lines = f.readlines()
    f.close()
    with open(output_file_path, "a") as result_file:
        line_no = 1
        for idx in range(len(lines)//2):
            print(f"line_no = {line_no}")
            print(lines[2*idx:(2*idx+2)])
            merged_line = f"+ [{pub_year[2:]}], " + lines[2*idx:(2*idx+2)][1].replace("\n","") + ", " + lines[2*idx:(2*idx+2)][0].replace("\n",f", CIKM{pub_year}\n")
            result_file.write(merged_line)
            line_no = line_no + 1
# uses for 2018
def sort_bibliography_cikm_2018(input_file_path, output_file_path, pub_year=None):
    """Merge three-line CIKM-2018 records (title on line 1, authors on
    line 2; the third line is skipped) into "+ [YY], authors, title,
    CIKMYYYY" lines appended to *output_file_path*."""
    f = open(input_file_path)
    lines = f.readlines()
    f.close()
    with open(output_file_path, "a") as result_file:
        line_no = 1
        for idx in range(len(lines)//3):
            print(f"line_no = {line_no}")
            print(lines[3*idx:(3*idx+3)])
            merged_line = f"+ [{pub_year[2:]}], " + lines[3*idx:(3*idx+3)][1].replace("\n","") + ", " + lines[3*idx:(3*idx+3)][0].replace("\n",f", CIKM{pub_year}\n")
            result_file.write(merged_line)
            line_no = line_no + 1
def load_and_sort_bibliography():
    """Sort the main bibliography file into a timestamped "sorted_*" copy.

    Bug fix: the strftime pattern was '%Y%m%d%H%m%S', repeating the month
    (%m) where minutes (%M) were clearly intended for a YYYYMMDDHHMMSS
    timestamp.
    """
    input_filename = "bibliography.txt"
    output_filename = "sorted_"+str(time.strftime('%Y%m%d%H%M%S', time.localtime())) + input_filename
    sort_bibliography(input_file_path=input_filename,output_file_path=output_filename)
def load_and_sort_bibliography_from_kdd_local():
    """Process every KDD listing under bibliography/KDD/<year>/ into a
    timestamped output file next to it.

    Bug fix: the strftime pattern was '%Y%m%d%H%m%S' (month %m repeated
    where minutes %M were intended).
    NOTE(review): os.walk overwrites ``input_filenames`` on every directory,
    so only the last visited directory's files are processed; and splitting
    on "." breaks filenames containing extra dots — confirm both are
    acceptable for this layout.
    """
    pub_year = "2015"
    input_filenames = []
    root_dir = f"bibliography/KDD/{pub_year}/"
    for root, dirs, files in os.walk(root_dir):
        print(root)
        print(dirs)
        print(files)
        input_filenames = files
    for input_filename in input_filenames:
        output_filename = input_filename.split(".")[0] + str(time.strftime('%Y%m%d%H%M%S', time.localtime())) + "." + input_filename.split(".")[1]
        print(input_filename)
        print(output_filename)
        input_filename = root_dir + input_filename
        output_filename = root_dir + output_filename
        sort_bibliography_kdd_2015(input_file_path=input_filename,output_file_path=output_filename,pub_year=pub_year)
def load_and_sort_bibliography_from_kdd_with_scrapy():
    """Process KDD listings under bibliography/KDD/<year>/ (scrapy variant).

    Bug fix: the strftime pattern was '%Y%m%d%H%m%S' (month %m repeated
    where minutes %M were intended).
    NOTE(review): this body is an exact duplicate of
    load_and_sort_bibliography_from_kdd_local() — consider delegating.
    """
    pub_year = "2015"
    input_filenames = []
    root_dir = f"bibliography/KDD/{pub_year}/"
    for root, dirs, files in os.walk(root_dir):
        print(root)
        print(dirs)
        print(files)
        input_filenames = files
    for input_filename in input_filenames:
        output_filename = input_filename.split(".")[0] + str(time.strftime('%Y%m%d%H%M%S', time.localtime())) + "." + input_filename.split(".")[1]
        print(input_filename)
        print(output_filename)
        input_filename = root_dir + input_filename
        output_filename = root_dir + output_filename
        sort_bibliography_kdd_2015(input_file_path=input_filename,output_file_path=output_filename,pub_year=pub_year)
def load_and_sort_bibliography_from_cikm_local():
    """Process every CIKM listing under bibliography/CIKM/<year>/ into a
    timestamped output file next to it.

    Bug fix: the strftime pattern was '%Y%m%d%H%m%S' (month %m repeated
    where minutes %M were intended).
    """
    pub_year = "2018"
    input_filenames = []
    root_dir = f"bibliography/CIKM/{pub_year}/"
    for root, dirs, files in os.walk(root_dir):
        print(root)
        print(dirs)
        print(files)
        input_filenames = files
    for input_filename in input_filenames:
        output_filename = input_filename.split(".")[0] + str(time.strftime('%Y%m%d%H%M%S', time.localtime())) + "." + input_filename.split(".")[1]
        print(input_filename)
        print(output_filename)
        input_filename = root_dir + input_filename
        output_filename = root_dir + output_filename
        sort_bibliography_cikm_2018(input_file_path=input_filename,output_file_path=output_filename,pub_year=pub_year)
if __name__ == "__main__":
load_and_sort_bibliography_from_cikm_local()
|
20,072 | 40af738e1e93face139aebff06b859e36521a998 | # Problem Set 4A
# Name: Bilin Chen
# Collaborators: None
# Time Spent: x:xx
def get_permutations(sequence):
    '''
    Enumerate all permutations of a given string, recursively.

    sequence (string): an arbitrary non-empty string to permute.

    Returns: a list of all len(sequence)! permutations.  The ordering is
    implementation-defined.

    Example:
    >>> get_permutations('ab')
    ['ab', 'ba']
    '''
    # Base case: a single character has exactly one permutation.
    if len(sequence) == 1:
        return [sequence]
    head, tail = sequence[0], sequence[1:]
    # Weave the head character into every position of every permutation
    # of the remaining characters.
    woven = []
    for partial in get_permutations(tail):
        for pos in range(len(partial) + 1):
            woven.append(partial[:pos] + head + partial[pos:])
    return woven
if __name__ == '__main__':
# #EXAMPLE
# example_input = 'abc'
# print('Input:', example_input)
# print('Expected Output:', ['abc', 'acb', 'bac', 'bca', 'cab', 'cba'])
# print('Actual Output:', get_permutations(example_input))
# # Put three example test cases here (for your sanity, limit your inputs
# to be three characters or fewer as you will have n! permutations for a
# sequence of length n)
print('========', 'Test 1', '========')
test1 = 'a'
print('Input:', test1)
print('Expected Output:', ['a'])
print('Actual Output', get_permutations(test1))
print()
print('========', 'Test 2', '========')
test2 = 'abc'
print('Input:', test2)
print('Expected Output:', ['abc', 'acb', 'bac', 'bca', 'cab', 'cba'])
print('Actual Output', get_permutations(test2))
print()
print('========', 'Test 3', '========')
test3 = 'xyz'
print('Input:', test3)
print('Expected Output:', ['xyz', 'yxz', 'yzx', 'xzy', 'zxy', 'zyx'])
print('Actual Output', get_permutations(test3))
print()
print(get_permutations('abcd'))
|
20,073 | 9341ad318542a9a5fac5af8d7be09a4482f36722 | import random
# Legal card attributes; Card rejects anything outside these.
card_suit_list = ["Hearts", "Diamonds", "Clubs", "Spades"]
card_value_list = ["A","2","3","4","5","6","7","8","9","10","J","Q","K"]
class Card:
    """A single playing card; invalid suit/value degrade to "null"."""
    def __init__(self, suit, value):
        if suit in card_suit_list and value in card_value_list:
            self.suit = suit
            self.value = value
        else:
            # Same behaviour as the original: warn and build a sentinel card.
            print("ERROR: Insert a correct suit and value")
            self.suit = "null"
            self.value = "null"
    def __repr__(self):
        return f"{self.value} of {self.suit}"
class Deck:
    """A collection of cards with toggle-style dealing."""
    def __init__(self):
        self.card_deck = []
    def deal(self, *card_list):
        """Toggle each given card: remove it if present, add it otherwise."""
        for candidate in card_list:
            if candidate in self.card_deck:
                self.card_deck.remove(candidate)
            else:
                self.card_deck.append(candidate)
    def shuffle(self):
        """Randomise the in-place order of the deck."""
        random.shuffle(self.card_deck)
    def __repr__(self):
        header = "Deck has: \n"
        body = "".join(f"{card.value} of {card.suit}\n" for card in self.card_deck)
        return header + body
if __name__=="__main__":
    # Demo: toggle three cards into a deck, shuffle it, then toggle one back
    # out again (deal() removes a card that is already in the deck).
    card1 = Card("Hearts", "J")
    card2 = Card("Diamonds", "4")
    card3 = Card("Spades", "K")
    deck1 = Deck()
    deck1.deal(card1, card2, card3)
    print(deck1)
    deck1.shuffle()
    print(deck1)
    deck1.deal(card1)
    print(deck1)
20,074 | 96d9834fd127bc198b929c93da14ef7f6be22732 | from django.db import models
# Create your models here.
class Center(models.Model):
    """A transport center (city) identified by a short code."""
    # Short (max two characters) code displayed next to the city name.
    code = models.CharField(max_length=2)
    city = models.CharField(max_length=64)
    def __str__(self):
        return f"{self.city} ({self.code})"
class Transport(models.Model):
    """A route between two centers with its distance."""
    # origin = models.CharField(max_length=64)
    # NOTE(review): the reverse accessors "origin_city"/"dest_city" read
    # backwards (center.origin_city is the set of Transports *leaving* it) —
    # confirm naming before relying on them.
    origin = models.ForeignKey(Center, on_delete=models.CASCADE, related_name="origin_city")
    destination = models.ForeignKey(Center, on_delete=models.CASCADE, related_name="dest_city")
    #destination = models.CharField(max_length=64)
    # Sample data below uses 520 for Ulaanbaatar-Arkhangai — presumably km.
    distance = models.IntegerField()
    def __str__(self):
        return f"{self.id}. {self.origin} -с {self.destination}"
# python manage.py makemigrations
# python manage.py migrate
# python manage.py shell
# from transport.models import Transport
# t = Transport(origin="Ulaanbaatar", destination="Arkhangai", distance=520)
# t = Transport(origin="Улаанбаатар", destination="Архангай", distance=520)
# t.save()
# trans = Transport.objects.all()
# clear screen:
# import os
# os.system('cls||clear')
class Passenger(models.Model):
    """A passenger who may be booked on many transports (and vice versa)."""
    lastname = models.CharField(max_length=64)
    firstname = models.CharField(max_length=64)
    # blank=True allows a passenger with no booked transports.
    transports = models.ManyToManyField(Transport, blank=True, related_name="passengers")
    def __str__(self):
        return f"{self.lastname} овогтой {self.firstname}"
20,075 | 196f1e2dba58c5ac783ee6b7c4bc22fc187c1798 | from argparse import ArgumentParser
import pickle
import random
import torch
from torch import optim, nn
from torch.optim.lr_scheduler import ReduceLROnPlateau
from .methods import get_method, setup_parser as methods_setup_parser, DEFAULT_METHOD
from .analysis.types.type_set import DEFAULT_TYPE_SET_CLASS
from .utils.cuda import setup_cuda
from .utils import learn
def setup_parser(parser: ArgumentParser):
    """Register the training command-line options on *parser*, then let the
    methods module add its own options."""
    parser.add_argument("train_set", type=str)
    parser.add_argument("valid_set", type=str)
    parser.add_argument("-o", "--output", type=str, default="model/model")
    parser.add_argument("-m", "--method", type=str, default=DEFAULT_METHOD)
    parser.add_argument("-v", "--verbose", action="store_true")
    parser.add_argument("-e", "--epoch", type=int, default=50)
    parser.add_argument("-k", type=int, default=3)
    parser.add_argument("-s", "--seed", type=int, default=12345678)
    parser.add_argument("--no-shuffle", action="store_true")
    parser.add_argument("--lr", type=float, default=0.001)
    parser.add_argument("--type-set", type=str, default=DEFAULT_TYPE_SET_CLASS)
    parser.add_argument("-g", "--gpu", type=int, default=None)
    # NOTE(review): "--batch_size" uses an underscore while sibling flags use
    # dashes ("--no-shuffle", "--type-set") — kept for CLI compatibility.
    parser.add_argument("--batch_size", type=int, default=32)
    parser.add_argument("--optimizer", type=str, default="adam")
    methods_setup_parser(parser)
def get_optimizer(args, model: nn.Module) -> optim.Optimizer:
    """Build the optimizer selected by ``args.optimizer`` ("sgd" or "adam")
    over *model*'s parameters with learning rate ``args.lr``.

    Bug fix: an unrecognised optimizer name used to fall through and return
    None, which only crashed later and obscurely; it now raises ValueError
    at the point of misconfiguration.
    """
    if args.optimizer == "sgd":
        return optim.SGD(model.parameters(), lr=args.lr)
    if args.optimizer == "adam":
        return optim.Adam(model.parameters(), lr=args.lr)
    raise ValueError(f"unsupported optimizer: {args.optimizer!r}")
def main(args):
    """Train with the parsed CLI *args*: set up CUDA and seeds, build the
    method/model/optimizer, then alternate train and validation epochs,
    checkpointing both the best (lowest validation loss) and the last model.
    """
    # GPU
    setup_cuda(args.gpu)
    # Randomness
    torch.manual_seed(args.seed)
    random.seed(args.seed)
    # Method
    method = get_method(args.method, args, phase="train")
    # Training & Validation dataset
    # NOTE(review): pickle.load(open(...)) leaves the file handles open.
    train_set = pickle.load(open(args.train_set, "rb"))
    train_set = method.filter_ill_formed(train_set)
    valid_set = pickle.load(open(args.valid_set, "rb"))
    valid_set = method.filter_ill_formed(valid_set)
    # Model
    model = method.model()
    # Optimizer
    optimizer = get_optimizer(args, model)
    # NOTE(review): the LR schedule tracks validation *accuracy* ('max')
    # while checkpointing tracks validation *loss* — confirm this mix is
    # intended.
    scheduler = ReduceLROnPlateau(optimizer, 'max', patience=3, min_lr=1e-6, factor=0.1, verbose=True)
    # Best Performance
    minimal_loss = 1000000000
    # Epoch list
    for e in range(args.epoch):
        # Train
        model.train()
        learn.run(method, model, train_set, args, prompt=f"[Train] Epoch {e}", optimizer=optimizer, shuffle=not args.no_shuffle)
        # Validate
        model.eval()
        with torch.no_grad():
            validation_result = learn.run(method, model, valid_set, args, prompt=f"[Valid] Epoch {e}")
        scheduler.step(validation_result.accuracy)
        # Save the best performing model
        if validation_result.loss < minimal_loss:
            minimal_loss = validation_result.loss
            torch.save(model, f"{args.output}.best.model")
    # Save the last epoch model
    torch.save(model, f"{args.output}.last.model")
20,076 | 6bd6b6d6746375c7179f3e32b9b49ef416276afe | from turtle import Turtle
import random
class Food(Turtle):
    """The snake's food: a small circle that jumps to a random screen spot."""
    def __init__(self):
        super().__init__()
        self.shape("circle")
        self.penup()
        # Half the default turtle size, so the pellet is smaller than the snake.
        self.shapesize(stretch_wid=0.5, stretch_len=0.5)
        self.color("indigo")
        self.speed("fastest")
        # Start at a random position rather than the screen centre.
        self.disappear()
    def disappear(self):
        """
        Relocate the food to a random on-screen position.  Called when the
        snake makes contact, so the food "disappears" and reappears on a
        different part of the screen.
        """
        x_cor = random.randint(-270, 270)
        y_cor = random.randint(-270, 270)
        self.goto(x_cor, y_cor)
20,077 | cf5ba6713b37fd6d37d0085758531e46aea75e0e | from django.shortcuts import render, HttpResponse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.views.generic import ListView, DetailView
from django.contrib import messages
from .models import OrderItem, Order
from .forms import OrderCreateForm
from .tasks import order_created
from cart.cart import Cart
@login_required
def order_create(request):
    """Create an order from the session cart.

    POST with a non-empty cart: validate the form, create the order and its
    items, clear the cart, queue the confirmation task, render "created".
    POST with an empty cart: re-render the form with an error message.
    GET: render the blank order form.
    NOTE(review): several paths implicitly return None (invalid form on
    POST, and any branch where the profile has no address), which Django
    turns into a 500 — confirm intended handling.  The print() calls look
    like leftover debugging.
    """
    cart = Cart(request)
    if request.method == 'POST' and cart:
        customer = request.user.profile
        address = request.user.profile.address
        form = OrderCreateForm(customer, address, request.POST, request.FILES)
        if form.is_valid():
            order = form.save()
            # One OrderItem per cart line, snapshotting the price.
            for item in cart:
                OrderItem.objects.create(order=order,
                                         product=item['product'],
                                         price=item['price'],
                                         quantity=item['quantity'])
            # clear the cart
            cart.clear()
            # Asynchronous post-order task (e.g. notification email).
            order_created.delay(order.id)
            print('hello')
            return render(request,
                          'orders/order/created.html',
                          {'order': order})
    elif request.method == "POST" and not cart:
        if request.user.profile.address:
            print('no order item')
            customer = request.user.profile
            address = request.user.profile.address
            form = OrderCreateForm(customer, address)
            messages.error(request=request, message="No Item in Order")
            return render(request,
                          'orders/order/create.html',
                          {'cart': cart, 'form': form})
    else:
        if request.user.profile.address:
            customer = request.user.profile
            address = request.user.profile.address
            form = OrderCreateForm(customer, address)
        return render(request,
                      'orders/order/create.html',
                      {'cart': cart, 'form': form})
class OrderListView(LoginRequiredMixin, ListView):
    """List the logged-in customer's own orders."""
    model = Order
    template_name = "orders/order/list.html"
    context_object_name = "orders"
    def get_queryset(self):
        # Restrict to the current user's profile so customers never see
        # anyone else's orders.
        return self.model.objects.filter(customer=self.request.user.profile)
class OrderDetailView(LoginRequiredMixin, DetailView):
    """Show one order, only if it belongs to the logged-in customer."""
    model = Order
    template_name = "orders/order/detail.html"
    context_object_name = "order"
    def get_queryset(self):
        # Filtering here makes foreign orders 404 instead of leaking.
        return self.model.objects.filter(customer=self.request.user.profile)
|
20,078 | 89a2f81779dee4fa1030e96311b1a7c8262193ad | #!/usr/bin/env python
# -*- coding: GB2312 -*-
# Last modified:
"""docstring
"""
__revision__ = '0.1'
class Enrollment:
    '''Index an enrollment CSV (enrollment_id,username,course_id) three ways:
    by enrollment id, by user, and by course (Python 2).
    NOTE(review): the input file handle is never closed.
    '''
    def __init__(self, filename):
        fin = open(filename)
        # Skip the header row; the loop below also guards against a literal
        # "enrollment_id" first field in case the header reappears.
        fin.next()
        self.enrollment_info = {}
        self.user_info = {}
        self.user_enrollment_id = {}
        self.course_info = {}
        self.ids = []
        for line in fin:
            #enrollment_id,username,course_id
            enrollment_id,username,course_id = line.strip().split(",")
            if enrollment_id == "enrollment_id":
                continue
            self.ids.append(enrollment_id)
            self.enrollment_info[enrollment_id] = [username, course_id]
            # Per-user: the courses taken and the matching enrollment ids.
            if username not in self.user_info:
                self.user_info[username] = [course_id]
                self.user_enrollment_id[username] = [enrollment_id]
            else:
                self.user_info[username].append(course_id)
                self.user_enrollment_id[username].append(enrollment_id)
            # Per-course: the users enrolled.
            if course_id not in self.course_info:
                self.course_info[course_id] = [username]
            else:
                self.course_info[course_id].append(username)
        print "load Enrollment info over!",len(self.course_info),len(self.enrollment_info)
if __name__ == "__main__":
    # Smoke test: load the training enrollment file; the constructor's
    # summary print serves as the quick sanity check.
    enrollment = Enrollment("../data/train1/enrollment_train.csv")
|
20,079 | bfec6098d5ccbddeee8cf33823a9a75c31636808 | import socket
print('''
██▓███ ▓██ ██▓▄▄▄█████▓ ▒█████ ▒█████ ██▓ ██████
▓██░ ██▒▒██ ██▒▓ ██▒ ▓▒▒██▒ ██▒▒██▒ ██▒▓██▒ ▒██ ▒
▓██░ ██▓▒ ▒██ ██░▒ ▓██░ ▒░▒██░ ██▒▒██░ ██▒▒██░ ░ ▓██▄
▒██▄█▓▒ ▒ ░ ▐██▓░░ ▓██▓ ░ ▒██ ██░▒██ ██░▒██░ ▒ ██▒
▒██▒ ░ ░ ░ ██▒▓░ ▒██▒ ░ ░ ████▓▒░░ ████▓▒░░██████▒▒██████▒▒
▒▓▒░ ░ ░ ██▒▒▒ ▒ ░░ ░ ▒░▒░▒░ ░ ▒░▒░▒░ ░ ▒░▓ ░▒ ▒▓▒ ▒ ░
░▒ ░ ▓██ ░▒░ ░ ░ ▒ ▒░ ░ ▒ ▒░ ░ ░ ▒ ░░ ░▒ ░ ░
░░ ▒ ▒ ░░ ░ ░ ░ ░ ▒ ░ ░ ░ ▒ ░ ░ ░ ░ ░
░ ░ ░ ░ ░ ░ ░ ░ ░
░ ░
''')
# Connect to the chat peer; the hard-coded LAN address/port must match the
# server side of this toy chat.
s = socket.socket()
port = 12345
s.connect(('192.168.43.99', port))
def send_msg():
    """Prompt the user for one line and send it to the peer as UTF-8."""
    text = input("send msg: >> ")
    s.send(text.encode('utf-8'))
def recv_msg():
    """Block for up to 1 KiB from the peer and echo it to the console."""
    payload = s.recv(1024)
    print("recv msg: >> " + payload.decode('UTF-8'))
# Strictly alternating chat: send one line, then block waiting for the reply.
while True:
    send_msg()
    recv_msg()
|
20,080 | b4aeb930e4fe91414bd37165984c75a103294bba | from v8 import JSContext, JSEngine
from v8.ast import AST
try:
import json
except ImportError:
import simplejson as json
class TestAST:
    """AST-visitor tests for the v8 Python bindings.

    Each test compiles a small JavaScript snippet with JSEngine, walks the
    resulting AST with a purpose-built Checker subclass, and asserts node
    types, positions and string renderings.  The expected values (positions,
    runtime-call names, AST dumps) are tightly coupled to a specific v8
    build — treat failures here as binding/engine drift, not logic bugs.
    """

    class Checker(object):
        """Context-manager base for per-test AST visitors.

        Enters a JSContext on __enter__, records visited node kinds in
        ``self.called``, and proxies unknown attributes to the owning
        testcase so subclasses can call its assertion helpers.
        """
        def __init__(self, testcase):
            self.testcase = testcase
            self.called = []
        def __enter__(self):
            self.ctxt = JSContext()
            self.ctxt.enter()
            return self
        def __exit__(self, exc_type, exc_value, traceback):
            self.ctxt.leave()
        def __getattr__(self, name):
            return getattr(self.testcase, name)
        def test(self, script):
            # Compile, visit, and hand back the ordered list of node kinds seen.
            JSEngine().compile(script).visit(self)
            return self.called
        def onProgram(self, prog):
            # Capture both serialized forms, then walk declarations and body.
            self.ast = prog.toAST()
            self.json = json.loads(prog.toJSON())
            for decl in prog.scope.declarations:
                decl.visit(self)
            for stmt in prog.body:
                stmt.visit(self)
        def onBlock(self, block):
            for stmt in block.statements:
                stmt.visit(self)
        def onExpressionStatement(self, stmt):
            stmt.expression.visit(self)
            #print type(stmt.expression), stmt.expression
    # Verify block nodes, their break targets, and the AST/JSON dumps.
    def testBlock(self):
        class BlockChecker(TestAST.Checker):
            def onBlock(self, stmt):
                self.called.append('block')
                assert AST.NodeType.Block == stmt.type
                assert stmt.initializerBlock
                assert not stmt.anonymous
                target = stmt.breakTarget
                assert target
                assert not target.bound
                assert target.unused
                assert not target.linked
                assert 2 == len(stmt.statements)
                assert ['%InitializeVarGlobal("i", 0);', '%InitializeVarGlobal("j", 0);'] ==\
                    [str(s) for s in stmt.statements]
        with BlockChecker(self) as checker:
            assert ['block'] == checker.test("var i, j;")
            assert """FUNC
. NAME ""
. INFERRED NAME ""
. DECLS
. . VAR "i"
. . VAR "j"
. BLOCK INIT
. . EXPRESSION STATEMENT
. . . CALL RUNTIME
. . . . NAME InitializeVarGlobal
. . . . LITERAL "i"
. . . . LITERAL 0
. . EXPRESSION STATEMENT
. . . CALL RUNTIME
. . . . NAME InitializeVarGlobal
. . . . LITERAL "j"
. . . . LITERAL 0
""" == checker.ast
            assert [u'FunctionLiteral', {u'name': u''},
                [u'Declaration', {u'mode': u'VAR'},
                    [u'Variable', {u'name': u'i'}]
                ], [u'Declaration', {u'mode':u'VAR'},
                    [u'Variable', {u'name': u'j'}]
                ], [u'Block',
                    [u'ExpressionStatement', [u'CallRuntime', {u'name': u'InitializeVarGlobal'},
                        [u'Literal', {u'handle':u'i'}],
                        [u'Literal', {u'handle': 0}]]],
                    [u'ExpressionStatement', [u'CallRuntime', {u'name': u'InitializeVarGlobal'},
                        [u'Literal', {u'handle': u'j'}],
                        [u'Literal', {u'handle': 0}]]]
                ]
            ] == checker.json
    # Verify if/then/else node structure and rendered condition/branches.
    def testIfStatement(self):
        class IfStatementChecker(TestAST.Checker):
            def onIfStatement(self, stmt):
                self.called.append('if')
                assert stmt
                assert AST.NodeType.IfStatement == stmt.type
                assert 7 == stmt.pos
                assert stmt.hasThenStatement
                assert stmt.hasElseStatement
                assert "((value % 2) == 0)" == str(stmt.condition)
                assert "{ s = \"even\"; }" == str(stmt.thenStatement)
                assert "{ s = \"odd\"; }" == str(stmt.elseStatement)
                assert not stmt.condition.isPropertyName
        with IfStatementChecker(self) as checker:
            assert ['if'] == checker.test("var s; if (value % 2 == 0) { s = 'even'; } else { s = 'odd'; }")
    # Cover all four loop forms: for, for-in, while, do-while.
    def testForStatement(self):
        class ForStatementChecker(TestAST.Checker):
            def onForStatement(self, stmt):
                self.called.append('for')
                assert "{ j += i; }" == str(stmt.body)
                assert "i = 0;" == str(stmt.init)
                assert "(i < 10)" == str(stmt.condition)
                assert "(i++);" == str(stmt.nextStmt)
                target = stmt.continueTarget
                assert target
                assert not target.bound
                assert target.unused
                assert not target.linked
                assert not stmt.fastLoop
            def onForInStatement(self, stmt):
                self.called.append('forIn')
                assert "{ out += name; }" == str(stmt.body)
                assert "name" == str(stmt.each)
                assert "names" == str(stmt.enumerable)
            def onWhileStatement(self, stmt):
                self.called.append('while')
                assert "{ i += 1; }" == str(stmt.body)
                assert "(i < 10)" == str(stmt.condition)
            def onDoWhileStatement(self, stmt):
                self.called.append('doWhile')
                assert "{ i += 1; }" == str(stmt.body)
                assert "(i < 10)" == str(stmt.condition)
                assert 283 == stmt.condition.pos
        with ForStatementChecker(self) as checker:
            assert ['for', 'forIn', 'while', 'doWhile'] == checker.test("""
            var i, j;
            for (i=0; i<10; i++) { j+=i; }
            var names = new Array();
            var out = '';
            for (name in names) { out += name; }
            while (i<10) { i += 1; }
            do { i += 1; } while (i<10);
            """)
    # Cover declarations, direct calls, `new` calls and runtime calls.
    def testCallStatements(self):
        class CallStatementChecker(TestAST.Checker):
            def onVariableDeclaration(self, decl):
                self.called.append('var')
                var = decl.proxy
                if var.name == 's':
                    assert AST.VarMode.var == decl.mode
                    assert var.isValidLeftHandSide
                    assert not var.isArguments
                    assert not var.isThis
            def onFunctionDeclaration(self, decl):
                self.called.append('func')
                var = decl.proxy
                if var.name == 'hello':
                    assert AST.VarMode.var == decl.mode
                    assert decl.function
                    assert '(function hello(name) { s = ("Hello " + name); })' == str(decl.function)
                elif var.name == 'dog':
                    assert AST.VarMode.var == decl.mode
                    assert decl.function
                    assert '(function dog(name) { (this).name = name; })' == str(decl.function)
            def onCall(self, expr):
                self.called.append('call')
                assert "hello" == str(expr.expression)
                assert ['"flier"'] == [str(arg) for arg in expr.args]
                assert 159 == expr.pos
            def onCallNew(self, expr):
                self.called.append('callNew')
                assert "dog" == str(expr.expression)
                assert ['"cat"'] == [str(arg) for arg in expr.args]
                assert 191 == expr.pos
            def onCallRuntime(self, expr):
                self.called.append('callRuntime')
                assert "InitializeVarGlobal" == expr.name
                assert ['"s"', '0'] == [str(arg) for arg in expr.args]
                assert not expr.isJsRuntime
        with CallStatementChecker(self) as checker:
            assert ['var', 'func', 'func', 'callRuntime', 'call', 'callNew'] == checker.test("""
            var s;
            function hello(name) { s = "Hello " + name; }
            function dog(name) { this.name = name; }
            hello("flier");
            new dog("cat");
            """)
    # Cover throw, try/catch and try/finally node shapes.
    def testTryStatements(self):
        class TryStatementsChecker(TestAST.Checker):
            def onThrow(self, expr):
                self.called.append('try')
                assert '"abc"' == str(expr.exception)
                assert 66 == expr.pos
            def onTryCatchStatement(self, stmt):
                self.called.append('catch')
                assert "{ throw \"abc\"; }" == str(stmt.tryBlock)
                #FIXME assert [] == stmt.targets
                stmt.tryBlock.visit(self)
                assert "err" == str(stmt.variable.name)
                assert "{ s = err; }" == str(stmt.catchBlock)
            def onTryFinallyStatement(self, stmt):
                self.called.append('finally')
                assert "{ throw \"abc\"; }" == str(stmt.tryBlock)
                #FIXME assert [] == stmt.targets
                assert "{ s += \".\"; }" == str(stmt.finallyBlock)
        with TryStatementsChecker(self) as checker:
            assert ['catch', 'try', 'finally'] == checker.test("""
            var s;
            try {
                throw "abc";
            }
            catch (err) {
                s = err;
            };
            try {
                throw "abc";
            }
            finally {
                s += ".";
            }
            """)
    # Cover scalar, regexp, object and array literal nodes.
    def testLiterals(self):
        class LiteralChecker(TestAST.Checker):
            def onCallRuntime(self, expr):
                expr.args[1].visit(self)
            def onLiteral(self, litr):
                self.called.append('literal')
                assert not litr.isPropertyName
                assert not litr.isNull
                assert not litr.isTrue
            def onRegExpLiteral(self, litr):
                self.called.append('regex')
                assert "test" == litr.pattern
                assert "g" == litr.flags
            def onObjectLiteral(self, litr):
                self.called.append('object')
                assert 'constant:"name"="flier",constant:"sex"=true' ==\
                    ",".join(["%s:%s=%s" % (prop.kind, prop.key, prop.value) for prop in litr.properties])
            def onArrayLiteral(self, litr):
                self.called.append('array')
                assert '"hello","world",42' ==\
                    ",".join([str(value) for value in litr.values])
        with LiteralChecker(self) as checker:
            assert ['literal', 'regex', 'literal', 'literal'] == checker.test("""
            false;
            /test/g;
            var o = { name: 'flier', sex: true };
            var a = ['hello', 'world', 42];
            """)
    # Cover unary/binary/compare/count/assignment/conditional operators.
    def testOperations(self):
        class OperationChecker(TestAST.Checker):
            def onUnaryOperation(self, expr):
                self.called.append('unaryOp')
                assert AST.Op.BIT_NOT == expr.op
                assert "i" == expr.expression.name
                #print "unary", expr
            def onIncrementOperation(self, expr):
                self.fail()
            def onBinaryOperation(self, expr):
                self.called.append('binOp')
                if expr.op == AST.Op.BIT_XOR:
                    assert "i" == str(expr.left)
                    assert "-1" == str(expr.right)
                    assert 124 == expr.pos
                else:
                    assert "i" == str(expr.left)
                    assert "j" == str(expr.right)
                    assert 36 == expr.pos
            def onAssignment(self, expr):
                self.called.append('assign')
                assert AST.Op.ASSIGN_ADD == expr.op
                assert AST.Op.ADD == expr.binop
                assert "i" == str(expr.target)
                assert "1" == str(expr.value)
                assert 53 == expr.pos
                assert "(i + 1)" == str(expr.binOperation)
                assert expr.compound
            def onCountOperation(self, expr):
                self.called.append('countOp')
                assert not expr.prefix
                assert expr.postfix
                assert AST.Op.INC == expr.op
                assert AST.Op.ADD == expr.binop
                assert 71 == expr.pos
                assert "i" == expr.expression.name
                #print "count", expr
            def onCompareOperation(self, expr):
                self.called.append('compOp')
                if len(self.called) == 4:
                    assert AST.Op.EQ == expr.op
                    assert 88 == expr.pos # i==j
                else:
                    assert AST.Op.EQ_STRICT == expr.op
                    assert 106 == expr.pos # i===j
                assert "i" == str(expr.left)
                assert "j" == str(expr.right)
                #print "comp", expr
            def onConditional(self, expr):
                self.called.append('conditional')
                assert "(i > j)" == str(expr.condition)
                assert "i" == str(expr.thenExpr)
                assert "j" == str(expr.elseExpr)
                assert 144 == expr.thenExpr.pos
                assert 146 == expr.elseExpr.pos
        with OperationChecker(self) as checker:
            assert ['binOp', 'assign', 'countOp', 'compOp', 'compOp', 'binOp', 'conditional'] == checker.test("""
            var i, j;
            i+j;
            i+=1;
            i++;
            i==j;
            i===j;
            ~i;
            i>j?i:j;
            """)
    # Cover switch statements, case labels and the default case.
    def testSwitchStatement(self):
        class SwitchStatementChecker(TestAST.Checker):
            def onSwitchStatement(self, stmt):
                self.called.append('switch')
                assert 'expr' == stmt.tag.name
                assert 2 == len(stmt.cases)
                case = stmt.cases[0]
                assert not case.isDefault
                assert case.label.isString
                assert 0 == case.bodyTarget.pos
                assert 57 == case.pos
                assert 1 == len(case.statements)
                case = stmt.cases[1]
                assert case.isDefault
                assert None == case.label
                assert 0 == case.bodyTarget.pos
                assert 109 == case.pos
                assert 1 == len(case.statements)
        with SwitchStatementChecker(self) as checker:
            assert ['switch'] == checker.test("""
            switch (expr) {
                case 'flier':
                    break;
                default:
                    break;
            }
            """)
|
20,081 | f0cfa748e1ab76390905f833fbdb1e990484b3cd | import logging
import tempfile
import os
import subprocess as sp
import boto3
import json
import time
import shutil
# Root logger; AWS Lambda attaches its own handler, we only set the level.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
logger.info('got event: {}'.format(event['data']))
tmpdir = tempfile.mkdtemp()
f = open(tmpdir + '/leveldb_merge.input', 'w')
f.write(event['data'])
f.close()
result = {}
data = json.loads(event['data'])
cloud_files = [f['number'] for f in data['cloud_files']]
local_files= [f['number'] for f in data['local_files']]
start = time.time()
s3 = boto3.resource('s3')
for fnum in cloud_files + local_files:
s3.meta.client.download_file(os.environ['LEVELDB_BUCKET'],
'%06d.ldb' % fnum,
'%s/%06d.ldb' % (tmpdir, fnum))
end = time.time()
result['download_time'] = end - start
start = time.time()
res_json = sp.check_output(['./standalone_merger', os.environ['LEVELDB_REGION'], os.environ['LEVELDB_BUCKET'], tmpdir])
end = time.time()
result['merge_time'] = end - start
logger.info('got result: {}'.format(res_json))
result['data'] = res_json
start = time.time()
for f in json.loads(res_json):
fnum = f['number']
s3.meta.client.upload_file('%s/%06d.ldb' % (tmpdir, fnum),
os.environ['LEVELDB_BUCKET'],
'%06d.ldb' % fnum)
end = time.time()
result['upload_time'] = end - start
# clean tmpdir
shutil.rmtree(tmpdir)
return result
|
20,082 | abcf3bcb281f29529f5452615a94402ae043b3a3 | import urllib
from urllib import FancyURLopener
from random import choice
from BeautifulSoup import BeautifulSoup
import sys
from common_utils import *
import time
import os
import csv
import random
import requests
import time
from datetime import datetime, date #, time
### TODO: Need to correct for different date endianess formats!!!!
# TODO: Reorganize scrape sequence (to avoid repeating 60 identical queries)
# TODO: Refactor
#ELIMINATE REDUNDANCY FROM /12
######################################################################
# Define some variables
# Need to use xx format for year (e.g. 09, 12)
# https://www.google.com/search?hl={language}&tbm=nws&gl={location}&as_q={query}&as_occt=any&as_drrb=b&as_mindate={monthS}%2F{dayS}%2F{yearS}&as_maxdate={monthF}%2F{dayF}%2F{yearF}&tbs=cdr%3A1%2Ccd_min%3A{monthS}%2F{dayS}%2F{yearS}%2Ccd_max%3A{monthF}%2F{dayF}%2F{yearF}
# Google News advanced-search URL template; dates are injected as
# M%2FD%2FYY (URL-encoded M/D/YY) in both the query params and the tbs blob.
URL_BASE = 'https://www.google.com/search?hl={language}&tbm=nws&gl={location}&as_q={query}&as_occt=any&as_drrb=b&as_mindate={monthS}%2F{dayS}%2F{yearS}&as_maxdate={monthF}%2F{dayF}%2F{yearF}&tbs=cdr%3A1%2Ccd_min%3A{monthS}%2F{dayS}%2F{yearS}%2Ccd_max%3A{monthF}%2F{dayF}%2F{yearF}'
#URL_BASE_LIT = 'https://www.google.com/search?hl={language}&tbm=nws&gl={location}&as_q={query}&as_occt=any&as_drrb=b&as_mindate={dayS}%2F{monthS}%2F{yearS}&as_maxdate={dayF}%2F{monthF}%2F{yearF}&tbs=cdr%3A1%2Ccd_min%3A{dayS}%2F{monthS}%2F{yearS}%2Ccd_max%3A{dayF}%2F{monthF}%2F{yearF}'
# URL_BASE = 'https://www.google.com/search?hl=%(language)s&tbm=nws&gl=%(location)s&ras_q=%(query)s&as_occt=any&as_drrb=b&as_mindate=%(monthS)s%2F%(dayS)s%2F0%(yearS)s&as_maxdate=%(monthF)s%2F%(dayF)s%2F0%(yearF)s&tbs=cdr%3A1%2Ccd_min%3A%(monthS)s%2F%(dayS)s%2F0%(yearS)s%2Ccd_max%3A%(monthF)s%2F%(dayF)s%2F0%(yearF)s'
# OUTPUT_CSV = 'gnews-with-time.csv'
# Country code -> Google News language edition (only 'us' currently active).
COUNTRY_LANGS = {'us' : 'en' } #, 'in' : 'en', 'ng' : 'en', 'jp' : 'ja', 'hk' : 'zh-TW', 'kr' : 'ko', \
    #'tw' : 'zh-TW', 'cn' : 'zh-CN', 'in' : 'ml', 'mx' : 'es', 'co' : 'es', 'ar' : 'es', \
    #'fr' : 'fr', 'ca' : 'fr', 'be' : 'fr', 'be' : 'nl', 'br' : 'pt-BR', 'pt' : 'pt-PT', \
    #'cz' : 'cs', 'de' : 'de', 'it' : 'it', 'hu' : 'hu', 'nl' : 'nl', 'no' : 'no', 'at' : 'de', \
    #'pl' : 'pl', 'ch' : 'de', 'se' : 'sv', 'tr' : 'tr', 'vn' : 'vi', 'gr' : 'el', 'ru' : 'ru', 'ua' : 'ru', \
    #'ua' : 'uk', 'il' : 'iw', 'in' : 'hi', 'sa' : 'ar', 'lb' : 'ar', 'eg' : 'ar' }
# Language code -> column index in country-names-input.csv.
LANGS_CORR = {'en' : 1, 'es' : 2, 'tr' : 3, 'ja' : 4, 'it' : 5, 'zh' : 6, 'fr' : 7, 'de' : 8, \
              'ru' : 9, 'nl' : 10, 'iw' : 11, 'ar' : 12, 'el' : 13, 'pt' : 14, 'hi' : 15, 'ko' : 16, \
              'vi': 17, 'uk' : 18, 'ml' : 19, 'hu' : 20, 'no' : 21, 'pl' : 22, 'sv': 23}
# NOTE(review): this file handle is never closed, and the whole proxy list is
# assumed to live on the first line of the CSV.
PROXY_INPUT = open('proxyraw_goodconf.csv', 'r')
PROXY_LIST = PROXY_INPUT.readline().split(',')
FILENAME_BASE = 'gnews_time_output'
# NOTE(review): time.clock() is wall/CPU-ambiguous and removed in Python 3.8.
START_TIME = time.clock()
print "Start time: " + str(START_TIME)
# TODO: Use proxies
PROXIES = {'http' : 'http://' + '{}'.format(PROXY_LIST[random.randint(0,len(PROXY_LIST)-1)])}
# Open language-country mappings
input = open('country-names-input.csv', 'r')
# List of User Agents
# USER_AGENTS = ['Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0; T312461)']#, \
# 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)', \
# 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648)', \
# 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:0.9.3) Gecko/20010801', \
# 'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/xx (KHTML like Gecko) OmniWeb/v5xx.xx']
# Create a subclass of fancyurlopener that uses a specific user agent (html varies from one to another)
class gNewsOpener(FancyURLopener, object):
    # Fixed User-Agent string so Google serves a predictable HTML layout.
    version = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0; T312461)'
# Define special exception for Captchas
class CaptchaException(Exception):
def __init__(self):
return
def __str__(self):
print "Rate Limit Exceeded"
#def getProxy():
# try:
# proxy = {'http': "http://"+options.proxy}
# opener = urllib.FancyURLopener(proxy)
# except(socket.timeout):
# print "\n(-) Proxy Timed Out"
# sys.exit(1)
# except(),msg:
# print "\n(-) Proxy Failed"
# sys.exit(1)
#return opener
# Dictionary of dates in question: two-digit year -> months to scrape.
# Fix: this line was bare prose with no leading '#', a SyntaxError that
# prevented the whole script from even parsing.
dates_dict = {'07': range(1,13), '08': range(1,13), '09': range(1,13),
              '10': range(1,13), '11': range(1,13), '12': range(1,5)}
def writeHeader():
    """Write the CSV header row (time_period, then one country per column).

    NOTE(review): reads the module-level ``output`` file handle and the
    module-level ``input`` file iterator; calling this consumes ``input``,
    which the caller must re-open afterwards — confirm against the main loop.
    """
    # Write header of countries
    output.write("time_period")
    for line in input:
        # Each '/'-separated group is a bracketed, comma-separated query list.
        query_group_list = [query_group[1:-1].split(',') for query_group in line.split("/")]
        output.write("," + query_group_list[1][0])
# For each language edition
# Main scrape loop: for each (country, language) edition, open an output CSV,
# then for each query group count Google News results per month.
# NOTE(review): this Python 2 block has several latent bugs left in place:
# `column` is unbound for unmapped languages; `total_count` is never
# initialised before `+=`; `year_tuple` is undefined; `PROXIES` is a dict so
# `.remove(...)` raises AttributeError; `.values[0]` is missing call parens;
# and `except CaptchaException, IndexError` binds IndexError as the variable
# instead of catching it.
for country in COUNTRY_LANGS:
    language = COUNTRY_LANGS[country]
    # Fix multiple editions in zh and pt issue, assign which column in countrynames to use
    if language == 'zh-CN' or language == 'zh-TW':
        column = LANGS_CORR['zh']
    elif language == 'pt-PT' or language == 'pt-BR':
        column = LANGS_CORR['pt']
    elif language in LANGS_CORR:
        column = LANGS_CORR[language]
    print "Country: " + country + ", Language: " + language + ", Column: " + str(column)
    # Open new file for this edition
    outputfilename = append_to_filename(FILENAME_BASE, country + "_" + language)
    #open(outputfilename + ".csv", 'w').close()
    output = open(outputfilename, 'w')
    print "Created file: " + outputfilename
    # Write header of countries
    writeHeader()
    # Open language-country mappings
    input = open('country-names-input.csv', 'r')
    # Iterate through months
    for year, months in dates_dict.iteritems():
        for month in months:
            output.write("," + str(month) + "/1/" + year + "-")
    # Iterate through countrynames input (will be rows)
    for line in input:
        # Turn each line into a list of list of queries (can be multiple per lang)
        query_group_list = [query_group[1:-1].split(',') for query_group in line.split("/")]
        output.write(query_group_list[1][0])
        length = len(query_group_list[column])
        print "Length of group: " + str(length)
        # For every query in a list of queries
        for query in query_group_list[column]:
            # Fresh opener with a random proxy for each query.
            gnewsopener = gNewsOpener(proxies={'http' : 'http://' + '{}'.format(PROXY_LIST[random.randint(0,len(PROXY_LIST)-1)])})
            print gnewsopener.proxies.values()[0]
            print "User Agent: " + gnewsopener.version
            print "Search query in " + language + ": " + query
            try:
                if month < 12:
                    URL = URL_BASE.format(language = language, location = country, \
                        query = query, monthS = month, dayS = 1, yearS = year, monthF = month + 1, \
                        dayF = 1, yearF = year)
                elif month == 12:
                    URL = URL_BASE.format(language = language, location = country, \
                        query = query, monthS = 12, dayS = '1', yearS = year, monthF = 1, \
                        dayF = '1', yearF = year_tuple[year_tuple.index(year)+1])
                page = gnewsopener.open(URL)
                #print "URL: " + URL
                soup = BeautifulSoup(page)
                print soup.findAll('b')[0:5]
                if len(soup.findAll('b')) < 3:
                    print "Captcha time!"
                    raise CaptchaException
                # The result count is the largest bold number on the page.
                num_results = max([int(ele.getText().replace(',','')) for ele in soup.findAll('b') if ele.getText().replace(',','').isdigit()])
                if num_results > 1:
                    total_count+=num_results
            except CaptchaException, IndexError:
                raise SystemExit
            except ValueError:
                PROXIES.remove(gnewsopener.proxies.values[0])
                print "Removed: " + str(gnewsopener.proxies.values[0])
                gnewsopener = gNewsOpener(proxies=PROXIES)
        print "Total count: " + str(total_count)
        average_count = total_count/length
        print "Average count: " + str(average_count)
        output.write("," + str(average_count))
    output.write("\n")
    input = open('country-names-input.csv', 'r')
# NOTE(review): concatenating a float END_TIME to a str raises TypeError.
END_TIME = time.clock()
print "Ended at: " + END_TIME
print "Total scrape took: " + str(END_TIME - START_TIME)
output.close()
|
20,083 | 1a5e1308b904a6dabf4f56e0aee7683770905730 | import sys
sys.path.append("/Users/niall/codeclan_work/final_project/")
from fireDetectCNN import inceptionMap
import cv2
import math as m
from fireDetectCNN.inceptionMap import construct_inceptionv1onfire
import os
# InceptionCNN
if __name__ == '__main__':
    # Build the InceptionV1-OnFire network at the 224x224 input size the
    # saved weights were trained for, then classify webcam frames live.
    model = construct_inceptionv1onfire (224, 224, training=False)
    # model.load(os.path.join("models/InceptionV4-OnFire", "inceptionv4onfire"),weights_only=True)
    model.load(os.path.join("modelsExperimental/InceptionV1-OnFire", "inceptiononv1onfire"),weights_only=True)
    print("[INFO] Loaded CNN network weights ...")
    # network input sizes - model layout must match weights pattern
    rows = 224
    cols = 224
    # display and loop settings
    windowName = "Inception V1"
    keepProcessing = True
    # initialise webcam input
    video = cv2.VideoCapture(0)
    print("[INFO] Loaded video ...")
    # open window
    cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
    # grab video info
    width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = video.get(cv2.CAP_PROP_FPS)
    frame_time = round(1000/fps)
    while (keepProcessing):
        start_t = cv2.getTickCount()
        ret, frame = video.read()
        if not ret:
            print("[INFO] ... end of video file reached")
            break
        # re-size image to network input size and perform prediction
        # NOTE(review): cv2.resize's third positional parameter is `dst`,
        # not `interpolation` — likely intended interpolation=cv2.INTER_AREA.
        small_frame = cv2.resize(frame, (rows, cols), cv2.INTER_AREA)
        # perform prediction on the image frame which is:
        # - an image (tensor) of dimension 224 x 224 x 3
        # Note tensor must be same size as network input requirements. Refer to pixel convulutions.
        # - a 3 channel colour image with channel ordering BGR (not RGB)
        output = model.predict([small_frame])
        # label image based on prediction
        if round(output[0][0]) == 1: # equiv. to 0.5 threshold in [Dunnings / Breckon, 2018], [Samarth/Bhowmik/Breckon, 2019] test code
            cv2.putText(frame,'FIRE',(int(width/16),int(height/4)),
                cv2.FONT_HERSHEY_SIMPLEX, 4,(0,0,255),10,cv2.LINE_AA)
        else:
            cv2.putText(frame,'CLEAR',(int(width/16),int(height/4)),
                cv2.FONT_HERSHEY_SIMPLEX, 4,(255,255,255),10,cv2.LINE_AA)
        # stop the timer and convert to ms
        stop_t = ((cv2.getTickCount() - start_t)/cv2.getTickFrequency()) * 1000
        # video stream display
        cv2.imshow(windowName, frame)
        # wait fps time or less depending on processing time taken (e.g. 1000ms / 25 fps = 40 ms)
        key = cv2.waitKey(max(2, frame_time - int(m.ceil(stop_t)))) & 0xFF
        # exit key
        if (key == ord('x')):
            keepProcessing = False
        elif (key == ord('f')):
            cv2.setWindowProperty(windowName, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
|
20,084 | 63e64e41fae0d9995fb311b58bba510ff1a24174 | import pygame
import time
from pygame.locals import *
import classdefs
# To execute: $ python main.py
# NOTE(review): pygame.init() returns (modules_initialised, modules_failed),
# so the label text below is slightly misleading — confirm intent.
[a, b] = pygame.init()
print ("Modules loaded: ", a, "--- Module errors: ", b)
screen = pygame.display.set_mode((800, 600))
background = pygame.Surface(screen.get_size())
background.fill((0, 0, 0))
# instantiate our player; right now he's just a rectangle
player = classdefs.Player()
player.rect.x = 400
player.rect.y = 300
# Sprite groups: collision sets per entity kind plus a draw-everything group.
enemies = pygame.sprite.Group()
food = pygame.sprite.Group()
venomous_food = pygame.sprite.Group()
all_sprites = pygame.sprite.Group()
all_sprites.add(player)
bullets = pygame.sprite.Group()
bosses = pygame.sprite.Group()
# Program events
ADDENEMY = pygame.USEREVENT + 1
ADDFOOD = pygame.USEREVENT + 2
ADDVENOMOUSFOOD = pygame.USEREVENT + 3
VENOMSTATE = pygame.USEREVENT + 4
ADDBOSS = pygame.USEREVENT + 5
# Spawn timers (ms): enemies each second, food twice a second, etc.
pygame.time.set_timer(ADDENEMY, 1000)
pygame.time.set_timer(ADDFOOD, 500)
pygame.time.set_timer(ADDVENOMOUSFOOD, 2000)
pygame.time.set_timer(ADDBOSS, 6800)
# Set score
font = pygame.font.Font(None, 36)
text_r = font.render("Score: " + str(player.points), 1, (0, 255, 127))
textpos = text_r.get_rect(centerx=background.get_width() / 2)
# Variable to keep our main loop running
running = True
# Our main loop!
while running:
    # for loop through the event queue
    for event in pygame.event.get():
        # Check for KEYDOWN event; KEYDOWN is a constant defined in
        # pygame.locals, which we imported earlier
        if event.type == KEYDOWN:
            # If the Esc key has been pressed set running to false to exit the
            # main loop
            if event.key == K_ESCAPE or event.key == K_c:
                running = False
            elif event.key == K_SPACE:
                new_bullet = classdefs.Shoot(player.rect)
                bullets.add(new_bullet)
                all_sprites.add(new_bullet)
        # Check for QUIT event; if QUIT, set running to false
        elif event.type == QUIT:
            running = False
        elif event.type == ADDENEMY:
            new_enemy = classdefs.Enemy()
            enemies.add(new_enemy)
            all_sprites.add(new_enemy)
        elif event.type == ADDFOOD:
            new_food = classdefs.Food()
            new_food.surf = pygame.Surface((10, 10))
            new_food.surf.fill((0, 255, 0))
            food.add(new_food)
            all_sprites.add(new_food)
        elif event.type == ADDVENOMOUSFOOD:
            new_vfood = classdefs.VenomousFood()
            new_vfood.surf = pygame.Surface((10, 10))
            new_vfood.surf.fill((255, 0, 0))
            venomous_food.add(new_vfood)
            all_sprites.add(new_vfood)
        elif event.type == VENOMSTATE:
            pygame.time.set_timer(VENOMSTATE, 0)  # Delete timer
            player.velocity = 2
            print("Got back to normal state")
        elif event.type == ADDBOSS:
            new_boss = classdefs.Boss()
            bosses.add(new_boss)
            all_sprites.add(new_boss)
    pressed_keys = pygame.key.get_pressed()
    player.update(pressed_keys)
    enemies.update()
    food.update()
    venomous_food.update()
    bullets.update()
    bosses.update()
    # Touching an enemy ends the game.
    collided_enemy = pygame.sprite.spritecollideany(player, enemies)
    if collided_enemy:
        print("You are DEAD! :(")
        collided_enemy.kill()
        player.kill()
        time.sleep(1)
        running = False
    collided_food = pygame.sprite.spritecollideany(player, food)
    if collided_food:
        collided_food.kill()
        player.points += 1
        print("Points: ", player.points)
    collided_vfood = pygame.sprite.spritecollideany(player, venomous_food)
    if collided_vfood:
        collided_vfood.kill()
        player.points -= 5
        print("Points: ", player.points)
        # Create timer
        pygame.time.set_timer(VENOMSTATE, 4000)
        # Increase speed
        # NOTE(review): this sets velocity to 1 (from the normal 2), i.e. a
        # poison slow-down — the comment above contradicts the code; confirm.
        player.velocity = 1
        print("Got poisoned")
    # Bullets hitting a boss: 5 hits kills it for a bonus.
    for entity in bullets:
        collided_boss = pygame.sprite.spritecollideany(entity, bosses)
        if collided_boss:
            player.points += 1
            entity.kill()  # Delete bullet
            collided_boss.hits += 1
            if (collided_boss.hits == 5):
                collided_boss.kill()
                player.points += 10
    # Update screen
    screen.blit(background, (0, 0))
    for entity in all_sprites:
        screen.blit(entity.surf, entity.rect)
    # Update score and render text
    text_r = font.render("Score: " + str(player.points), 1, (0, 255, 127))
    screen.blit(text_r, textpos)
    # Update the display
    pygame.display.flip()
    # time.sleep(0.1)
|
20,085 | 4f0a0697ccb49c88c23c44d793d41db3ce5efd2c | from driver_initialiser import driver_finder
import time
from selenium.webdriver.common.keys import Keys
# Open Google, accept the cookie-consent dialog, search for "Selenium",
# wait for results, then quit the browser.
driver = driver_finder()
driver.get("https://www.google.com")
driver.maximize_window()
time.sleep(5)  # crude wait for the consent dialog; an explicit wait would be sturdier
# Dismiss Google's cookie dialog.  (Fix: the original bound the None
# returned by click()/send_keys() to unused variables.)
driver.find_element_by_id("L2AGLb").click()
# Locate the search box once, type the query and submit with Enter.
search_box = driver.find_element_by_name("q")
search_box.send_keys("Selenium")
search_box.send_keys(Keys.RETURN)
time.sleep(5)
driver.quit()
|
20,086 | 95017f8193933514044aa23550003c33f2c8aa7c | # coding: utf-8
from __future__ import division, print_function
import tensorflow as tf
import numpy as np
import argparse
import cv2
import time
import os
from utils.misc_utils import parse_anchors, read_class_names
from utils.nms_utils import gpu_nms
from utils.plot_utils import get_color_table, plot_one_box
from utils.data_aug import letterbox_resize
from model import yolov3
# Command-line configuration: input video plus anchor/class/weight paths.
parser = argparse.ArgumentParser(description="YOLO-V3 video test procedure.")
parser.add_argument("input_video", type=str,
                    help="The path of the input video.")
parser.add_argument("--anchor_path", type=str, default="./data/yolo_anchors.txt",
                    help="The path of the anchor txt file.")
parser.add_argument("--new_size", nargs='*', type=int, default=[416, 416],
                    help="Resize the input image with `new_size`, size format: [width, height]")
parser.add_argument("--letterbox_resize", type=lambda x: (str(x).lower() == 'true'), default=True,
                    help="Whether to use the letterbox resize.")
parser.add_argument("--class_name_path", type=str, default="./data/coco.names",
                    help="The path of the class names.")
parser.add_argument("--restore_path", type=str, default="/media/lab/INTEL_SSD/610821239/yolo3/YOLOv3_TensorFlow/data/darknet_weights/yolov3.ckpt",
                    help="The path of the weights to restore.")
args = parser.parse_args()
# Derived settings are stashed onto the args namespace for convenience.
args.anchors = parse_anchors(args.anchor_path)
args.classes = read_class_names(args.class_name_path)
args.num_class = len(args.classes)
color_table = get_color_table(args.num_class)
# /media/lab/INTEL_SSD/610821239/yolo3/YOLOv3_TensorFlow/00.mp4
seq=[args.input_video]
'''
seq.append("/media/lab/INTEL_SSD/610821239/yolo3/YOLOv3_TensorFlow/MOT16-12.mp4")
seq.append("/media/lab/INTEL_SSD/610821239/yolo3/YOLOv3_TensorFlow/MOT16-06.mp4")
seq.append("/media/lab/INTEL_SSD/610821239/yolo3/YOLOv3_TensorFlow/MOT16-01.mp4")
seq.append("/media/lab/INTEL_SSD/610821239/yolo3/YOLOv3_TensorFlow/MOT16-08.mp4")
seq.append("/media/lab/INTEL_SSD/610821239/yolo3/YOLOv3_TensorFlow/MOT16-13.mp4")
seq.append("/media/lab/INTEL_SSD/610821239/yolo3/YOLOv3_TensorFlow/MOT16-14.mp4")
seq.append("/media/lab/INTEL_SSD/610821239/yolo3/YOLOv3_TensorFlow/MOT16-.mp4")
seq.append("/media/lab/INTEL_SSD/610821239/yolo3/YOLOv3_TensorFlow/MOT16-03.mp4")
#seq=["C:/Users/610521248/Desktop/610821239/yolo3/YOLOv3_TensorFlow/MOT16-01.mp4"]
'''
# For each input sequence: run YOLOv3 person detection frame-by-frame and
# write an MOT-format det.txt (frame,-1,x,y,w,h,score,-1,-1,-1).
for seqinfo in seq:
    # NOTE(review): seqinfo[55:] assumes a fixed 55-char absolute path
    # prefix on the input video — breaks for other locations; confirm.
    path = "/media/lab/INTEL_SSD/610821239/dataset/2DMOT2015/test/"+seqinfo[55:]+"-YOLO/det/"
    if (os.path.exists(path)):
        f= open(path+"det.txt","w")
    else:
        os.makedirs(path)
        f= open(path+"det.txt","w")
    #parser = argparse.ArgumentParser(description="YOLO-V3 video test procedure.")
    #parser.add_argument("input_video", type=str,default="E:/OBJECT_DECTECT/yolo3/YOLOv3_TensorFlow/MOT16-02.mp4",
    #                   help="The path of the input video.")
    vid = cv2.VideoCapture(seqinfo)
    # Legacy numeric property ids: 7=frame count, 3=width, 4=height, 5=fps.
    video_frame_cnt = int(vid.get(7))
    video_width = int(vid.get(3))
    video_height = int(vid.get(4))
    video_fps = int(vid.get(5))
    '''
    #if args.save_video:
    #    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    #    videoWriter = cv2.VideoWriter('video_result.mp4', fourcc, video_fps, (video_width, video_height))
    '''
    # NOTE(review): the graph is rebuilt inside the per-sequence loop — with
    # more than one sequence this re-declares the 'yolov3' variable scope.
    with tf.Session() as sess:
        input_data = tf.placeholder(tf.float32, [1, args.new_size[1], args.new_size[0], 3], name='input_data')
        yolo_model = yolov3(args.num_class, args.anchors)
        with tf.variable_scope('yolov3'):
            pred_feature_maps = yolo_model.forward(input_data, False)
        pred_boxes, pred_confs, pred_probs = yolo_model.predict(pred_feature_maps)
        pred_scores = pred_confs * pred_probs
        boxes, scores, labels = gpu_nms(pred_boxes, pred_scores, args.num_class, max_boxes=200, score_thresh=0.3, nms_thresh=0.45)
        saver = tf.train.Saver()
        saver.restore(sess, args.restore_path)
        #fp = open("filename.txt", "w")
        for frame in range((video_frame_cnt)):
            ret, img_ori = vid.read()
            if args.letterbox_resize:
                img, resize_ratio, dw, dh = letterbox_resize(img_ori, args.new_size[0], args.new_size[1])
            else:
                height_ori, width_ori = img_ori.shape[:2]
                img = cv2.resize(img_ori, tuple(args.new_size))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = np.asarray(img, np.float32)
            img = img[np.newaxis, :] / 255.
            start_time = time.time()
            boxes_, scores_, labels_ = sess.run([boxes, scores, labels], feed_dict={input_data: img})
            end_time = time.time()
            # rescale the coordinates to the original image
            if args.letterbox_resize:
                boxes_[:, [0, 2]] = (boxes_[:, [0, 2]] - dw) / resize_ratio
                boxes_[:, [1, 3]] = (boxes_[:, [1, 3]] - dh) / resize_ratio
            else:
                boxes_[:, [0, 2]] *= (width_ori/float(args.new_size[0]))
                boxes_[:, [1, 3]] *= (height_ori/float(args.new_size[1]))
            for i in range(len(boxes_)):
                x0, y0, x1, y1 = boxes_[i]
                plot_one_box(img_ori, [x0, y0, x1, y1], label=args.classes[labels_[i]] + ', {:.2f}%'.format(scores_[i] * 100), color=color_table[labels_[i]])
                w=x1-x0
                h=y1-y0
                # Keep only class 0 ("person" in COCO ordering) detections.
                if labels_[i] != 0 :
                    continue
                # Write this detection record to the output file.
                det=[frame+1,-1,x0,y0,w,h,scores_[i],-1,-1,-1]
                print(','.join(map(str,det)),file=f)
            print(str(frame)+'/'+str(video_frame_cnt))
        # Close the detections file.
        f.close()
    vid.release()
#python video_test.py ./data/demo_data/video.mp4
20,087 | 280d7d3d99f170837ab6b8718a72b3b269b343e4 | #/usr/bin/python3
from sys import argv as _argv
from subprocess import run as _run
from os import path as _path
from os import getcwd as _getcwd
from time import sleep as _sleep

# For every package named on the command line: download the package plus its
# full recursive dependency closure into a directory named after the package.
# SECURITY NOTE: arguments are interpolated into shell commands with
# shell=True, so a malicious argument could inject shell syntax.  Only run
# with trusted package names.
args = _argv[1:]
pwd = _getcwd()
for arg in args:
    # Create the per-package target directory on first use.
    # NOTE(review): original indentation was lost; the download/mv/sleep steps
    # are assumed to run for every package, not only when the dir is new.
    if not _path.exists(arg):
        _run('mkdir {dir_name}'.format(dir_name=arg), shell=True)
    # `apt-cache depends --recurse` prints the dependency closure; grep "^\w"
    # keeps only the package-name lines (dependency detail lines are indented).
    # Raw strings avoid the invalid "\w" escape warning in Python 3.12+.
    _run(r'apt-get download $(apt-cache depends --recurse --no-recommends --no-suggests '
         r'--no-conflicts --no-breaks --no-replaces --no-enhances '
         r'--no-pre-depends {arg} | grep "^\w")'.format(arg=arg), shell=True)
    # Sweep the freshly downloaded .deb files into the per-package directory.
    _run('mv *.deb {pwd}/{dir_name}'.format(pwd=pwd, dir_name=arg), shell=True)
    _sleep(0.1)  # brief pause between packages
|
20,088 | 7680a387d35710aeb0817e121523116a33409294 | # Scrapy settings for dirbot project
SPIDER_MODULES = ['tripadvisorbot.spiders']   # packages Scrapy searches for spider classes
NEWSPIDER_MODULE = 'tripadvisorbot.spiders'   # where `scrapy genspider` places new spiders
DOWNLOAD_DELAY=2   # seconds to wait between requests to the same site (politeness throttle)
#ITEM_PIPELINES = ['tripadvisorbot.pipelines.FilterWordsPipeline']
|
20,089 | e96208feaea7cad2acba4172d8b1d63beb1158c5 | import logging
import schedule
import time
from app.app import init_app
def health_check():
    """Emit an INFO-level heartbeat showing the scheduler is still alive."""
    logging.info('Health check pass')
def default_run():
    """Register the recurring jobs on the module-level `schedule` registry."""
    logging.debug('Default run start...')
    # Registers health_check to run every minute; jobs only actually fire
    # when loop() pumps schedule.run_pending().
    schedule.every(1).minute.do(health_check)
    logging.info('Default run done.')
def loop():
    """Block forever, running any pending scheduled jobs once per minute."""
    logging.debug('Start loop')
    while True:
        schedule.run_pending()
        time.sleep(60)  # wake-up granularity: jobs can fire at most once a minute
if __name__ == '__main__':
    # App-level initialisation (project-defined; presumably sets up logging
    # and config -- confirm against app.app.init_app).
    init_app()
    default_run()  # register jobs
    loop()         # then pump them forever
|
20,090 | 33e9d1e7499bc4f4a5bbcff03fc0d3559f8689bd | from __future__ import unicode_literals
from django.db import models
from merchants.models import Merchant
# Create your models here.
class MenuCategory(models.Model):
    """A named grouping of menu items, optionally owned by a merchant."""
    name = models.CharField(max_length=100)
    # Nullable so a category can exist before being attached to a merchant;
    # deleting the merchant cascades to its categories.
    merchant = models.ForeignKey(Merchant, on_delete=models.CASCADE, null=True)

    def __str__(self):
        return self.name
class MenuItem(models.Model):
    """A purchasable entry on a menu, grouped under a MenuCategory."""
    # Nullable so an item can exist uncategorised; deleting the category
    # cascades to its items.
    menu_category = models.ForeignKey(MenuCategory, on_delete=models.CASCADE, null=True)
    entry_name = models.CharField(max_length=128)
    entry_description = models.CharField(max_length=200)
    entry_price = models.DecimalField(max_digits=6, decimal_places=2)

    def __str__(self):
        # e.g. "Tea (hot drink) $2.50"
        return "{} ({}) ${}".format(self.entry_name, self.entry_description, self.entry_price)
|
20,091 | fddabd9c47e8ce87e01a62969a11fe79c5f17bdd | from selenium import webdriver
#driver = webdriver.Chrome(executable_path="C:\\chromedriver.exe")
#driver.get("https://www.apple.com")
#print(driver.title,"\n")
#print (driver.current_url)
#driver.maximize_window()
#driver.get("https://www.apple.com/mac/")
#river.get("https://rahulshettyacademy.com/ ")
#driver.close()
# Demo: fill in and submit the practice form at rahulshettyacademy.com,
# then print the success banner text.
# NOTE(review): the find_element_by_* helpers and the `executable_path`
# keyword are deprecated/removed in Selenium 4 -- presumably this targets
# Selenium 3; confirm before upgrading.
try:
    driver = webdriver.Chrome(executable_path="C:\\chromedriver.exe")
    driver.get("https://rahulshettyacademy.com/ ")
    #driver.quit()
    driver.get("https://rahulshettyacademy.com/angularpractice/")
    # Fill the form fields by various locator strategies (name/id/css/xpath).
    driver.find_element_by_name("name").send_keys("Perla ")
    driver.find_element_by_name("email").send_keys("xxx.@gmail.com")
    driver.find_element_by_id("exampleCheck1").click()
    #driver.find_element_by_id("exampleCheck1").click()
    # Overwrites... actually appends to the same "name" field via a CSS locator.
    driver.find_element_by_css_selector("input[name='name']").send_keys("abhilash")
    driver.find_element_by_css_selector("#exampleInputPassword1").send_keys("two")
    #driver.maximize_window()
    driver.find_element_by_xpath("//input[@type='submit']").click()
    #driver.find_element_by_id("exampleInptPasswoed1").send_keys("1234")
    #driver.refresh()
    #driver.back()
    #driver.minimize_window()
    #print(driver.find_element_by_class_name("alert").text)
    # Read the success alert two different ways (partial-class CSS and exact-class XPath).
    print(driver.find_element_by_css_selector("div[class *= 'alert-success']").text)
    print(driver.find_element_by_xpath("//div[@class='alert alert-success alert-dismissible']").text)
except Exception as e:
    # Broad catch: any locator/driver failure is just printed, not re-raised.
    print(e)
finally:
    #driver.quit()
    print("I dont want close")
|
20,092 | 1d7d37c41e8a9d1922004eab1311db6dc9a5b32f | import numpy as np
def edit_distance(a, b):
    """Return the Levenshtein (edit) distance between strings *a* and *b*.

    Classic dynamic-programming table: cell [r][c] holds the distance
    between the first r characters of *a* and the first c characters of *b*.
    """
    rows, cols = len(a) + 1, len(b) + 1
    table = np.zeros((rows, cols), dtype=np.int32)
    # Base cases: transforming to/from the empty string costs its length.
    table[:, 0] = np.arange(rows)
    table[0, :] = np.arange(cols)
    for r in range(1, rows):
        for c in range(1, cols):
            substitution = table[r - 1][c - 1] + (a[r - 1] != b[c - 1])
            table[r][c] = min(table[r - 1][c] + 1,   # deletion
                              table[r][c - 1] + 1,   # insertion
                              substitution)
    return table[rows - 1][cols - 1]
def main():
    """Demo entry point: print the edit distance for two sample pairs."""
    for left, right in (("HelloWorld", "Halloworld"), ("AGCCT", "ATCT")):
        print(edit_distance(left, right))
if __name__=="__main__":
    main()  # run the demo only when executed as a script
|
20,093 | f4072bc63c591c97444a1fac009ed3dc5e83b842 | import functools
import warnings
from collections import OrderedDict, defaultdict, namedtuple
import torch.autograd.profiler as torch_profiler
import csv
from .display import traces_to_display
# (path, leaf, module): one record per submodule visited by walk_modules;
# `path` is the tuple of names from the root, `leaf` means "no children".
Trace = namedtuple("Trace", ["path", "leaf", "module"])

# One row of collected KPI measurements for a single profiled module.
KPIObject = namedtuple(
    "KPIObject",
    [  # when attr value is None, profiler unsupported
        "model",
        "name",
        "self_cpu_total",
        "cpu_total",
        "self_cuda_total",
        "cuda_total",
        "self_cpu_memory",
        "cpu_memory",
        "self_cuda_memory",
        "cuda_memory",
        "occurrences",
    ],
)
def walk_modules(module, name="", path=()):
    """Generator. Walks through a PyTorch Module and outputs Trace tuples."""
    label = name or module.__class__.__name__
    children = list(module.named_children())
    trail = path + (label,)
    # A module with no registered children is a leaf.
    yield Trace(trail, not children, module)
    # Depth-first recursion into every submodule.
    for child_name, child in children:
        yield from walk_modules(child, name=child_name, path=trail)
class Profile(object):
    """Layer by layer profiling of PyTorch models, using the PyTorch autograd profiler.

    Used as a context manager: on entry, the ``forward`` of every selected
    submodule is wrapped to record autograd-profiler events; on exit the
    original ``forward`` methods are restored and the collected events can be
    displayed (:meth:`display`) or exported (:meth:`getKPIData`).

    Note: a stray git merge-conflict marker (``>>>>>>> ...``) that made this
    module unparseable has been removed.
    """

    def __init__(
        self, model, enabled=True, use_cuda=False, profile_memory=False, paths=None
    ):
        self._model = model
        self.enabled = enabled
        self.use_cuda = use_cuda
        self.profile_memory = profile_memory
        # Optional explicit collection of module paths to profile;
        # None means "profile every leaf module".
        self.paths = paths
        self.entered = False
        self.exited = False
        self.traces = ()
        # path -> list of event lists, one per forward() call of that module
        self.trace_profile_events = defaultdict(list)
        self.num_params = 0

    def __enter__(self):
        if not self.enabled:
            return self
        if self.entered:
            raise RuntimeError("Profiler is not reentrant")
        self.entered = True
        self._forwards = {}  # store the original forward functions
        self.num_params = self._count_parameters()
        self.traces = tuple(map(self._hook_trace, walk_modules(self._model)))
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self.enabled:
            return
        tuple(map(self._remove_hook_trace, self.traces))
        del self._forwards  # remove unnecessary forwards
        self.exited = True

    def __str__(self):
        return self.display()

    def __call__(self, *args, **kwargs):
        return self._model(*args, **kwargs)

    def _count_parameters(self):
        """Return the number of trainable parameters in the wrapped model."""
        return sum(p.numel() for p in self._model.parameters() if p.requires_grad)

    def _hook_trace(self, trace):
        """Replace the traced module's ``forward`` with a profiling wrapper.

        Only hooks modules whose path is in ``self.paths`` (or all leaves
        when ``self.paths`` is None).  Returns the trace unchanged.
        """
        [path, leaf, module] = trace
        if (self.paths is not None and path in self.paths) or (
            self.paths is None and leaf
        ):
            _forward = module.forward
            self._forwards[path] = _forward

            @functools.wraps(_forward)
            def wrap_forward(*args, **kwargs):
                try:
                    with torch_profiler.profile(
                        use_cuda=self.use_cuda, profile_memory=self.profile_memory
                    ) as prof:
                        res = _forward(*args, **kwargs)
                except TypeError:
                    # torch < 1.6 does not accept the profile_memory kwarg;
                    # warn once and retry without it.
                    if self.profile_memory:
                        warnings.warn(
                            "`profile_memory` is unsupported in torch < 1.6",
                            RuntimeWarning,
                        )
                    self.profile_memory = False
                    with torch_profiler.profile(use_cuda=self.use_cuda) as prof:
                        res = _forward(*args, **kwargs)
                event_list = prof.function_events
                if hasattr(event_list, "populate_cpu_children"):
                    event_list.populate_cpu_children()
                # each profile call should be contained in its own list
                self.trace_profile_events[path].append(event_list)
                return res

            module.forward = wrap_forward
        return trace

    def _remove_hook_trace(self, trace):
        """Restore the traced module's original ``forward``."""
        [path, leaf, module] = trace
        if (self.paths is not None and path in self.paths) or (
            self.paths is None and leaf
        ):
            module.forward = self._forwards[path]

    def raw(self):
        """Return ``(traces, trace_profile_events)`` once profiling finished, else None."""
        if self.exited:
            return (self.traces, self.trace_profile_events)

    def display(self, show_events=False):
        """Render the collected events as a human-readable table."""
        if self.exited:
            return traces_to_display(
                self.traces,
                self.trace_profile_events,
                show_events=show_events,
                paths=self.paths,
                use_cuda=self.use_cuda,
                profile_memory=self.profile_memory,
            )
        return "<unfinished profile>"

    # Gets the system resource usage for each profiled module and writes a CSV.
    def getKPIData(self, method, modelname):
        """Aggregate the recorded events per module path and export to CSV.

        :param method: string suffix used in the output file name
        :param modelname: model identifier stored in each KPI row
        :returns: path of the written CSV file
        """
        layers = []
        rows = []
        for trace in self.traces:
            [path, leaf, module] = trace
            current_layers = layers
            # unwrap all of the events, in case model is called multiple times
            events = [te for t_events in self.trace_profile_events[path] for te in t_events]
            for depth, name in enumerate(path, 1):
                if name not in current_layers:
                    current_layers.append(name)
                # Only emit a row for the final path component of modules that
                # were actually hooked (leaf, or explicitly listed in paths).
                if depth == len(path) and (
                    (self.paths is None and leaf) or (self.paths is not None and path in self.paths)
                ):
                    # Memory/self-CUDA attributes only exist on newer torch
                    # versions; leave None when unsupported.
                    self_cpu_memory = None
                    has_self_cpu_memory = any(hasattr(e, "self_cpu_memory_usage") for e in events)
                    if has_self_cpu_memory:
                        self_cpu_memory = sum([getattr(e, "self_cpu_memory_usage", 0) for e in events])
                    cpu_memory = None
                    has_cpu_memory = any(hasattr(e, "cpu_memory_usage") for e in events)
                    if has_cpu_memory:
                        cpu_memory = sum([getattr(e, "cpu_memory_usage", 0) for e in events])
                    self_cuda_memory = None
                    has_self_cuda_memory = any(hasattr(e, "self_cuda_memory_usage") for e in events)
                    if has_self_cuda_memory:
                        self_cuda_memory = sum(
                            [getattr(e, "self_cuda_memory_usage", 0) for e in events]
                        )
                    cuda_memory = None
                    has_cuda_memory = any(hasattr(e, "cuda_memory_usage") for e in events)
                    if has_cuda_memory:
                        cuda_memory = sum([getattr(e, "cuda_memory_usage", 0) for e in events])
                    # self CUDA time supported in torch >= 1.7
                    self_cuda_total = None
                    has_self_cuda_time = any(hasattr(e, "self_cuda_time_total") for e in events)
                    if has_self_cuda_time:
                        self_cuda_total = sum([getattr(e, "self_cuda_time_total", 0) for e in events])
                    kpiObject = self.format_measurements(
                        modelname, name, sum([e.self_cpu_time_total for e in events]),
                        sum([e.cpu_time_total for e in events]), self_cuda_total,
                        sum([e.cuda_time_total for e in events]), self_cpu_memory, cpu_memory,
                        self_cuda_memory, cuda_memory, len(self.trace_profile_events[path]))
                    rows.append(kpiObject)
        return self.exportToCSV(rows, method)

    # Formats the KPI measurements into the shape needed in the CSV files.
    def format_measurements(self, model, name, self_cpu_total, cpu_total, self_cuda_total,
                            cuda_total, self_cpu_memory, cpu_memory, self_cuda_memory,
                            cuda_memory, occurrences):
        """Convert units (times /1000, memory /1024 -- matching the CSV's
        'ms'/'kb' headers), replacing unsupported None values with 0, and
        return the populated KPIObject."""
        self_cpu_total = self_cpu_total / 1000.0
        cpu_total = cpu_total / 1000.0
        self_cuda_total = self_cuda_total / 1000.0 if self_cuda_total is not None else 0
        cuda_total = cuda_total / 1000.0 if cuda_total else 0
        self_cpu_memory = (
            self_cpu_memory / 1024.0
            if self_cpu_memory is not None
            else 0
        )
        cpu_memory = (
            cpu_memory / 1024.0
            if cpu_memory is not None
            else 0
        )
        self_cuda_memory = (
            self_cuda_memory / 1024.0
            if self_cuda_memory is not None
            else 0
        )
        cuda_memory = (
            cuda_memory / 1024.0
            if cuda_memory is not None
            else 0
        )
        occurrences = occurrences if occurrences else 0
        return KPIObject(
            model=model,
            name=name,
            self_cpu_total=self_cpu_total,
            cpu_total=cpu_total,
            self_cuda_total=self_cuda_total,
            cuda_total=cuda_total,
            self_cpu_memory=self_cpu_memory,
            cpu_memory=cpu_memory,
            self_cuda_memory=self_cuda_memory,
            cuda_memory=cuda_memory,
            occurrences=occurrences,
        )

    # Exports the system resources to a CSV file for us to use.
    def exportToCSV(self, rows, method):
        """Write the KPI rows to ``csv/<model><method>_KPI.csv``.

        :param rows: non-empty list of KPIObject
        :param method: string suffix for the file name
        :returns: the path of the written file
        """
        model = rows[0].model
        file = 'csv/' + model + method + "_KPI.csv"
        # newline='' per the csv module docs -- prevents blank rows on Windows.
        with open(file, 'w', newline='') as f:
            headers = ['MODULE', 'SELF_CPU_TOTAL', 'SELF_CPU_TIME_UOM', 'CPU_TOTAL', 'CPU_TOTAL_UOM',
                       'SELF_GPU_TOTAL', 'SELF_GPU_UOM', 'GPU_TOTAL', 'GPU_TOTAL_UOM',
                       'SELF_CPU MEM', 'SELF_CPU_MEM_UOM', 'CPU_MEM', 'CPU_MEM_UOM', 'SELF_GPU_MEM',
                       'SELF_GPU_MEM_UOM', 'GPU_MEM', 'GPU_MEM_UOM', 'NUMBER_OF_CALLS', 'NUMBER_OF_PARAMS']
            writer = csv.DictWriter(f, fieldnames=headers)
            writer.writeheader()
            for kpi in rows:
                writer.writerow({'MODULE': kpi.name,
                                 'SELF_CPU_TOTAL': kpi.self_cpu_total,
                                 'SELF_CPU_TIME_UOM': 'ms',
                                 'CPU_TOTAL': kpi.cpu_total,
                                 'CPU_TOTAL_UOM': 'ms',
                                 'SELF_GPU_TOTAL': kpi.self_cuda_total,
                                 'SELF_GPU_UOM': 'ms',
                                 'GPU_TOTAL': kpi.cuda_total,
                                 'GPU_TOTAL_UOM': 'ms',
                                 'SELF_CPU MEM': kpi.self_cpu_memory,
                                 'SELF_CPU_MEM_UOM': 'kb',
                                 'CPU_MEM': kpi.cpu_memory,
                                 'CPU_MEM_UOM': 'kb',
                                 'SELF_GPU_MEM': kpi.self_cuda_memory,
                                 'SELF_GPU_MEM_UOM': 'kb',
                                 'GPU_MEM': kpi.cuda_memory,
                                 'GPU_MEM_UOM': 'kb',
                                 'NUMBER_OF_CALLS': kpi.occurrences,
                                 'NUMBER_OF_PARAMS': self.num_params
                                 })
        return file
|
20,094 | b970d717c8deb68b4af4ac4dd3b99a2fe7ae7702 | import tkinter as tk
from tkinter import ttk
import create_tool_tip as tt
# Shared Century Gothic font presets used throughout the UI widgets.
cg_font=('century gothic', 18)#
cg_font2=('century gothic', 27, 'italic')#
cg_font3=('century gothic', 10)
cg_font4=('century gothic', 15)   # section labels
cg_font5=('century gothic', 14)   # field labels
class Search(tk.Frame):
    """Product-search form: an ID entry plus read-back fields for the
    product's attributes, with Search/Reset buttons.

    NOTE(review): Search currently fills the fields with hard-coded sample
    data (see clickMe2) -- presumably a placeholder for a database lookup.
    """
    def __init__(self, master=None):
        super().__init__(master)
        #self.resizable(0,0)
        self.pack()
        self.create_widgets()

    # Placeholder display values shared by all instances (class attribute);
    # create_widgets reads these via self.strx.
    strx=['xx','xx','xx','xx','xx','xx','xx', 'xx']

    def clickMe2(self):
        """Search button handler: populate the fields with (stub) results."""
        self.stry=['parle g', 'parle', '100', '3/12/2015, 3/12/2015', '15', 'yes', 'yes']
        self.pro.set(self.stry[0])
        self.brand.set(self.stry[1])
        self.quan.set(self.stry[2])
        self.date.set(self.stry[3])
        self.date2.set(self.stry[4])
        self.shelf.set(self.stry[5])
        self.form.set(self.stry[6])
        #self.glass.set(self.strx[7])

    def clickMe3(self):
        """Reset button handler: restore every field to the 'xx' placeholder."""
        self.pro.set('xx')
        self.brand.set('xx')
        self.quan.set('xx')
        self.date.set('xx')
        self.date2.set('xx')
        self.shelf.set('xx')
        self.form.set('xx')
        self.glass.set('xx')

    def create_widgets(self):
        """Build the form: one label+entry row per attribute, then buttons."""
        #PRODUCT ID ENTRY
        ttk.Label(self, text='Enter Product ID:', font=cg_font4).grid(column=0, row=0, sticky='EW', padx=8, pady=4)
        self.name5=tk.StringVar()
        self.nameEntered5=ttk.Entry(self, width=30, textvariable=self.name5)
        self.nameEntered5.grid(column=0, row=1,sticky='W', padx=8, pady=4)
        tt.createToolTip(self.nameEntered5, 'Enter Product ID')
        #PRODUCT NAME
        ttk.Label(self, text='Product:', font=cg_font5).grid(column=0, row=2, sticky='W', padx=8, pady=4)
        self.pro=tk.StringVar()
        self.pro.set(self.strx[0])
        proent=ttk.Entry(self, width=20, textvariable=self.pro)
        proent.grid(column=1, row=2,sticky='W', padx=8, pady=4)
        #tt.createToolTip(nameEntered5, 'Enter Product ID')
        #BRAND NAME
        ttk.Label(self, text='Brand:', font=cg_font5).grid(column=0, row=3, sticky='W', padx=8, pady=4)
        self.brand=tk.StringVar()
        self.brand.set(self.strx[1])
        brandent=ttk.Entry(self, width=20, textvariable=self.brand)
        brandent.grid(column=1, row=3,sticky='W', padx=8, pady=4)
        #QUANTITY
        ttk.Label(self, text='Quantity:', font=cg_font5).grid(column=0, row=4, sticky='W', padx=8, pady=4)
        self.quan=tk.StringVar()
        self.quan.set(self.strx[2])
        quanent=ttk.Entry(self, width=20, textvariable=self.quan)
        quanent.grid(column=1, row=4,sticky='W', padx=8, pady=4)
        #DATE OF ENTRY
        ttk.Label(self, text='Date of Entry:', font=cg_font5).grid(column=0, row=5, sticky='W', padx=8, pady=4)
        self.date=tk.StringVar()
        self.date.set(self.strx[3])
        dateent=ttk.Entry(self, width=20, textvariable=self.date)
        dateent.grid(column=1, row=5,sticky='W', padx=8, pady=4)
        #DATE OF PURCHASE
        ttk.Label(self, text='Date of Purchase:', font=cg_font5).grid(column=0, row=6, sticky='W', padx=8, pady=4)
        self.date2=tk.StringVar()
        self.date2.set(self.strx[4])
        date2ent=ttk.Entry(self, width=20, textvariable=self.date2)
        date2ent.grid(column=1, row=6,sticky='W', padx=8, pady=4)
        #SHELF NUMBER
        ttk.Label(self, text='Shelf Number:', font=cg_font5).grid(column=0, row=7, sticky='W', padx=8, pady=4)
        self.shelf=tk.StringVar()
        self.shelf.set(self.strx[5])
        shelfent=ttk.Entry(self, width=20, textvariable=self.shelf)
        shelfent.grid(column=1, row=7,sticky='W', padx=8, pady=4)
        #food or medicine
        ttk.Label(self, text='Food or Medicine?:', font=cg_font5).grid(column=0, row=8, sticky='W', padx=8, pady=4)
        self.form=tk.StringVar()
        self.form.set(self.strx[6])
        forment=ttk.Entry(self, width=20, textvariable=self.form)
        forment.grid(column=1, row=8, sticky='W', padx=8, pady=4)
        #GLASS ITEM
        ttk.Label(self, text='Glass Item?', font=cg_font5).grid(column=0, row=9, sticky='W', padx=8, pady=4)
        self.glass=tk.StringVar()
        self.glass.set(self.strx[7])
        glsent=ttk.Entry(self, width=20, textvariable=self.glass)
        glsent.grid(column=1, row=9,sticky='W', padx=8, pady=4)
        # Search / Reset buttons (Reset is placed above Search in the grid).
        self.action1=ttk.Button(self, text="Search", width=21, command= self.clickMe2)
        #action.configure(state='disabled')#widget gets disabled
        self.action1.grid(column=1, row=11, columnspan=2, sticky='w'+'e', padx=4, pady=2)
        tt.createToolTip(self.action1, 'Click to Search')
        self.action2=ttk.Button(self, text="Reset", width=21, command= self.clickMe3)
        #action.configure(state='disabled')#widget gets disabled
        self.action2.grid(column=1, row=10, columnspan=2, sticky='w'+'e', padx=4, pady=2)
        tt.createToolTip(self.action2, 'Click to Reset')
def search(lab_frame):
    """Embed a Search form inside the given container frame."""
    se=Search(master=lab_frame)
#root = tk.Tk()
#app = Search(master=root)
#app.mainloop()
|
20,095 | 83d25bf90d61d82fdec0a0fa20b6a66d87d4c27f | def sumFunc(a, b):
    """Return the sum of a and b."""
    return a + b
print(sumFunc(2, 4))  # -> 6
print(sumFunc(8, 4))  # -> 12
20,096 | 90dc4d3992ed77d18a3214cb4954240e03b33da6 | import json
class Challenge:
    """One verification challenge parsed from an API response dict.

    The response must contain field_name/guid/label/type; image_data and
    options are optional and default to None.
    """

    # Class-level defaults; instance attributes shadow them in __init__.
    field_name = None
    guid = None
    label = None
    type = None
    image_data = None   # only present for image-based challenges
    options = None      # only present for multiple-choice challenges

    def __init__(self, response):
        # Required keys: a missing one raises KeyError, as before.
        for key in ("field_name", "guid", "label", "type"):
            setattr(self, key, response[key])
        # Optional keys: keep the class-level None default when absent.
        for key in ("image_data", "options"):
            if key in response:
                setattr(self, key, response[key])
|
20,097 | d328a6b092fa051b47a9a5ac759671f81108fc0c | """ Tests for spatial occupancy"""
import os
import sys
# Make the package under development importable when running from the tests dir.
sys.path.insert(1, os.path.join(os.getcwd(), '..'))
# NOTE(review): machine-specific astropy cache location (Windows path) --
# presumably a local dev workaround; confirm before relying on it elsewhere.
os.environ['HOMESHARE'] = r'C:\temp\astropy'
import scipy.io as spio
import numpy as np
import pytest
import test_helpers as th
from opexebo.analysis import spatial_occupancy as func
print("=== tests_analysis_spatial_occupancy ===")
def test_circular_arena():
    """Smoke test: occupancy on a small circular arena runs without error."""
    times = np.arange(5)
    # Five positions (centre plus the four compass points), shifted by -1.
    positions = np.array([[0,0], [1,0], [0,1], [-1,0], [0,-1]]).T - 1
    speeds = np.ones(5)
    kwargs = {"arena_shape":"circ", "arena_size":3, "bin_width":1, "speed_cutoff":0.1, "limits":(-2, 2.01, -2, 2.01)}
    # NOTE: `map` shadows the builtin here (local scope only).
    map, coverage, bin_edges = func(times, positions, speeds, **kwargs)
    # import matplotlib.pyplot as plt
    # plt.imshow(map)
    # print(coverage, bin_edges)
def test_linear_arena():
    """Placeholder: linear-arena occupancy is not covered yet."""
    # TODO!
    pass
def test_invalid_inputs():
    """func must reject malformed positions/speeds and a missing arena_size."""
    # wrong dimensions to positions
    n = 10
    with pytest.raises(ValueError):
        times = np.arange(n)
        positions = np.ones((3, n)) #!
        speeds = np.ones(n)
        func(times, positions, speeds, arena_size = 1)
    with pytest.raises(ValueError):
        times = np.arange(n)
        positions = np.ones((2, n))
        speeds = np.ones((2, n)) #!
        func(times, positions, speeds, arena_size = 1)
    # Mismatched pos/speed
    with pytest.raises(ValueError):
        times = np.arange(n)
        positions = np.ones((2, n))
        # BUG FIX: was `speeds - np.ones(n+1)`, which discarded the result and
        # silently reused the matching-length `speeds` from the previous case,
        # so the length-mismatch branch was never actually exercised.
        speeds = np.ones(n+1) #!
        func(times, positions, speeds, arena_size = 1)
    # No arena size
    with pytest.raises(TypeError):
        times = np.arange(n)
        positions = np.ones((2, n))
        speeds = np.ones(n)
        func(times, positions, speeds) #!
    # All nan
    # This isn't explicit in the function, but comes as a result of excluding
    # all non-finite values, and then being left with an empty array
    with pytest.raises(ValueError):
        times = np.arange(n)
        positions = np.full((2,n), np.nan)
        speeds = np.full(n, np.nan)
        func(times, positions, speeds, arena_size = 1)
    print("test_invalid_inputs passed")
#if __name__ == '__main__':
# test_circular_arena()
# test_linear_arena()
# test_invalid_inputs()
# |
20,098 | 17c8187e60feb5eca7bd19864e7e394d48237626 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
############################################################
# Created on: 2018-04-27
# Author: Joe Aaron
# Email: pant333@163.com
# Description: 有个目录,里面是你自己写过的程序,统计一下你写过多少行代码。包括空行和注释,但是要分别列出来。
import os
import re
def stat_code(dir_path):
    """Print total / blank / comment line counts for each ``.py`` file
    directly inside *dir_path* (non-recursive).

    Output is one tab-separated row per file after a header row.  Returns
    None; silently does nothing if *dir_path* is not a directory.
    """
    if not os.path.isdir(dir_path):
        return
    file_list = os.listdir(dir_path)
    print("%s\t%s\t%s\t%s" % ('file', 'all_lines', 'space_lines', 'exp_lines'))
    for file in file_list:
        file_path = os.path.join(dir_path, file)
        if os.path.isfile(file_path) and os.path.splitext(file_path)[1] == '.py':
            with open(file_path) as f:
                all_lines = 0
                space_lines = 0
                exp_lines = 0
                # Iterate the file lazily instead of readlines() -- same
                # counts, no full-file buffer.
                for line in f:
                    all_lines += 1
                    stripped = line.strip()
                    if stripped == '':
                        space_lines += 1
                    elif stripped.startswith('#'):
                        # equivalent to the original regex r'^#.*'
                        exp_lines += 1
            print("%s\t%s\t%s\t%s" % (file, all_lines, space_lines, exp_lines))
if __name__ == '__main__':
    stat_code('.')  # count lines of the scripts in the current directory
|
20,099 | 5bf1a78fa8a35b212075f1d6e0190f3c663e5013 | """
"""
import enum
from collections import namedtuple
from contextlib import suppress
from .base import (ElementSpecs, ModelEnumElement, ModelElement, SingleValueElement,
ConstantElement, PredicatedElementMixIn)
from ..util import is_valid_iterable
# Public API of this module.
# NOTE(review): some element types defined below (Fill, NewRoll, Expand,
# RawAccessor, Reference, the Operator enums) are not exported here --
# presumably intentional, but confirm.
__all__ = [
    'create_model_element_type',
    'Access',
    'Assignment',
    'Attempt',
    'BinaryOp',
    'ButIf',
    'ClearValue',
    'NewDice',
    'Enlarge',
    'ForEvery',
    'IfThen',
    'Leave',
    'Load',
    'ModifierCall',
    'ModifierDef',
    'Modify',
    'Negation',
    'NewBag',
    'Oops',
    'OverloadOperator',
    'Reduce',
    'Restart',
    'RestartLocationSpecifier',
    'SpecialAccessor',
    'SpecialReference',
    'StringLiteral',
    'UntilDo',
    'UseIf',
]
def create_model_element_type(name,
                              attrs=(),
                              constant=False,
                              specs=ElementSpecs(),
                              *,
                              basic_predicated=False):
    """Factory that builds a new model-element class.

    :param name: name of the generated class
    :param attrs: field names; 'codeinfo' is appended automatically
    :param constant: base the class on ConstantElement (no fields)
    :param specs: ElementSpecs controlling element behaviour; with
        ``basic_predicated`` and no explicit predicate_info, the first
        three attrs are used as the predicate info
    :param basic_predicated: shortcut for predicated elements
    """
    class_attrs = {}
    if basic_predicated and not specs.predicate_info:
        specs = specs._replace(predicate_info=tuple(attrs[:3]))
    if constant:
        bases = (ConstantElement,)
    elif not attrs:
        # No fields: a single-value wrapper element.
        bases = (SingleValueElement,)
    else:
        if 'codeinfo' not in attrs:
            attrs = tuple((*attrs, 'codeinfo'))
        if specs.predicate_info:
            bases = (PredicatedElementMixIn, namedtuple(f'_{name}Base', attrs), ModelElement)
        else:
            bases = (namedtuple(f'_{name}Base', attrs), ModelElement)
    class_attrs.setdefault('__specs__', specs)
    return type(name, bases, class_attrs)
class Operator(ModelEnumElement):
    """Common base for the operator enumerations below."""
class OneSidedOperator(Operator):
    """Operators with a single overloadable python hook name."""
    _ignore_ = ('python_name',)
    # Attached per-member in __new__; _ignore_ keeps it out of the members.
    python_name = None
    HAS = ('has', 'has')

    def __new__(cls, value, python_name):
        # Members are (symbol, hook-name) pairs; the first item is the enum
        # value, the second is stored as an attribute.
        self = object.__new__(cls)
        self._value_ = value
        self.python_name = python_name
        return self
class TwoSidedOperator(Operator):
    """Binary operators carrying a python hook name for each operand side.

    NOTE(review): for the comparison members (EQUALS through GREATER_THAN)
    the r_*/l_* names are passed in the opposite order to the arithmetic
    members -- this looks like deliberate reflected-comparison wiring, but
    confirm against the dispatch code in ``.base``.
    """
    _ignore_ = ('left_python_name', 'right_python_name')
    left_python_name = right_python_name = None
    # Two sided
    MULTIPLY = ('*', 'l_multiply', 'r_multiply')
    TRUEDIV = ('%/', 'l_truediv', 'r_truediv')
    FLOORDIV = ('/', 'l_floordiv', 'r_floordiv')
    MODULO = ('%', 'l_modulo', 'r_modulo')
    ADD = ('+', 'l_add', 'r_add')
    SUBTRACT = ('-', 'l_subtract', 'r_subtract')
    OR = ('or', 'l_or', 'r_or')
    AND = ('and', 'l_and', 'r_and')
    ISA = ('isa', 'l_isa', 'r_isa')
    EXPAND = ('@', 'l_expand', 'r_expand')
    AMPERSAND = ('&', 'l_ampersand', 'r_ampersand')
    EQUALS = ('==', 'r_equals', 'l_equals')
    NOT_EQUALS = ('!=', 'r_not_equals', 'l_not_equals')
    GREATER_THAN_EQUALS = ('>=', 'r_greater_than_equals', 'l_greater_than_equals')
    LESS_THAN_EQUALS = ('<=', 'r_less_than_equals', 'l_less_than_equals')
    LESS_THAN = ('<', 'r_less_than', 'l_less_than')
    GREATER_THAN = ('>', 'r_greater_than', 'l_greater_than')

    def __new__(cls, value, left_python_name, right_python_name):
        self = object.__new__(cls)
        self._value_ = value
        self.left_python_name = left_python_name
        self.right_python_name = right_python_name
        return self
class OverloadOnlyOperator(Operator):
    """Operators that exist only as overload hooks (no two-sided dispatch)."""
    _ignore_ = ('python_name',)
    python_name = None
    LENGTH = ('#', 'length')
    REDUCE = ('{}', 'reduce')
    SUBJECT = ('?', 'as_subject')
    ITERATE = ('forevery', 'iterate')
    ISZERO = ('0', 'iszero')

    def __new__(cls, value, python_name):
        # (symbol, hook-name) pairs, same pattern as OneSidedOperator.
        self = object.__new__(cls)
        self._value_ = value
        self.python_name = python_name
        return self
class SpecialAccessor(ModelEnumElement):
    """Sigils usable as accessors on a value (length, total, etc.)."""
    LENGTH = '#'
    TOTAL = '+'
    VALUE = '='
    EVERY = '*'
    PARENT = '^'
class SpecialEntry(ModelEnumElement):
    """Sigils naming special entries/operations on a container."""
    PARENT = '^'
    ACCESS = '.'
    SET = '='
    CLEAR = 'clear'
    CREATE = ':'
    DESTROY = '!'
class SpecialReference(ModelEnumElement):
    """Well-known sigil references; every member is truthy except NONE."""
    SUBJECT = '?'
    ALL = '*'
    NONE = '!'
    ERROR = '#'
    # scopes
    GLOBAL = '@'
    ROOT = '~'
    PARENT = '^'
    LOCAL = '$'

    # pylint: disable=no-member
    def __bool__(self):
        # BUG FIX: this was named __nonzero__, the Python 2 truthiness hook,
        # which Python 3 never calls -- so NONE evaluated truthy.  Python 3
        # uses __bool__ (this module is Python 3: it uses f-strings).
        return self != SpecialReference.NONE
class RestartLocationSpecifier(ModelEnumElement):
    """Where a Restart statement resumes relative to its target."""
    AT = 'at'
    BEFORE = 'before'
    AFTER = 'after'
class StringLiteral(namedtuple('_StringLiteralBase', ('parts', 'codeinfo')), ModelElement):
    """A string literal element stored as a flat tuple of parts."""
    __specs__ = ElementSpecs(intern_strings=False)

    def __new__(cls, parts, codeinfo):
        # Accept a bare string or an iterable of parts; nested StringLiterals
        # are flattened into this literal's parts.
        if isinstance(parts, str):
            new_parts = [parts]
        else:
            new_parts = []
            for part in parts:
                if isinstance(part, StringLiteral):
                    new_parts.extend(part.parts)
                else:
                    new_parts.append(part)
        return super().__new__(cls, tuple(new_parts), codeinfo=codeinfo)

    @property
    def value(self):
        """The literal's full text, joining parts when there are several."""
        if isinstance(self.parts, str):
            return self.parts
        if len(self.parts) == 1:
            return self.parts[0]
        return ''.join(self.parts)

    def _to_test_dict(self):
        """Compact representation used by the test suite."""
        return {
            '_class': type(self).__name__,
            'value': self.value,
        }

    @classmethod
    def preevaluate(cls, value):
        # A string literal pre-evaluates directly to its text.
        return value.value

# Register as a virtual subclass: StringLiteral quacks like a
# SingleValueElement even though it has two fields.
SingleValueElement.register(StringLiteral)
class Reference(create_model_element_type('BaseReference')):
    """A name reference; sigil values resolve to SpecialReference members."""
    def __new__(cls, value, *, codeinfo):
        # If the value matches a SpecialReference sigil, return that enum
        # member instead of constructing a plain Reference.
        with suppress(ValueError):
            return SpecialReference(value)
        #pylint: disable=too-many-function-args
        return super().__new__(cls, value, codeinfo=codeinfo)

# SpecialReference members count as References for isinstance checks.
Reference.register(SpecialReference)
class OperationSide(enum.Enum):
    """Which side of a binary operation an operand occupies.

    ``NA`` is falsy and its own inverse; ``LEFT`` and ``RIGHT`` are truthy
    and invert into each other.
    """
    NA = enum.auto()
    LEFT = enum.auto()
    RIGHT = enum.auto()

    def __bool__(self):
        return self is not OperationSide.NA

    def __invert__(self):
        opposite = {
            OperationSide.LEFT: OperationSide.RIGHT,
            OperationSide.RIGHT: OperationSide.LEFT,
        }
        return opposite.get(self, OperationSide.NA)
# Concrete model element types, grouped by language feature.  Elements with
# no attrs are single-value wrappers; constant=True elements carry no data.
# Loops
UntilDo = create_model_element_type('UntilDo', ('name', 'until', 'do', 'otherwise'))
""" """
ForEvery = create_model_element_type('ForEvery', ('name', 'item_name', 'iterable', 'do'))
""" """
Restart = create_model_element_type('Restart', ('location_specifier', 'target'))
""" """
# Error Handling
Attempt = create_model_element_type('Attempt', ('attempt', 'buts', 'always'))
""" """
ButIf = create_model_element_type('ButIf', ('predicate', 'statement'), basic_predicated=True)
""" """
Oops = create_model_element_type('Oops')
""" """
# Modifiers
Modify = create_model_element_type('Modify', ('subject', 'modifiers'))
""" """
ModifierCall = create_model_element_type('ModifierCall', ('modifier', 'args'))
""" """
ModifierDef = create_model_element_type('ModifierDef', ('target', 'parameters', 'definition'))
""" """
Leave = create_model_element_type('Leave', constant=True)
""" """
# Rolls
Fill = create_model_element_type('Fill', ('size', 'value'))
""" """
NewRoll = create_model_element_type('NewRoll')
""" """
#TODO
Expand = create_model_element_type('Expand')
""" """
BinaryOp = create_model_element_type('BinaryOp', ('left', 'op', 'right'))
""" """
Negation = create_model_element_type('Negation')
""" """
Assignment = create_model_element_type('Assignment', ('target', 'value'))
""" """
Load = create_model_element_type('Load', ('to_load', 'load_from', 'into'))
""" """
Access = create_model_element_type('Access', ('accessing', 'accessors'))
""" """
Enlarge = create_model_element_type('Enlarge', ('size', 'value'))
""" """
Reduce = create_model_element_type('Reduce')
""" """
NewBag = create_model_element_type('NewBag')
""" """
RawAccessor = create_model_element_type('RawAccessor')
""" """
OverloadOperator = create_model_element_type('OverloadOperator', ('operator', 'side'))
""" """
ClearValue = create_model_element_type('ClearValue')
""" """
# Predicates
UseIf = create_model_element_type('UseIf', ('use', 'predicate', 'otherwise'),
                                  specs=ElementSpecs(
                                      predicate_info=('predicate', 'use', 'otherwise'),
                                  ))
""" """
NewDice = create_model_element_type('NewDice', ('number_of_dice', 'sides'))
""" """
class IfThen(
        create_model_element_type('_IfThenParent', ('predicate', 'then', 'otherwise'),
                                  basic_predicated=True)):
    """Conditional element; `then`/`otherwise` are normalised to tuples."""
    def __new__(cls, predicate, then, otherwise, *, codeinfo):
        # None -> empty tuple; a single statement (or a non-tuple iterable)
        # is wrapped so both branches are always tuples of statements.
        if then is None:
            then = ()
        elif not is_valid_iterable(then) or not isinstance(then, tuple):
            then = (then,)
        if otherwise is None:
            otherwise = ()
        elif not is_valid_iterable(otherwise) or not isinstance(otherwise, tuple):
            otherwise = (otherwise,)
        #pylint: disable=unexpected-keyword-arg
        return super().__new__(
            cls,
            predicate=predicate,
            then=then,
            otherwise=otherwise,
            codeinfo=codeinfo,
        )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.