index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
988,700 | 1bff28abb36886dfd87888cdb19228007ead5ed8 | a = input('Введите числo: ')
y = a.count('3') + a.count('4') + a.count('5') + a.count('6') + a.count('7') + a.count('8')
print (y) |
988,701 | b3ef9c2fd69da3f6000578af075baec4c6ed0d05 | from django.db import models
# Create your models here.
class Record(models.Model):
record_number = models.CharField(max_length=50, default='python')
title = models.CharField(max_length=50, default='python')
expertise_status = models.CharField(max_length=50, default='python')
payment_status = models.CharField(max_length=50, default='python')
arrangement_type = models.CharField(max_length=50, default='python')
action_lawyer = models.CharField(max_length=50, default='python')
comments = models.CharField(max_length=550, default='python')
dependency = models.CharField(max_length=50, default='python')
city = models.CharField(max_length=50, default='python') |
988,702 | 03cb5ea937bc37acff83ac6b0f07cb42f0d155cf | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
sys.setrecursionlimit(10 ** 9)
n = int(input())
ng1 = int(input())
ng2 = int(input())
ng3 = int(input())
def dfs (n:int, c:int):
    # Greedy descent: subtract 1, 2 or 3 from n (preferring the smallest
    # step whose result avoids the forbidden values ng1/ng2/ng3) and print
    # YES if n reaches 0 within 100 operations.  Reads module-level
    # ng1/ng2/ng3 and terminates the whole program via exit() on success.
    print (n,c)
    if c <= 100 and n == 0: # reached 0 within 100 operations
        print ("YES")
        exit ()
    if c == 100: # the 100th operation is spent; a 101st is not allowed
        return
    if n-1 != ng1 and n-1 != ng2 and n-1 != ng3:
        dfs (n-1,c+1)
    elif n-2 != ng1 and n-2 != ng2 and n-2 != ng3:
        dfs (n-2,c+1)
    elif n-3 != ng1 and n-3 != ng2 and n-3 != ng3:
        dfs (n-3,c+1)
    else: # every reachable value (n-1, n-2, n-3) is forbidden
        return
# If the start value itself is forbidden, fail immediately; otherwise
# search.  dfs() exits the process on success, so falling through to the
# final print means no valid sequence was found.
if n == ng1 or n == ng2 or n == ng3:
    print ("NO")
else:
    dfs (n,0)
    print ("NO")
|
988,703 | 7b0fdcf42e5db4e330e12c635c59d4da12ab0c11 | import logging
from shlex import quote as cmd_quote
import speedling.tasks
from speedling import conf
from speedling import facility
from speedling import gitutils
from speedling import localsh
from speedling import usrgrp
from speedling import util
LOG = logging.getLogger(__name__)
# user and endpoint creation is parallel
# fernet and db related stuff is parallel
def task_keystone_fernet(self):
    """Initialize fernet keys on one keystone node and copy them to the rest.

    Picks a random keystone host, runs the fernet setup there, then (if
    other keystone hosts exist) fetches the key directory as a tar byte
    stream and unpacks it on every remaining node.
    """
    keystones = self.hosts_with_service('keystone')
    src_node = util.rand_pick(keystones)
    dst_nodes = keystones - src_node
    assert src_node
    self.call_do(src_node,
                 self.do_fernet_init)
    if dst_nodes:
        ret = self.call_do(src_node,
                           self.do_fetch_fernet_as_tar)
        fernet_tar = ret[next(iter(src_node))]['return_value']
        self.distribute_for_command(dst_nodes, fernet_tar,
                                    'tar -C / -x')
def task_cfg_httpd(self):  # TODO: split wsgi/httpd handling into its own component
    """Restart httpd on all keystone hosts after selinux; wait for memcached."""
    facility.task_wants(speedling.tasks.task_selinux)
    keystones = self.hosts_with_service('keystone')
    self.call_do(keystones, self.do_httpd_restart)
    self.wait_for_components(self.memcached)
self.wait_for_components(self.memcached)
def task_keystone_db(self):
    """Run `keystone-manage db_sync` on one keystone node once SQL is up."""
    self.wait_for_components(self.sql)
    # TODO: change the function to near db and near key parts
    schema_node_candidate = self.hosts_with_service('keystone')
    schema_node = util.rand_pick(schema_node_candidate)
    sync_cmd = 'su -s /bin/sh -c "keystone-manage db_sync" keystone'
    self.call_do(schema_node, facility.do_retrycmd_after_content, c_args=(sync_cmd, ))
def task_cfg_keystone_steps(self):
    """Bootstrap keystone on one node after db sync and fernet init complete."""
    facility.task_will_need(self.task_cfg_httpd)
    facility.task_wants(self.task_keystone_db, self.task_keystone_fernet)
    keystones = self.hosts_with_service('keystone')
    self.call_do(util.rand_pick(keystones),
                 self.do_keystone_init)
def task_keystone_endpoints(self):
    """Sync the registered endpoint catalog into keystone on one random node."""
    facility.task_wants(self.task_cfg_keystone_steps, self.task_cfg_httpd)
    self.call_do(util.rand_pick(self.hosts_with_service('keystone')),
                 self.do_keystone_endpoint_sync, c_args=(self.registered_endpoints,))
def task_keystone_users(self):
    """Sync the registered user/domain registry into keystone on one random node."""
    facility.task_wants(self.task_cfg_keystone_steps, self.task_cfg_httpd)
    self.call_do(util.rand_pick(self.hosts_with_service('keystone')),
                 self.do_keystone_user_sync, c_args=(self.registered_user_dom,))
def task_keystone_ready(self):
    """Barrier task: completes once both users and endpoints are synced."""
    facility.task_wants(self.task_keystone_users, self.task_keystone_endpoints)
    LOG.info('Keystone data sync completed')
class Keystone(facility.OpenStack):
origin_repo = 'https://github.com/openstack/keystone.git'
deploy_source = 'src'
services = {'keystone': {'deploy_mode': 'mod_wsgi'}}
def __init__(self, *args, **kwargs):
    """Wire up tasks and dependencies for the keystone component.

    Binds the module-level task functions to this instance, resolves the
    mandatory sql/memcached dependencies (KeyError when absent) and seeds
    the user/endpoint registries.
    """
    super(Keystone, self).__init__(*args, **kwargs)
    self.peer_info = {}
    self.final_task = self.bound_to_instance(task_keystone_ready)
    for f in [task_keystone_users, task_keystone_endpoints, task_cfg_keystone_steps, task_keystone_db, task_cfg_httpd, task_keystone_fernet]:
        self.bound_to_instance(f)
    self.sql = self.dependencies["sql"]  # raises
    self.memcached = self.dependencies["memcached"]  # raises
    self.loadbalancer = self.dependencies.get("loadbalancer", None)
    # consider the Default domain always existing
    self.registered_user_dom = {'Default': {}}
    self.registered_endpoints = {}
def do_keystone_endpoint_sync(cname, enp):
self = facility.get_component(cname)
from keystoneauth1.identity import v3
import slos.ossync
auth = v3.Password(auth_url='http://localhost:5000/v3', username='admin',
password=util.get_keymgr()(self.name, 'admin@default'), project_name='admin',
user_domain_name='Default',
project_domain_name='Default')
# session object is not thread safe, using auth ;(((
# TODO: wipe python client usage, looks like,
# I cannot use the same token in all threads
endpoint_override = 'http://localhost:5000/v3'
slos.ossync.endpoint_sync(auth, enp, endpoint_override=endpoint_override)
def do_keystone_user_sync(cname, dom):
self = facility.get_component(cname)
from keystoneauth1.identity import v3
import slos.ossync
auth = v3.Password(auth_url='http://localhost:5000/v3', username='admin',
password=util.get_keymgr()(self.name, 'admin@default'), project_name='admin',
user_domain_name='Default',
project_domain_name='Default')
# session object is not thread safe, using auth ;(((
# TODO: wipe python client usage, looks like,
# I cannot use the same token in all threads
endpoint_override = 'http://localhost:5000/v3'
slos.ossync.user_dom_sync(auth, dom, endpoint_override=endpoint_override)
def do_fernet_init(cname):
    """Create the fernet key directory and generate the initial key set locally."""
    self = facility.get_component(cname)
    self.have_content()
    localsh.run("""
mkdir -p /etc/keystone/fernet-keys # replace with install
chown keystone:keystone /etc/keystone/fernet-keys
chmod 770 /etc/keystone/fernet-keys
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
""")
def do_keystone_init(cname):
    """Bootstrap keystone (admin account/role) with a shell-quoted password."""
    self = facility.get_component(cname)
    self.have_content()
    localsh.run("keystone-manage bootstrap --bootstrap-password %s" %
                cmd_quote(util.get_keymgr()(self.name, 'admin@default')))
def etc_keystone_keystone_conf(self): return {
'DEFAULT': {'debug': True},
'database': {'connection': self.sql.db_url('keystone')},
'token': {'provider': 'fernet'},
'cache': {'backend': 'dogpile.cache.memcached'} # TODO: non local memcachedS
}
def etc_httpd_conf_d_wsgi_keystone_conf(self):
srv_name = 'httpd' if util.get_distro()['family'] == 'redhat' else 'apache2'
log_dir = '/var/log/' + srv_name
return """Listen 5000
Listen 35357
<VirtualHost *:5000>
WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{{GROUP}}
WSGIProcessGroup keystone-public
WSGIScriptAlias / {bin_dir}/keystone-wsgi-public
WSGIApplicationGroup %{{GLOBAL}}
WSGIPassAuthorization On
<IfVersion >= 2.4>
ErrorLogFormat "%{{cu}}t %M"
</IfVersion>
ErrorLog {log_dir}/keystone-error.log
CustomLog {log_dir}/keystone-access.log combined
<Directory {bin_dir}>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
</VirtualHost>
<VirtualHost *:35357>
WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{{GROUP}}
WSGIProcessGroup keystone-admin
WSGIScriptAlias / {bin_dir}/keystone-wsgi-admin
WSGIApplicationGroup %{{GLOBAL}}
WSGIPassAuthorization On
<IfVersion >= 2.4>
ErrorLogFormat "%{{cu}}t %M"
</IfVersion>
ErrorLog {log_dir}/keystone-error.log
CustomLog {log_dir}/keystone-access.log combined
<Directory {bin_dir}>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
</VirtualHost>
""".format(bin_dir='/usr/local/bin', log_dir=log_dir)
def etccfg_content(self):
super(Keystone, self).etccfg_content()
keystone_git_dir = gitutils.component_git_dir(self)
usrgrp.group('keystone', 163)
usrgrp.user('keystone', 'keystone', home=keystone_git_dir)
self.file_path('/etc/keystone',
owner='keystone', group='keystone')
self.file_ini('/etc/keystone/keystone.conf',
self.etc_keystone_keystone_conf(),
owner='keystone', group='keystone')
distro = util.get_distro()['family']
if distro == 'debian':
# switch to simlink
cfg_dir = '/etc/apache2/sites-enabled'
elif distro == 'suse':
cfg_dir = '/etc/apache2/conf.d'
else: # redhat familiy and this is expected in more distros
cfg_dir = '/etc/httpd/conf.d'
self.file_plain(cfg_dir + '/wsgi-keystone.conf',
self.etc_httpd_conf_d_wsgi_keystone_conf(),
mode=0o644)
def get_node_packages(self):
    """Return the package set keystone needs on a node (httpd, wsgi, clients)."""
    pkgs = super(Keystone, self).get_node_packages()
    if self.deploy_source == 'pkg':
        pkgs.update({'openstack-keystone'})
    pkgs.update({'srv-http\\apache-httpd', 'lib-dev\\openldap',
                 'lib-http-py3\\mod_wsgi', 'lib-py3\\pymemcached',
                 'python3-keystoneauth1', 'python3-keystoneclient'})
    # TODO: httpd packages live here until httpd gets its own module
    return pkgs
def do_httpd_restart(cname):
    """Reload-or-restart the distro's Apache service on this node."""
    self = facility.get_component(cname)
    self.have_content()
    srv_name = 'httpd' if util.get_distro()['family'] == 'redhat' else 'apache2'
    localsh.run("systemctl reload-or-restart " + srv_name)
# TODO: httpd needs ot be moved and spacially ahndled (consider multiple instances)
def do_fetch_fernet_as_tar(cname):
    """Return /etc/keystone/fernet-keys packed as a tar byte stream."""
    return localsh.ret('tar -c /etc/keystone/fernet-keys', binary=True)
def step_keystone_endpoints(self):
    """Sync the registered endpoint catalog into keystone on one random node.

    Waits for the keystone config steps and httpd first.
    """
    facility.task_wants(task_cfg_keystone_steps, self.task_cfg_httpd)
    # Bug fix: c_args must be a 1-tuple.  `(self.registered_endpoints)` is
    # just a parenthesised expression, so the dict itself was passed where
    # an argument tuple was expected (compare task_keystone_endpoints).
    self.call_do(util.rand_pick(self.hosts_with_service('keystone')),
                 self.do_keystone_endpoint_sync, c_args=(self.registered_endpoints,))
def step_keystone_users(self):
    """Sync service users/domains into keystone on one random node.

    NOTE(review): references the module-level task_cfg_keystone_steps
    rather than the bound self.task_cfg_keystone_steps used by
    task_keystone_users — presumably equivalent; confirm against
    facility.task_wants.
    """
    facility.task_wants(task_cfg_keystone_steps, self.task_cfg_httpd)
    self.call_do(util.rand_pick(self.hosts_with_service('keystone')),
                 self.do_keystone_user_sync, c_args=(facility.service_user_dom(),))
def step_keystone_ready(self):
    """Barrier step: completes once user and endpoint syncs are done."""
    facility.task_wants(self.step_keystone_users, self.step_keystone_endpoints)
    LOG.info('Keystone data sync completed')
def compose(self):
    """Register keystone's own endpoints, admin identity and DB needs.

    Runs at compose time: publishes the identity endpoints on the public
    VIP, declares the admin project/user, and requests a keystone schema
    plus credentials from the sql component.
    """
    super(Keystone, self).compose()
    url_base = "http://" + conf.get_vip('public')['domain_name']
    dr = conf.get_default_region()
    self.register_endpoints(region=dr,
                            name='keystone',
                            etype='identity',
                            description='OpenStack Identity',
                            eps={'admin': url_base + ':35357',
                                 'internal': url_base + ':5000',
                                 'public': url_base + ':5000'})
    self.register_project_in_domain('Default', 'admin', 'members are full admins')
    self.register_user_in_domain('Default', 'admin',
                                 password=util.get_keymgr()(self.name, 'admin@default'),
                                 project_roles={('Default', 'admin'): ['admin']})
    keystones = self.hosts_with_service('keystone')
    self.sql.populate_peer(keystones, ['client'])
    sql = self.sql
    sql.register_user_with_schemas('keystone', ['keystone'])
    util.bless_with_principal(keystones,
                              [(self.name, 'admin@default'), (sql.name, 'keystone')])
def authtoken_section(self, service_user):
    """Build an auth-token config dict for `service_user`.

    OpenStack ini files treat '%' specially, so the password is escaped
    here (lower layers do not escape at the moment).
    """
    pwd = util.get_keymgr()(self.name, service_user + '@default')
    pwd = pwd.replace('%', '%%')
    d = {"auth_url": 'http://' + conf.get_vip('public')['domain_name'] + ':5000/',
         "project_domain_name": 'Default',
         "project_name": 'service',
         # Bug fix: use the escaped password computed above.  The original
         # fetched the raw password again here, so the escaping had no effect.
         "password": pwd,
         "user_domain_name": 'Default',
         "username": service_user,
         "auth_type": 'password'}
    return d
def get_peer_info(self):
    """Return this node's keystone peer record (as stored by populate_peer)."""
    n = self.get_this_node()
    return n['peers']['keystone']
def populate_peer(self, nodes):
    """Publish keystone client connection info (internal VIP, port 35357) to nodes."""
    port = 35357
    if not self.peer_info:
        # lazily build the shared record once; all nodes get the same dict
        hostname = addr = conf.get_vip('internal')['domain_name']
        self.peer_info['client'] = {'hostname': hostname, 'addr': addr,
                                    'port': port}
    for n in nodes:
        node = self.get_node(n)
        node['peers']['keystone'] = self.peer_info
@staticmethod
def endp_triple(url):
    """Map one URL onto all three endpoint interfaces (admin/public/internal)."""
    return dict.fromkeys(('admin', 'public', 'internal'), url)
def _access_region(self, region):
if region in self.registered_endpoints:
r_dict = self.registered_endpoints[region]
else:
r_dict = {}
self.registered_endpoints[region] = r_dict
return r_dict
def set_parent_region(self, region, parent):
r = self._access_region(region)
self._access_region(parent)
r['parent_region_id'] = parent
def set_region_description(self, region, description):
r = self._access_region(region)
r['description'] = description
def _access_services(self, region):
if 'services' in region:
return region['services']
services = []
region['services'] = services
return services
def _find_named_service(self, srvs, name):
    """Return the first service dict in `srvs` whose 'name' matches, else None.

    Warning: linear scan.
    """
    return next((svc for svc in srvs if svc['name'] == name), None)
def register_endpoints(self, region, name, etype, description, eps):
r = self._access_region(region)
srvs = self._access_services(r)
# handle name as primary key
s = self._find_named_service(srvs, name)
if s:
LOG.warning("Redeclaring {name} service in the {region}".format(name=name, region=region))
else:
s = {'name': name}
srvs.append(s)
s['type'] = etype
s['description'] = description
s['endpoints'] = eps
def register_endpoint_tri(self, region, name, etype, description, url_base):
eps = self.endp_triple(url_base)
self.register_endpoints(region, name, etype, description, eps)
# TODO: not all service requires admin role, fix it,
# the auth named ones does not expected to be used in place
# where admin ness is really needed
# the cross service user usually requires admin ness
# `the admin` user was created by the kystone-manage bootstrap
# domain name here case sensitive, but may not be in keystone
def register_domain(self, name):
    """Look up (or lazily create) the registry dict for domain `name`.

    Domain names are treated case-sensitively here, whether or not
    keystone itself is.
    """
    return self.registered_user_dom.setdefault(name, {})
def register_group_in_domain(self, domain, group):
raise NotImplementedError
# it is also lookup thing, description applied from the first call
def register_project_in_domain(self, domain, name, description=None):
    """Look up or create project `name` under `domain`.

    Acts as a lookup as well: the description is only applied on first
    registration, later calls return the existing project dict unchanged.
    """
    dom = self.register_domain(domain)
    projects = dom.setdefault('projects', {})
    if name not in projects:
        projects[name] = {'description': description} if description else {}
    return projects[name]
def register_user_in_domain(self, domain, user, password, project_roles, email=None):
dom = self.register_domain(domain)
if 'users' not in dom:
users = {}
dom['users'] = users
else:
users = dom['users']
u = {'name': user, 'password': password, 'project_roles': project_roles}
if email:
u['email'] = email
users[user] = u
# TODO: move to keystone
# users just for token verify
# in the future it will create less privilgeded user
def register_auth_user(self, user, password=None):
keymgr = util.get_keymgr()
if not password:
password = keymgr('keystone', user + '@default') # TODO: multi keystone
self.register_project_in_domain('Default', 'service', 'dummy service project')
# TODO: try with 'service' role
self.register_user_in_domain(domain='Default', user=user, password=password,
project_roles={('Default', 'service'): ['admin']})
def register_service_admin_user(self, user, password=None):
keymgr = util.get_keymgr()
if not password:
password = keymgr('keystone', user + '@default')
self.register_project_in_domain('Default', 'service', 'dummy service project')
self.register_user_in_domain(domain='Default', user=user, password=password,
project_roles={('Default', 'service'): ['admin']})
|
988,704 | 735c57733cfd1ebb2171b58b579003c53790439b | # Generated by Django 2.1 on 2019-10-04 16:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional `background_image` ImageField to the project model."""

    dependencies = [
        ('projects', '0006_projectpage'),
    ]
    operations = [
        migrations.AddField(
            model_name='project',
            name='background_image',
            field=models.ImageField(blank=True, null=True, upload_to='Projects/MainImages/BackgroundImages'),
        ),
    ]
|
988,705 | c6a2279c00d1de390e032167070b401412ab1c45 | from django.urls import path, include
from . import views
# Incident (event) listings — optionally filtered by unit and worker —
# plus the event-creation form.
urlpatterns = [
    path('incidencias/', include([
        path('', views.events, name='evento_list'),
        path('<int:unidad_id>/', views.events, name='unidad-evento_list'),
        path('<int:unidad_id>/<int:trab_id>/', views.events, name='trab-evento_list')
    ])),
    path('evento/agregar/', views.EventoCreate.as_view(), name='evento_create')
]
|
988,706 | 8a359413af62b277252f38a1a743244f6a31a081 | from disyo.models import DSApplication
from rest_framework import serializers
class DSApplicationSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer exposing the public metadata fields of a DSApplication."""

    class Meta:
        model = DSApplication
        fields = (
            'name',
            'crunchbaseURI',
            'githubContributorsCount',
            'githubLatestCommitDate',
            'githubStars',
            'homepage',
            'HQ',
            'latestTweetDate',
            'logoURI',
            'organization',
            'SVCUrl',
            'twitterURI'
        )
|
988,707 | ed5ea1acd9524690ab62ebaee66f594cbaa7c997 | """My_Shop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
import xadmin
from django.conf.urls import url, include
from django.views.generic import TemplateView
from django.views.static import serve
from rest_framework.authtoken import views
from rest_framework.documentation import include_docs_urls
from rest_framework.routers import DefaultRouter
from rest_framework_jwt.views import obtain_jwt_token
from My_Shop.settings import MEDIA_ROOT
from goods.views import GoodsListViewSet, CategoryViewset, BannerViewset, IndexCategoryViewset
from trade.views import ShoppingCartViewset, OrderViewset
from user_operation.views import UserFavViewset, LeavingMessageViewset, AddressViewset
from users.views import SmsCodeViewset, UserViewset
# REST API routes for the shop: goods, categories, users, carts, orders...
# NOTE(review): `base_name` is the pre-DRF-3.9 spelling of `basename`;
# this file targets the older API — confirm before upgrading DRF.
router = DefaultRouter()
router.register(r'goods', GoodsListViewSet, base_name='goods')
router.register(r'categorys', CategoryViewset, base_name='categorys')
router.register(r'codes', SmsCodeViewset, base_name='codes')
router.register(r'users', UserViewset, base_name='users')
router.register(r'userfavs', UserFavViewset, base_name='userfavs')
router.register(r'messages', LeavingMessageViewset, base_name='messages')
router.register(r'address', AddressViewset, base_name='address')
router.register(r'shopcarts', ShoppingCartViewset, base_name='shopcarts')
router.register(r'orders', OrderViewset, base_name='orders')
router.register(r'banners', BannerViewset, base_name='banners')
router.register(r'indexgoods', IndexCategoryViewset, base_name='indexgoods')
goods_list = GoodsListViewSet.as_view({
    'get': 'list',
})
urlpatterns = [
    url(r'^xadmin/', xadmin.site.urls),
    # url(r'^ueditor/', include('DjangoUeditor.urls')),
    url(r'^media/(?P<path>.*)/$', serve, {'document_root': MEDIA_ROOT}),
    url(r'^', include(router.urls)),
    url(r'^docs/', include_docs_urls(title='陈氏集团')),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    # DRF's built-in token auth endpoint
    url(r'^api-token-auth/', views.obtain_auth_token),
    # JWT auth endpoint
    url(r'^login/$', obtain_jwt_token),
    url('', include('social_django.urls', namespace='social')),
    url(r'^index/', TemplateView.as_view(template_name='index.html'), name='index')
]
|
988,708 | a46da7da804c6c42115b767c83166fae285036a0 | sample = """Comet can fly 14 km/s for 10 seconds, but then must rest for 127 seconds.
Dancer can fly 16 km/s for 11 seconds, but then must rest for 162 seconds.
""".splitlines()
def parsedeers(sample):
    """Parse reindeer description lines into (name, speed, fly_time, rest_time).

    Each line looks like "Comet can fly 14 km/s for 10 seconds, but then
    must rest for 127 seconds."; returns a list of 4-tuples.
    """
    return [
        (words[0], int(words[3]), int(words[6]), int(words[-2]))
        for words in (entry.strip().split() for entry in sample)
    ]
deers = parsedeers(sample)
def race(deers, time):
    """Return the farthest distance any reindeer travels in `time` seconds.

    Each deer alternates `duration` seconds of flying at `speed` with
    `rest` seconds of standing still; distance is closed-form per deer.
    """
    best = 0
    for _name, speed, duration, rest in deers:
        period = duration + rest
        full_cycles, leftover = divmod(time, period)
        travelled = speed * duration * full_cycles + speed * min(duration, leftover)
        best = max(best, travelled)
    return best
assert race(deers,1000)==1120
def score(deers, time):
    # Part-2 scoring: simulate second by second; after each second every
    # deer tied for the lead earns one point.  Returns the winning score.
    scores = {x[0]: 0 for x in deers}
    dist = {x[0]: 0 for x in deers}
    # per-deer state: (currently flying?, seconds already spent in phase)
    states = {x[0]: (True, 1) for x in deers}
    for s in range(time):
        for name, speed, duration, rest in deers:
            flying, since = states[name]
            # `flying` holds the phase at the start of this second, so at
            # most one of the two branches below runs per deer per second.
            if flying:
                dist[name] += speed
                if since == duration:
                    states[name] = (not flying, 1)
                    # print(name,'resting after',s)
                else:
                    states[name] = (flying, since + 1)
            if not flying:
                if since == rest:
                    states[name] = (not flying, 1)
                    # print(name,'flying after',s)
                else:
                    states[name] = (flying, since + 1)
        # score: one point to everyone tied at the current max distance
        win = max(dist.values())
        for name, speed, duration, rest in deers:
            if dist[name] == win:
                scores[name] += 1
    # print(scores)
    return max(scores.values())
assert score(deers,1000)==689
with open('input14') as fp:
sample = fp.readlines()
deers = parsedeers(sample)
print('#1',race(deers,2503))
print('#2',score(deers,2503)) |
988,709 | a8835262c1ae7567d50f1b12f0343accde1b2848 | import unittest
import experiment
class TestExperiment(unittest.TestCase):
    """Unit tests for experiment.Experiment parameter-line generation."""

    def test_params_line_enable_batching(self):
        # Only the recognized options should surface as CLI flags; the
        # extra kwarg (some_bullshit) is not expected in the output.
        e = experiment.Experiment(
            max_batch_size=100, enable_batching=True, tensorflow_intra_op_parallelism=100,
            some_bullshit=500)
        self.assertTrue(e.enable_batching)
        self.assertCountEqual(['"--tensorflow_intra_op_parallelism=100"', '"--enable_batching"'],
                              e._get_parameters_line())
        # merging joins the individual flags into one comma-separated string
        params = e._merge_params(
            ['"--tensorflow_intra_op_parallelism=100"', '"--enable_batching"'])
        self.assertEqual(
            '"--tensorflow_intra_op_parallelism=100", "--enable_batching"',
            params)
if __name__ == '__main__':
unittest.main()
|
988,710 | 8a2a778c7de6d59b79401f03a0bd977c30256f1a | #autor: Jonatan Montenegro
#Email: mrmontenegro@gmail.com
#No estoy seguro del correcto funcionamiento de este ejercicio. no logre verificarlo, aguardo comentarios sobre alguna falla.
import csv
import sys
def costo_camion(nombre_archivo):
    """Compute the total purchase cost from a CSV file.

    The file must have a header row followed by rows whose second column
    is a case count and whose third column is a unit price.  Returns the
    total rounded to 2 decimals, or None when the file holds invalid data.
    """
    with open(nombre_archivo, 'rt') as f:
        next(f)  # skip the header row
        rows = csv.reader(f)
        precio_total = 0
        try:
            for columna in rows:
                cajones = float(columna[1])
                precio = float(columna[2])
                # Bug fix: accumulate the exact line cost.  The original
                # rounded each line to a whole number, silently dropping
                # the cents before the final 2-decimal rounding.
                precio_total += cajones * precio
            return round(precio_total, 2)
        except ValueError:
            print("archivo con datos invalidos por favor verifique")
if len(sys.argv) == 2:
nombre_archivo = sys.argv[1]
else:
nombre_archivo = 'C:/Users/User2021/Documents/python/unsam/ejercicios python/clase 2/archivos/camion.csv'
costo = costo_camion(nombre_archivo)
print('Costo total:', costo) |
988,711 | 5b584b6b209d55218064c86bbe4eafa8ac1a373b | # https://atcoder.jp/contests/abc181/tasks/abc181_c
# import sys
# # def input(): return sys.stdin.readline().rstrip()
# # input = sys.stdin.readline
# input = sys.stdin.buffer.readline
# from numba import njit
# from functools import lru_cache
# sys.setrecursionlimit(10 ** 7)
# @njit('(i8,i8[::1],i4[::1])', cache=True)
# def main():
# @lru_cache(None)
# def dfs():
# return
# return
# main()
def check(a, b):
    """Return True when 2-D integer vectors a and b are parallel.

    Zero components are resolved first so the cross-product comparison is
    only used when both vectors have nonzero x and y.  The branch results
    match the original ladder exactly, including its handling of zero
    vectors.
    """
    if a[0] == 0 or b[0] == 0:
        return a[0] == 0 and b[0] == 0
    if a[1] == 0 or b[1] == 0:
        return a[1] == 0 and b[1] == 0
    return a[0] * b[1] == a[1] * b[0]
from itertools import combinations
# ABC181 C: print Yes if any 3 of the N input points are collinear,
# i.e. the two difference vectors from one point are parallel.
N = int(input())
ans = "No"
xy = []
for i in range(N):
    x, y = map(int, input().split())
    xy.append((x,y))
for a, b, c in combinations(range(N), 3):
    # print(a, b, c)
    if check((xy[a][0]-xy[b][0], xy[a][1]-xy[b][1]), (xy[a][0]-xy[c][0], xy[a][1]-xy[c][1])):
        ans = "Yes"
        # print(a, b, c)
print(ans)
# S = input()
# n = int(input())
# N, K = map(int, input().split())
# l = list(map(int, (input().split())))
# A = [[int(i) for i in input().split()] for _ in range(N)]
|
988,712 | 0dff1aa299f2bc07c4f91b3708fd333f06899179 | # Ex04a.py
from ch.aplu.jgamegrid import GameGrid, Actor, Location
from ch.aplu.util import X11Color
import random
# --------------------- class Hamster ---------------------
class Hamster(Actor):
    """Hamster actor: wanders the grid, eats hazelnuts, never enters rock cells."""

    def __init__(self):
        Actor.__init__(self, "sprites/hamster.gif");

    def act(self):
        # Eat the hazelnut on the current cell, if any.
        hazelnut = gg.getOneActorAt(self.getLocation(), Hazelnut)
        if hazelnut != None:
            hazelnut.removeSelf()
        # Try to turn +-90 degrees each 5 periods
        if self.nbCycles % 5 == 0:
            if random.random() < 0.5:
                self.turn(90)
            else:
                self.turn(-90)
        # If new location is valid, move to it
        if self.canMove():
            self.move()
        # if not, turn 90, 180 or 270 degrees until a valid location is found
        else:
            for i in range(1, 5):
                self.turn(i * 90)
                if self.canMove():
                    break

    def canMove(self):
        """Return True if the next cell is inside the grid and holds no Rock."""
        if self.isMoveValid() and gg.getOneActorAt(self.getNextMoveLocation(), Rock) == None:
            return True  # Inside grid and no rock
        return False
# --------------------- class Rock ---------------------------
class Rock(Actor):
    """Obstacle sprite; the hamster's canMove() refuses cells holding one."""
    def __init__(self):
        Actor.__init__(self, "sprites/rock.gif")
# --------------------- class Hazelnut -----------------------
class Hazelnut(Actor):
    """Collectible sprite; removed when a hamster reaches its cell."""
    def __init__(self):
        Actor.__init__(self, "sprites/hazelnut.gif")
# ----------------- main -----------------------------
gg = GameGrid(10, 10, 50, X11Color("green"))
gg.setBgColor(X11Color("darkGray"))
for i in range(10):
gg.addActor(Rock(), gg.getRandomEmptyLocation())
for i in range(20):
gg.addActor(Hazelnut(), gg.getRandomEmptyLocation())
gg.addActor(Hamster(), gg.getRandomEmptyLocation())
gg.show()
|
988,713 | add09f4db13d8be882cec07d63e974d70e3cd980 | # nlinvns
# % Written and invented
# % by Martin Uecker <muecker@gwdg.de> in 2008-09-22
# %
# % Modifications by Tilman Sumpf 2012 <tsumpf@gwdg.de>:
# % - removed fftshift during reconstruction (ns = "no shift")
# % - added switch to return coil profiles
# % - added switch to force the image estimate to be real
# % - use of vectorized operation rather than "for" loops
# %
# % Version 0.1
# %
# % Biomedizinische NMR Forschungs GmbH am
# % Max-Planck-Institut fuer biophysikalische Chemie
# Adapted for Python by O. Maier
import numpy as np
import time
import pyfftw
def nlinvns(Y, n, *arg):  # *returnProfiles,**realConstr):
    """Nonlinear inversion ("no-shift" NLINV) reconstruction.

    Parameters
    ----------
    Y : complex array, shape (coils, y, x) — undersampled k-space data
        (zero where not sampled).
    n : int — number of outer (Gauss-Newton-style) iterations.
    arg : optional flags (returnProfiles, realConstr).

    Returns the per-iteration results R; when returnProfiles is truthy
    the scaled coil profiles are appended along the first axis.
    """
    nrarg = len(arg)
    if nrarg == 2:
        returnProfiles = arg[0]
        realConstr = arg[1]
    elif nrarg < 2:
        realConstr = False
        if nrarg < 1:
            returnProfiles = 0
    print('Start...')
    alpha = 1
    [c, y, x] = Y.shape
    if returnProfiles:
        R = np.zeros([c + 2, n, y, x], complex)
    else:
        R = np.zeros([2, n, y, x], complex)
    # initialization x-vector: object part set to one, coils to zero
    X0 = np.array(np.zeros([c + 1, y, x]), np.complex64)  # 5,128,128
    X0[0, :, :] = 1  # object part
    # initialize sampling mask and k-space weights
    P = np.ones(Y[0, :, :].shape, dtype=np.complex64)  # 128,128
    P[Y[0, :, :] == 0] = 0
    W = weights(x, y)  # W128,128
    # P = fftshift2(P) #128,128
    W = np.fft.fftshift(W, axes=(-2, -1))
    # Y = fftshift2(Y) # 4,128,128
    # normalize data vector
    yscale = 100 / np.sqrt(scal(Y, Y))
    YS = Y * yscale  # check
    # YS = np.round(YS,4) #4,128,128
    XT = np.zeros([c + 1, y, x], dtype=np.complex64)  # 5,128,128
    XN = np.copy(X0)  # 5,128,128
    start = time.perf_counter()
    for i in range(0, n):
        # the application of the weights matrix to XN
        # is moved out of the operator and the derivative
        XT[0, :, :] = np.copy(XN[0, :, :])
        XT[1:, :, :] = apweightsns(W, np.copy(XN[1:, :, :]))
        RES = (YS - opns(P, XT))
        print(np.round(np.linalg.norm(RES)))  # residual norm per iteration
        # calculate rhs of the regularized normal equations
        r = derHns(P, W, XT, RES, realConstr)
        r = np.array(r + alpha * (X0 - XN), dtype=np.complex64)
        z = np.zeros_like(r)
        d = np.copy(r)
        dnew = np.linalg.norm(r)**2
        dnot = np.copy(dnew)
        # inner conjugate-gradient loop (at most 500 steps)
        for j in range(0, 500):
            # regularized normal equations
            q = derHns(P, W, XT, derns(P, W, XT, d), realConstr) + alpha * d
            np.nan_to_num(q)
            a = dnew / np.real(scal(d, q))
            z = z + a * (d)
            r = r - a * q
            np.nan_to_num(r)
            dold = np.copy(dnew)
            dnew = np.linalg.norm(r)**2
            d = d * ((dnew / dold)) + r
            np.nan_to_num(d)
            if (np.sqrt(dnew) < (1e-2 * dnot)):
                break
        print('(', j, ')')
        XN = XN + z
        alpha = alpha / 3  # relax regularization each outer iteration
        # postprocessing: weighted coil profiles and combined image
        CR = apweightsns(W, XN[1:, :, :])
        if returnProfiles:
            R[2:, i, :, :] = CR / yscale  # ,6,9,128,128
        C = (np.conj(CR) * CR).sum(0)
        R[0, i, :, :] = (XN[0, :, :] * np.sqrt(C) / yscale)
        R[1, i, :, :] = np.copy(XN[0, :, :])
    R = (R)
    end = time.perf_counter()  # sec.process time
    print('done in', round((end - start)), 's')
    return R
def scal(a, b):
    """Complex inner product sum(conj(a) * b), returned as a complex64 0-d array."""
    product = np.conj(a) * b
    return np.array(product.sum(), dtype=np.complex64)
def apweightsns(W, CT):
    """Apply k-space weights W to coil coefficients CT and inverse-FFT them."""
    C = nsIfft(W * CT)
    return C
def apweightsnsH(W, CT):  # TODO: candidate for removal ("weglassen")
    """Adjoint of apweightsns: forward FFT then multiply by conj(W)."""
    C = np.conj(W) * nsFft(CT)
    return C
def opns(P, X):
    """Forward operator: object (X[0]) times coil parts (X[1:]), FFT, mask by P."""
    K = np.array(X[0, :, :] * X[1:, :, :], dtype=np.complex64)
    K = np.array(P * nsFft(K), dtype=np.complex64)  # [None,...]
    return K
def derns(P, W, X0, DX):
    """Derivative of opns at X0 applied to perturbation DX (product rule)."""
    K = X0[0, :, :] * apweightsns(W, DX[1:, :, :])
    K = K + (DX[0, :, :] * X0[1:, :, :])  # object-perturbation term
    K = P * nsFft(K)
    return K
def derHns(P, W, X0, DK, realConstr):
    """Adjoint derivative of the forward operator at X0 applied to DK."""
    K = nsIfft(P * DK)
    if realConstr:
        # force the object update to be real-valued
        DXrho = np.sum(np.real(K * np.conj(X0[1:, :, :])), 0)
    else:
        DXrho = np.sum(K * np.conj(X0[1:, :, :]), 0)
    DXc = apweightsnsH(W, (K * np.conj(X0[0, :, :])))
    DX = np.array(np.concatenate(
        (DXrho[None, ...], DXc), axis=0), dtype=np.complex64)
    return DX
def nsFft(M):
    """Orthonormal-scaled 2-D FFT (no fftshift) over the last two axes of M."""
    si = M.shape
    a = 1 / (np.sqrt((si[M.ndim - 1])) * np.sqrt((si[M.ndim - 2])))
    K = np.array((pyfftw.interfaces.numpy_fft.fft2(
        M, norm=None)).dot(a), dtype=np.complex64)
    return K
def nsIfft(M):
    """Orthonormal-scaled 2-D inverse FFT (no fftshift) over the last two axes."""
    si = M.shape
    a = np.sqrt(si[M.ndim - 1]) * np.sqrt(si[M.ndim - 2])
    # K = np.array(np.fft.ifftn(M, axes=(0,)),dtype=np.float64) #*a
    K = np.array(pyfftw.interfaces.numpy_fft.ifft2(M, norm=None).dot(a))
    return K  # .T
def weights(x, y):
    """Build a (y, x) radially decaying k-space weighting matrix.

    Each entry is 1 / (1 + 220*d)^16 where d is the squared normalized
    distance from the matrix center; the peak value 1 sits at the center.

    Bug fix: the original allocated np.zeros([x, y]) but indexed W[j, i]
    with j in range(y), i in range(x), which raises IndexError (or
    misindexes) whenever x != y.  The caller broadcasts W against arrays
    of shape (..., y, x), so (y, x) is the correct shape.
    """
    W = np.zeros([y, x])
    for i in range(0, x):
        for j in range(0, y):
            d = ((i) / x - 0.5)**2 + ((j) / y - 0.5)**2
            W[j, i] = 1 / (1 + 220 * d)**16  # 16
    return W
|
988,714 | f8889d1a6417a7323e8195f91ffa958c52fc031f | #########################################
# #
# Created By: Nicholas Evans #
# Project Start Date: May 19th, 2020 #
# Backend Version 1.1.2 #
# #
#########################################
from flask import Flask
flask_app = Flask(__name__)
import common.routes as routes
if __name__ == '__main__':
routes.run() |
988,715 | 67205c4ffb96c65f94f6020c9479c226013de393 | import math
def counter(f):
    """Decorator counting invocations of a two-argument function.

    The running total is exposed as the wrapper's `counter` attribute.
    """
    def wrapper(i, j):
        wrapper.counter += 1
        return f(i, j)
    wrapper.counter = 0
    return wrapper
@counter
def pyt(a, b):
    """Return the hypotenuse length of a right triangle with legs a and b."""
    # The original reassigned a and b to themselves; those no-ops are removed.
    return math.sqrt(a ** 2 + b ** 2)
a = [3,4,5]
b = [4,5,6]
for a,b in zip(a,b):
print pyt(a,b)
print pyt.counter
|
988,716 | a9ca21c9dcd3538a0cbf93b9358486ccd4f0df1f | import sys
import random
import math
import operator
from simulator import Marker
class Policy(object):
    """Base class for move policies; stores which color this player plays."""
    def __init__(self, color):
        super(Policy, self).__init__()
        self.color = color
class RandomPolicy(Policy):
    """Plays a uniformly random legal column."""
    def action(self, board):
        return random.choice(board.legal_actions)
class BadPolicy(Policy):
    """Always plays the first legal column (deterministic baseline)."""
    def action(self, board):
        return board.legal_actions[0]
class HumanPolicy(Policy):
def action(self, board):
print str(board) + "\n"
resp = None
while resp is None:
try:
resp = int(raw_input())
if resp == -1:
sys.exit()
except:
print "Invalid input."
pass
return resp
class BasicPolicy(Policy):
    """Takes an immediate winning move or blocks the opponent's immediate win;
    otherwise falls back to a random move."""
    def __init__(self, color):
        # Bug fix: the original called super(Policy, self).__init__(),
        # which skips Policy.__init__ entirely (it only worked because
        # color was then assigned by hand).  Call the MRO properly.
        super(BasicPolicy, self).__init__(color)
        self.basePi = RandomPolicy(color)

    def action(self, board):
        """Return a winning/blocking column if one exists, else a random legal one."""
        for action in board.legal_actions:
            row = board.first_unfilled[action]
            for direction in Marker.DIRECTIONS:
                count_me = board.total_line_count(row, action, direction, self.color)
                count_other = board.total_line_count(row, action, direction, 1 - self.color)
                if count_me == 4 or count_other == 4:
                    return action
        return self.basePi.action(board)
class WeightedPolicy(Policy):
    # Softmax policy over a hand-crafted boolean feature vector; optionally
    # records log-probability gradients for policy-gradient (REINFORCE-style)
    # training. Python 2 code (xrange / iteritems).
    # (all features are boolean 0-1)
    # 4 options for max friendly line length
    # 5 options for number of friendly lines
    # 4 options for max opponent line length
    # 5 options for number of opponent lines
    # 7 options for action location
    FEATURES_COUNT = 4 + 5 + 4 + 5 + 7
    def __init__(self, color, track_grads=False):
        # track_grads: when True, action() appends grad log pi(s, a) to
        # self.grads on every call.
        super(WeightedPolicy, self).__init__(color)
        self.weights = [0.0 for i in xrange(WeightedPolicy.FEATURES_COUNT)]
        self.track_grads = track_grads
        self.grads = []
        # These are some sets of learned weights that you can try if you don't want to go through the learning process.
        # self.weights = [-2.20597254474, -1.78571440414, -0.845539983565, 4.10251716516, -2.51825361545, -2.16505039348, -0.208659399836, 5.3094751307, -1.35595706971, -0.370561310822, 1.48504421281, 1.67516404883, 1.02282711843, -0.679025543009, -1.54835951836]
    def clear_gradients(self):
        # Reset gradients accumulated by action() since the last update.
        self.grads = []
    def line_features(self, board, row, col, color):
        # Return (longest line, number of lines) of `color` that a piece at
        # (row, col) would join, scanning all four directions.
        max_len = 0
        line_count = 0
        for direction in Marker.DIRECTIONS:
            length = board.total_line_count(row, col, direction, color) - 1
            if length > 0:
                line_count += 1
            if length > max_len:
                max_len = length
        return max_len, line_count
    # Scores are exp(f(s, a) . w) where f(s, a) is the features vector for the given state and action
    # and w is the weights vector. The exp serves to add non-linearity, which speeds up learning.
    # Because exp is increasing, it preserves the ordering of actions, but spreads them out.
    def calculate_score(self, board, action):
        # Returns (one-hot feature vector, exp(features . weights)).
        # Feature layout: [0:4) own max line length, [4:9) own line count,
        # [9:13) opponent max line length, [13:18) opponent line count,
        # [18:25) column played.
        row = board.first_unfilled[action]
        features = [0.0 for i in xrange(len(self.weights))]
        max_len_me, line_count_me = self.line_features(board, row, action, self.color)
        features[max_len_me] = 1.0
        features[line_count_me + 4] = 1.0
        max_len_other, line_count_other = self.line_features(board, row, action, 1 - self.color)
        features[max_len_other + 9] = 1.0
        features[line_count_other + 13] = 1.0
        features[action + 18] = 1.0
        score = 0
        for idx in xrange(len(self.weights)):
            score += features[idx] * self.weights[idx]
        return features, math.exp(score)
    def action(self, board):
        # Calculate scores for each action that's available.
        scores = {}
        features = {}
        for action in board.legal_actions:
            features[action], scores[action] = self.calculate_score(board, action)
        # Normalize the scores into a probability distribution.
        total = 0
        for action in scores:
            total += scores[action]
        probits = {}
        for action in scores:
            probits[action] = scores[action] / total
        # Select a weighted sample from the actions -> probits dict.
        sorted_probits = list(reversed(sorted(probits.iteritems(), key=operator.itemgetter(1))))
        p = random.uniform(0, 1)
        running_total = 0
        selected_action = -1
        for action, probit in sorted_probits:
            running_total += probit
            if running_total >= p:
                selected_action = action
                break
        if selected_action < 0:
            # Floating-point round-off can leave the running total just
            # below p; fall back to the last (least likely) action.
            selected_action = sorted_probits[-1][0]
        if not self.track_grads:
            return selected_action
        # calculate: grad log(pi(board, selected_action))
        # = f(s, a) - sum_a' pi(a') f(s, a') (softmax log-likelihood gradient)
        grad = []
        for i in xrange(WeightedPolicy.FEATURES_COUNT):
            f_i = features[selected_action][i]
            prob = 0
            for a in board.legal_actions:
                prob += probits[a] * features[a][i]
            grad.append(f_i - prob)
        self.grads.append(grad)
        return selected_action
|
988,717 | 683da31df6c21e0d3bef6be569412dd095aa3d93 |
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class Customer(Base):
    # SQLAlchemy ORM mapping for the `Customer` table.
    __tablename__ = 'Customer'
    ID = Column(Integer, primary_key=True)
    Name = Column(String(255))
    Address = Column(String(255))
    MobileNo = Column(String(100))
    Email = Column(String(255))
    # NOTE(review): birth date is stored as a free-form string rather than a
    # Date column — confirm whether callers rely on that.
    BOD = Column(String(255))
class CustomerTo(Base):
    # SQLAlchemy ORM mapping for the `CustomerTo` table; identical schema to
    # `Customer` (presumably a copy/transfer target table — confirm usage).
    __tablename__ = 'CustomerTo'
    ID = Column(Integer, primary_key=True)
    Name = Column(String(255))
    Address = Column(String(255))
    MobileNo = Column(String(100))
    Email = Column(String(255))
    # NOTE(review): birth date stored as a string, same as Customer.BOD.
    BOD = Column(String(255))
988,718 | cc415e3cab17e500f645b4bd056ef519eb5aaae4 | import sys
import os
import logging
import numpy as np
import pandas as pd
import healpy as hp
import fitsio as ft
sys.path.insert(0, '/users/PHS0336/medirz90/github/LSSutils')
from lssutils.stats.cl import get_cl
sys.path.insert(0, '/users/PHS0336/medirz90/github/regressis')
from regressis import PhotometricDataFrame, Regression, DR9Footprint
from regressis.utils import setup_logging
# Validate imaging-systematics mitigation on a mock LRG density map: train a
# regressis random-forest weight map on the North region and compare angular
# power spectra before/after applying the weights.
logger = logging.getLogger('MockTest')
setup_logging()
# Command-line arguments: input mock density map, output .npz path.
path2input = sys.argv[1]
path2output = sys.argv[2]
print(f'input: {path2input}')
print(f'output: {path2output}')
version, tracer, suffix_tracer = 'SV3', 'LRG', 'mock'
dr9_footprint = DR9Footprint(256, mask_lmc=False, clear_south=False, mask_around_des=False, cut_desi=False)
params = dict()
params['output_dir'] = None
params['use_median'] = False
params['use_new_norm'] = False
params['regions'] = ['North']
dataframe = PhotometricDataFrame(version, tracer, dr9_footprint, suffix_tracer, **params)
# Imaging feature table (subset of nside=256 pixels) and the mock density map.
dt = ft.read('/fs/ess/PHS0336/data/rongpu/imaging_sys/tables/0.57.0/nlrg_features_bmzls_256.fits')
ng = hp.read_map(path2input)
# Index permutation converting RING-ordered arrays to NESTED ordering
# (presumably because regressis expects NESTED maps — confirm).
r2n = hp.reorder(np.arange(12*256*256), r2n=True)
# Columns 0, 1, 4, 6, 11 correspond to the five feature_names defined below.
feat_ = dt['features'][:, [0, 1, 4, 6, 11]]
# Scatter the partial-sky features into full-sky arrays, then reorder.
featr_ = np.zeros((12*256*256, 5))
featr_[dt['hpix']] = feat_
features = featr_[r2n]
targets = ng[r2n]
fracarea = np.zeros(12*256*256)
# Coverage fraction: 1.0 inside observed pixels, NaN elsewhere.
fracarea_ = np.ones(targets.size)*np.nan
fracarea_[dt['hpix']] = 1.0
fracarea = fracarea_[r2n]
feature_names = ['ebv', 'nstar', 'galdepth_z', 'psfdepth_g', 'psfsize_g']
featpd = pd.DataFrame(features, columns=feature_names)
logger.info('Features')
dataframe.set_features(featpd, sel_columns=feature_names,
                       use_sgr_stream=False, features_toplot=False)
logger.info('Targets')
dataframe.set_targets(targets, fracarea=fracarea, )
logger.info('Build')
dataframe.build(cut_fracarea=False)
feature_names = ['ebv', 'nstar', 'galdepth_z', 'psfdepth_g', 'psfsize_g']
use_kfold = True
regressor_params = None
nfold_params = {'North':6}
# Random-forest regression with 6-fold cross-validation; fixed seed for
# reproducibility.
regression = Regression(dataframe, feature_names=feature_names,
                       regressor_params=regressor_params, nfold_params=nfold_params,
                       regressor='RF', suffix_regressor='', use_kfold=use_kfold,
                       n_jobs=1, seed=123, compute_permutation_importance=False, overwrite=True)
# Invert the regressis map to obtain per-pixel selection weights.
wsys = 1./regression.get_weight(save=False).map
# measure C_ells
mask = fracarea_ > 0
cl_before = get_cl(ng, fracarea_, mask)
cl_after = get_cl(ng, fracarea_, mask, selection_fn=hp.reorder(wsys, n2r=True))
np.savez(path2output, **{'cl_before':cl_before['cl_gg']['cl'], 'cl_after':cl_after['cl_gg']['cl']})
|
988,719 | 2fe7da8446088ed322c84d15c712caac19f360d1 | #!/usr/bin/env python
# This module adapted from:
# https://gist.github.com/1108174.git
import os
import platform
import shlex
import struct
import subprocess
# Platform-conditional imports: ctypes is only needed for the Win32 console
# API; fcntl/termios are POSIX-only. Missing POSIX modules are replaced by
# None so ioctl_GWINSZ can feature-test them.
OS_NAME = platform.system()
if OS_NAME == "Windows":
    import ctypes
else:
    try:
        import fcntl
    except ImportError as e:
        fcntl = None
    try:
        import termios
    except ImportError as e:
        termios = None
def get_terminal_size():
    """getTerminalSize()
    - get width and height of console
    - works on linux,os x,windows,cygwin(windows)

    Returns a (columns, rows) tuple. Tries the OS-specific probe first,
    then `tput`, and finally falls back to (80, 24).

    originally retrieved from:
    http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
    """
    tuple_xy = None
    if OS_NAME == "Windows":
        tuple_xy = _get_terminal_size_windows()
    elif OS_NAME in ["Linux", "Darwin"] or OS_NAME.startswith("CYGWIN"):
        tuple_xy = _get_terminal_size_linux()
    # Bug fix: the tput fallback used to sit on the same elif chain, so it
    # only ran on unrecognized platforms — a failed Windows/Linux probe
    # (returning None) never fell back to tput. Probe it separately.
    if tuple_xy is None:
        tuple_xy = _get_terminal_size_tput()
    if tuple_xy is None:
        tuple_xy = (80, 24)
    return tuple_xy
def _get_terminal_size_windows():
if not hasattr(ctypes, "windll"):
return None
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = ctypes.windll.kernel32.GetStdHandle(-12)
csbi = ctypes.create_string_buffer(22)
res = ctypes.windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
(bufx, bufy, curx, cury, wattr,\
left, top, right, bottom,\
maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return sizex, sizey
def _get_terminal_size_tput():
"""get terminal width
src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
"""
try:
cols = int(subprocess.check_call(shlex.split('tput cols')))
rows = int(subprocess.check_call(shlex.split('tput lines')))
return (cols, rows)
except:
return None
def ioctl_GWINSZ(fd):
    """Return the (rows, cols) reported by the TIOCGWINSZ ioctl on *fd*,
    or None when fd is not a tty or the POSIX modules are unavailable."""
    if fcntl and termios:
        try:
            # "1234" is a 4-byte scratch buffer; the kernel overwrites it
            # with the first two shorts of struct winsize (rows, cols).
            return struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, "1234"))
        except (IOError, OSError, struct.error):
            # Bug fix: narrowed from a bare `except:` that also swallowed
            # KeyboardInterrupt/SystemExit.
            return None
    return None
def _get_terminal_size_linux():
    """Return (cols, rows) on POSIX systems, or None.

    Tries the TIOCGWINSZ ioctl on stdin/stdout/stderr, then on the
    controlling terminal, then the LINES/COLUMNS environment variables.
    """
    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
    if not cr:
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            try:
                cr = ioctl_GWINSZ(fd)
            finally:
                # Bug fix: close the fd even if the ioctl path raises;
                # the original leaked it in that case.
                os.close(fd)
        except OSError:
            # Narrowed from a bare `except:`; no controlling terminal.
            pass
    if not cr:
        try:
            cr = (os.environ['LINES'], os.environ['COLUMNS'])
        except KeyError:
            # Narrowed from a bare `except:`.
            return None
    # cr is (rows, cols); callers expect (cols, rows).
    return int(cr[1]), int(cr[0])
if __name__ == "__main__":
    # Smoke test: print the detected terminal dimensions.
    x, y = get_terminal_size()
    print("width = {0}, height = {1}.".format(x, y))
988,720 | 5816118e2189107946c289c2c36f95eb29b51e7b | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 16 11:33:50 2017
@author: stanleygan
Project: Illuminating the diversity of pathogenic bacteria Borrelia Burgdorferi in tick samples
"""
from __future__ import division
from collections import defaultdict, Counter
from scipy.misc import comb
from scipy.stats import entropy
from math import log
import pandas as pd
import csv
import os
import re
import itertools
import numpy as np
import cplex
import sys
import math
import variantILP as varSolver
#import matplotlib.pyplot as plt
#from scipy.spatial.distance import hamming
# Upper bound on each per-variant error decision variable in the strain MILP.
errorThres=0.1
''' Extend class as need to retrieve solution if gap does not converge after certain time'''
''' Extend class as need to retrieve solution if gap does not converge after certain time'''
class TimeLimitCallback(cplex.callbacks.MIPInfoCallback):
    """Abort the MIP search once the time limit has passed and the current
    incumbent is within an acceptable optimality gap."""

    def __call__(self):
        if self.aborted or not self.has_incumbent():
            return
        gap_pct = 100.0 * self.get_MIP_relative_gap()
        elapsed = self.get_time() - self.starttime
        if elapsed > self.timelimit and gap_pct < self.acceptablegap:
            print("Good enough solution at", elapsed, "sec., gap =",
                  gap_pct, "%, quitting.")
            self.aborted = True
            self.abort()
''' ====================================== Functions related to processing raw data ======================================================= '''
'''
Return the sum of all quality scores in Qmatrix
Input: Qmatrix, dataframe
'''
def compute_QSum(Qmatrix):
    """Return the grand total of all quality scores in *Qmatrix*.

    Input: Qmatrix, a pandas DataFrame; per-column sums are summed again
    into a single scalar.
    """
    column_totals = Qmatrix.sum()
    return column_totals.sum()
'''
Compute the probability of read of length n mapping to a variant with k mismatches using the binomial distribution
Input: n, integer
k, integer
'''
def compute_probability(n, k, error_rate=0.01):
    """Binomial probability that a read of length *n* maps with exactly
    *k* mismatches.

    Generalized: the per-base error rate (originally hard-coded as
    0.01/0.99) is now a parameter; the default preserves the original
    behaviour.

    Input: n, integer read length
           k, integer number of mismatches
           error_rate, per-base mismatch probability
    """
    b = comb(n, k, exact=False)
    x = math.pow(1.0 - error_rate, (n - k))
    y = math.pow(error_rate, k)
    prob = b * x * y
    return prob
#Return 2D dictionary
#Return 2D dictionary
def tree():
    """Autovivifying nested dictionary: accessing a missing key creates
    another nested level on the fly."""
    return defaultdict(tree)
'''
Input: Dataframe with rows=reads, columns=variants
Output: The proportions of variants (type list)
'''
def bayes_compute_proportions(dataframe, read_length=152):
    """Estimate variant proportions probabilistically.

    Input: dataframe with rows=reads, columns=variants, entries=mismatch
        counts (negative entries mean the read does not map).
    read_length: generalized from the hard-coded 152; the default keeps
        the original behaviour.
    Output: list of percentage proportions, one per variant column.
    """
    prob_list = []  # per-read normalized mapping probabilities
    for row in dataframe.itertuples(index=False):
        temp_list = list(row)
        # Convert each mismatch count to a binomial mapping probability;
        # unmapped entries (negative) contribute zero.
        for i in range(len(temp_list)):
            if temp_list[i] >= 0:
                temp_list[i] = compute_probability(read_length, int(temp_list[i]))
            else:
                temp_list[i] = 0
        total = sum(temp_list)
        # Normalize within the read. As in the original, a read with zero
        # total probability (can occur when a predicted variant's decision
        # variable is 1 but it was not output) is kept unnormalized.
        try:
            temp_list = [j * (1.0 / total) for j in temp_list]
        except ZeroDivisionError:
            print(total)
            print(temp_list)
        prob_list.append(temp_list)
    # Aggregate per-variant mass across reads and rescale to percentages.
    col_sums = [sum(k) for k in zip(*prob_list)]
    total_sum = sum(col_sums)
    prop_list = [100.0 * l * (1 / total_sum) for l in col_sums]
    return prop_list
def kallisto_proportions(alleles, kal_cmd, seq_dict, first_fa, second_fa):
    # Quantify allele proportions with kallisto (Python 2 code; shells out
    # via os.system and leaves temp_prop.* artifacts in the CWD).
    # alleles: allele names to quantify; kal_cmd: path to the kallisto
    # binary; seq_dict: FASTA-header -> sequence map; first_fa/second_fa:
    # paired read files. Returns {allele: fractional abundance} with
    # zero-count alleles dropped.
    # Write a temporary FASTA containing only the requested alleles.
    with open("temp_prop.fas", "w") as f:
        for a in alleles:
            f.write(">"+a+"\n")
            f.write(seq_dict[">"+a]+"\n")
    #kallisto index
    print os.getcwd()
    kal_idx_cmd = kal_cmd + ' index -i temp_prop.idx temp_prop.fas >/dev/null 2>&1'
    os.system(kal_idx_cmd)
    #Run kallisto quantifier
    kallisto_cmd = kal_cmd + ' quant -t 4 -i temp_prop.idx -o temp_prop {0} {1} >/dev/null 2>&1'.format(first_fa, second_fa)
    os.system(kallisto_cmd)
    # Parse kallisto's abundance table and convert counts to fractions.
    output_file = pd.read_csv(os.path.join(os.getcwd(), 'temp_prop', 'abundance.tsv'),sep='\t')
    DF = output_file.loc[:,['target_id','est_counts']]
    DF = DF[DF['est_counts'] != 0]
    DF['est_counts'] = (DF['est_counts']/float(DF['est_counts'].sum()))
    #DF = DF[DF['est_counts'] > 1.0]
    #DF['est_counts'] = (DF['est_counts']/DF['est_counts'].sum())
    var_predicted = DF['target_id'].tolist()
    props = DF['est_counts'].tolist()
    prop_dict = {var_predicted[i]:props[i] for i in range(len(var_predicted))}
    return prop_dict
'''
Input: Dataframe with rows=reads, columns=variants
Output: The proportions of variants (type list)
'''
def compute_proportions(dataframe):
    """Estimate variant proportions by fractional read assignment.

    Input: dataframe with rows=reads, columns=variants, entries=mismatch
        counts, where 6 is the fill value for "no informative mapping"
        (see returnQualityMatrix — TODO confirm the fill semantics).
    Output: numpy array of per-variant proportions (summing to 1 whenever
        any read votes), rounded to 10 decimal places.

    (The large commented-out entropy-weighting experiment from the
    original has been removed as dead code.)
    """
    prob_list = [0.0]*dataframe.shape[1]
    for row in dataframe.itertuples(index=False):
        entries = list(row)
        # Ignore the fill value 6 when finding the best score for this read.
        mmInfo = [i for i in entries if i != 6]
        min_mm = min(mmInfo)
        numOfVar_minMm = len([i for i in entries if i == min_mm])
        # A read votes only when it discriminates between variants (or
        # there is just one variant); its vote is split evenly across the
        # tied best (fewest-mismatch) variants.
        if numOfVar_minMm != len(entries) or len(entries) == 1:
            for i in range(len(entries)):
                if entries[i] == min_mm:
                    prob_list[i] += 1/numOfVar_minMm
    try:
        normalize_term = 1.0/(sum(prob_list))
    except ZeroDivisionError:
        print("no unique first score")
        # Bug fix: the original assigned `normalized_term` (typo), leaving
        # `normalize_term` undefined and raising NameError on the next line
        # whenever no read voted.
        normalize_term = 0
    prob_list = [normalize_term * i for i in prob_list]
    return np.round(prob_list,10)
#Create a dictionary given keys and values which are lists
#Create a dictionary given keys and values which are lists
def create_dictionary(keys, vals):
    """Zip *keys* and *vals* into a dict.

    Returns an empty dict when the two lists differ in length, mirroring
    the original's silent behaviour. (Rewritten with the idiomatic
    dict(zip(...)) instead of an index loop.)
    """
    if len(keys) != len(vals):
        return {}
    return dict(zip(keys, vals))
'''
Input: A dataframe with rows=reads, columns=variants, max_mm=maximum mismatch set
Output: Negative log likelihood score of this solution
'''
def compute_likelihood(df, max_mm, read_length=152):
    """Negative log-likelihood score of a candidate solution.

    Input: df with rows=reads, columns=variants, entries=mismatch counts;
        -1 means the read does not map (penalized as max_mm+1 mismatches).
    read_length: generalized from the hard-coded 152; the default keeps
        the original behaviour.
    Output: scalar negative log10 likelihood (lower is better).
    """
    numVar = df.shape[1]
    likelihood_list = list()
    for row in df.itertuples(index=False):
        read = list(row)
        temp = list()
        for i in range(numVar):
            # Treat unmapped reads as having max_mm+1 mismatches.
            mm = (max_mm + 1) if read[i] == -1 else read[i]
            prob = (0.01)**(mm) * (0.99)**(read_length - mm)
            temp.append(prob)
        likelihood_list.append( sum(temp) )
    #Similar to method in GAML paper
    likelihood_list = [i/(2.0*read_length*numVar) for i in likelihood_list]
    neg_log_likelihood = [-1.0*np.log10(j) for j in likelihood_list]
    score = sum(neg_log_likelihood)
    return score
'''
Return a mismatch dataframe where row=reads and columns=alleles, entries=# of mismatches
Input: path, absolute path to reads.txt file
option, "paired" if combining mate pair reads into 1
'''
def returnMismatchMatrix(path, option):
    """Return a mismatch DataFrame with rows=reads, columns=alleles and
    entries=# of mismatches (-1 = read does not map to that allele).

    path: absolute path to the tab-separated reads.txt file.
    option: "paired" sums the two mates of a pair into one row and allows
        up to 6 mismatches per pair; otherwise the cap is 3 per read.
    """
    df = pd.read_csv(path, sep='\t', header=None, usecols=[0,1,3], names=["Read", "Allele", "Mismatch"])
    df["Mismatch"] = df["Mismatch"].str[-1] #grab mismatch number
    df["Mismatch"] = pd.to_numeric(df["Mismatch"], errors='coerce')
    if option == "paired":
        df = df.groupby(["Read", "Allele"], as_index=False)["Mismatch"].sum() #combine two rows if option is paired
    matrix = df.pivot(index="Read", columns="Allele", values="Mismatch") #transform into rows=reads, columns=alleles
    matrix = matrix.fillna(-1)
    # Mask entries outside the allowed mismatch window (they become NaN and
    # are re-filled with -1 below).
    if option == "paired":
        matrix = matrix[(matrix>= -1) & (matrix<=6)]
    else:
        matrix = matrix[(matrix>= -1) & (matrix<=3)]
    matrix = matrix.fillna(-1)
    matrix = matrix[(matrix.T != -1).any()] #remove any rows with all -1 i.e. reads do not map to any alleles after limiting mm
    matrix = matrix.loc[:, (matrix != -1).any(axis=0)] #remove any alleles not mapped by any reads after limiting mm
    return matrix
'''
Summarize required information from SAM file, where the reads.txt file contains information
for each read mapped, alleles that it map to, base quality score, number of mismatches and mismatch position
'''
#def writeReadTable(capGene, iteration, option):
# readOutFile = open("{0}_{1}_{2}NoHeader.sam".format(capGene, iteration, option))
# writefile = open("{0}_{1}_{2}_reads.txt".format(capGene, iteration, option), "w")
# for line in readOutFile:
# fields = line.strip("\t").split()
# read = fields[0]
# allele = fields[2]
# quality = fields[10]
# mm = [i for i in fields if i.startswith("XM:i:")][0] #bowtie2
## mm = [i for i in fields if i.startswith("NM")][0] #bowtie
# mm_pos = [j for j in fields if j.startswith("MD:Z:")][0]
#
# writefile.write(read + "\t" + allele + "\t" + quality + "\t" + mm + "\t" + mm_pos + '\n')
#
# readOutFile.close()
# writefile.close()
'''
Combine two tags for mismatch position. For example, if mismatch position is "16^A2G2", we would like to get "18G2" as we do not care about
insertion details
Input: a, first string
b, second string
'''
def combiningTag(a, b):
    """Merge two MD-tag fragments that flanked a deleted segment.

    The trailing match-length of *a* is added to the leading match-length
    of *b*; e.g. combining "16" and "2G2" (from "16^A2G2") yields "18G2".

    Input: a, first string
           b, second string
    """
    # Bug fix: raw strings — "\D" is an invalid escape sequence (a
    # DeprecationWarning that became a SyntaxWarning/error in newer
    # Python 3 releases).
    firstNumbers = re.split(r"\D+", a)
    firstNumbers = list(filter(None, firstNumbers))
    first = firstNumbers[-1]
    secondNumbers = re.split(r"\D+", b)
    secondNumbers = list(filter(None, secondNumbers))
    second = secondNumbers[0]
    numbChanged = int(first) + int(second)
    combined = (a)[:-len(firstNumbers[-1])] + str(numbChanged) + (b)[len(secondNumbers[0]):]
    return combined
#Reconstruct the MD tag in SAM file as we are only interested in mismatches information but not insertion
#Input: md, md tag which is a string
#Reconstruct the MD tag in SAM file as we are only interested in mismatches information but not insertion
#Input: md, md tag which is a string
def reconstructMDTag(md):
    """Collapse deletion segments out of a SAM MD tag, keeping only the
    mismatch information (e.g. "16^A2G2" -> "18G2")."""
    if '^' in md:
        # Bug fix: raw string for the regex (avoids the "\^" escape being
        # interpreted by the string literal parser).
        fields = re.split(r"\^[ATCG]+", md)
        fields = list(filter(None, fields))
        # Fold neighbouring fragments together, left to right.
        appendedStr = fields[0]
        for i in range(1, len(fields) ):
            appendedStr = combiningTag(appendedStr, fields[i])
    else:
        appendedStr = md
    return appendedStr
#Return the base quality according to the position specified in MD tag
#Input: quality, a string
# mm_pos, a string
#Return the base quality according to the position specified in MD tag
#Input: quality, a string
#       mm_pos, a string
def returnQuality(quality, mm_pos):
    # For each read, sum the (scaled) base qualities at the mismatch
    # positions encoded by its reconstructed MD tag.
    # quality: list of per-read quality strings; mm_pos: list of MD tags.
    q_list = list()
    for index in range(len(mm_pos)):
        # Match-run lengths between mismatches, e.g. "18G2" -> [18, 2].
        temp = re.split("\D+", mm_pos[index])
        temp = [int(i) for i in temp]
#        print(index)
#        print(mm_pos[index])
        # Convert run lengths into absolute 0-based mismatch positions.
        calculate_quality_pos = list()
        calculate_quality_pos.append(temp[0])
        for j in range(1, len(temp)-1):
            calculate_quality_pos.append(calculate_quality_pos[j-1] + temp[j] + 1)
#        print(calculate_quality_pos)
        # Phred+33 decode, then scale into [0, 1] (93 = max Phred range).
        q = [ord( (quality[index])[k] ) for k in calculate_quality_pos]
        # NOTE(review): under Python 2 this is integer division, so each
        # scaled quality truncates to 0 — confirm whether that is intended.
        q = [(k-33)/93 for k in q]
        q_list.append(sum(q))
    return q_list
'''
Return a dataframe where rows=reads and columns=alleles, entries=quality score
Input: path, absolute path to the reads.txt file
option, "paired" if combining mate pairs read as 1
'''
def returnQualityMatrix(path, option):
    """Return a DataFrame with rows=reads, columns=alleles and entries=
    summed mismatch-position quality scores.

    path: absolute path to the reads.txt file.
    option: "paired" combines the two mates of a pair into one row (and
        allows up to 6 mismatches; otherwise 3). Cells for reads that do
        not map within the mismatch window are filled with 6 (paired) or
        3 (single) — the fill value doubles as a "no mapping" marker.
    """
    df = pd.read_csv(path, sep='\t', header=None, usecols=[0,1,2,3,4], names=["Read", "Allele", "Quality", "Mismatch", "Mm position"])
    df.loc[:, "Mismatch"] = df.loc[:, "Mismatch"].str[-1]
    df.loc[:, "Mismatch"] = pd.to_numeric(df.loc[:, "Mismatch"], errors='coerce')
    # Strip the SAM "MD:Z:" prefix and collapse deletion segments.
    df["Mm position"] = df["Mm position"].str.extract("MD:Z:(.*)", expand=False)
    zeroMismatch = (df["Mismatch"] == 0)
    df["Mm position"] = df["Mm position"].apply(reconstructMDTag)
    # Perfect matches score 0; otherwise sum qualities at mismatch sites.
    df.loc[~zeroMismatch, "Quality"] = returnQuality(df[~zeroMismatch]["Quality"].tolist(), df[~zeroMismatch]["Mm position"].tolist())
    df.loc[zeroMismatch, "Quality"] = 0
    df["Quality"] = pd.to_numeric(df["Quality"], errors='coerce')
    if option == "paired":
        tempDF = df.groupby(["Read", "Allele"], as_index=False)["Mismatch", "Quality"].sum()
        tempDF = tempDF[(tempDF["Mismatch"] >= 0) & (tempDF["Mismatch"]<=6)]
    else:
        tempDF = df[["Read", "Allele", "Mismatch", "Quality"]]
        tempDF = tempDF[(tempDF["Mismatch"] >= 0) & (tempDF["Mismatch"]<=3)]
    tempDF.reset_index(inplace=True, drop=True)
    matrix = tempDF.pivot(index="Read", columns="Allele", values="Quality")
    #The max quality score is 93.As we limit to 3 mismatches, hence the maximum of an entry is 93*3
    if option == "paired":
        matrix = matrix.fillna(6)
    else:
        matrix = matrix.fillna(3)
    return matrix
''' ====================================== Functions related to allele prediction ======================================================= '''
'''
Return a dictionary with key=indices, values=dictionary where key=allele, value=proportions
Input:
gene, name of gene
paired_path, absolute path to reads.txt file
samp, sample name
'''
def getVarAndProp(gene, paired_path, samp):
    """Predict which variants are present at a locus and their proportions.

    gene: name of gene; paired_path: absolute path to reads.txt file;
    samp: sample name (currently unused in the body).
    Returns (dict {index: {variant: proportion}}, number of ILP solutions).
    """
    #generate matrix
    dataMatrixDF = returnMismatchMatrix(paired_path, "paired")
    #Generate quality matrix
    Qmatrix = returnQualityMatrix(paired_path, "paired")
    #predict variants
    pred_object_val,var_predicted,reads_cov, all_solutions, all_objective = varSolver.solver(dataMatrixDF, Qmatrix, "paired")
    score_list = list()
    # NOTE(review): sys.maxint exists only on Python 2.
    min_score = sys.maxint
    #Compute negative log likelihood score for each solution
    for i in range(len(all_solutions)):
        score = compute_likelihood(dataMatrixDF.loc[reads_cov, all_solutions[i]], 6)
        score_list.append(score)
        if score <= min_score:
            min_score = score
    # Keep the best-scoring solution; break ties by lexicographic order of
    # the variant lists so the result is deterministic.
    argmin_score_list = [i for i in range(len(all_solutions)) if score_list[i] == min_score]
    if len(argmin_score_list) > 1:
        print("More than 1 solution having minimum negative log likelihood score.")
        lexico_min_score_sol = [all_solutions[i] for i in argmin_score_list]
        lexico_min_score_sol = sorted(lexico_min_score_sol)
        var_predicted = lexico_min_score_sol[0]
    else:
        var_predicted = all_solutions[argmin_score_list[0]]
    ''' ====== '''
    #compute proportions
    #solutionsAndProp_dict is a dictionary in which the keys are just indices and values are dictionaries, with variant as key and proportion as value
    solutionsAndProp_dict = dict()
    dataMatrix_pred = Qmatrix.loc[reads_cov, var_predicted]
    prop = compute_proportions(dataMatrix_pred)
    pred_prop = create_dictionary(var_predicted, prop)
    solutionsAndProp_dict[0] = pred_prop
    print("Solutions:{}".format(all_solutions))
    print("Score:{}".format(score_list))
    return solutionsAndProp_dict, len(all_solutions)
'''
This function is needed only if compatibility and quality score filtering are not discriminative enough.
localMILP returns the objective value of the MILP for a given distribution
Input:
sample, name of sample
loci, a list of loci
gene_solProp_dict, a dictionary with key=gene, values=dictionary where key=allele, value=proportion
reference, a dataframe of all the existing strains
objectiveOption: "all" means include all objective components, "noPropAndErr" omits proportion and error terms
'''
def localMILP(sample, loci, gene_solProp_dict, reference, objectiveOption, timelimit, gap):
genesDF = pd.DataFrame(columns=loci)
for gene in loci:
genesDF[gene] = [gene_solProp_dict[gene]]
data = dict()
data[sample] = genesDF
data = roundProp(data)
newNameToOriName = dict()
namingIndex=1
for i in sorted(data.keys()):
newNameToOriName["s{}".format(namingIndex)] = i
data["s{}".format(namingIndex)] = data.pop(i)
namingIndex += 1
''' ============================================== Data handling ====================================================== '''
#paramaters
propFormat = 1 #proportion in percentage or fraction
#loci = ['clpA', 'clpX', 'nifS']
numLoci = len(loci)
#read data for samples and reference
allSamples = data.keys()
#Get proportions of variants at different locus for each sample
varAndProp = returnVarAndProportions(data)
#Get the combinations at all loci across all samples
strains, numOfComb= returnCombinationsAndNumComb(data, numLoci, loci)
# numOfComb = strainAndNumComb[1]
uniqueStrains = strains.drop_duplicates(loci)
uniqueStrains = (uniqueStrains[loci]).reset_index(drop=True)
uniqueStrains["ST"] = uniqueStrains.index.values + 1 #assign indices for strains or each unique combinations
strains = strains.merge(uniqueStrains, indicator=True, how="left") #assign the index to the combinations(as strain data frame contains duplicated rows)
strains = strains.drop("_merge",1)
#For each variants, get a mapping of which strains it maps to
varSampToST = mapVarAndSampleToStrain(strains, loci, allSamples)
#weights and decision variables for proportion of strains. weight=0 if the strain is in reference, otherwise =1. Notice there will be duplications of strain types
#here because for proportions, we consider sample by sample rather than unique strain types
proportionWeightDecVarDF = strains.merge(reference, indicator=True, how="left")
proportionWeightDecVarDF["_merge"].replace(to_replace="both", value=0, inplace=True)
proportionWeightDecVarDF["_merge"].replace(to_replace="left_only", value=1, inplace=True)
proportionWeightDecVarDF = proportionWeightDecVarDF.rename(columns = {"_merge":"Weights"})
#Add proportion decision variable names
proportionWeightDecVarDF["Decision Variable"] = np.nan
for samp in allSamples:
thisSample = (proportionWeightDecVarDF.loc[proportionWeightDecVarDF['Sample'] == samp])['Sample']
propNameTemp = ["pi_%s_%d" %t for t in itertools.izip(thisSample, range(1,1+thisSample.shape[0]))]
#shorter name as CPLEX can't hold name with >16 char. Use last 3 digits of sample name to name decision variables i.e. SRR2034333 -> use 333
propNameTemp = [ele.replace("pi_{}".format(samp), "pi_{}".format(samp)) for ele in propNameTemp]
proportionWeightDecVarDF.loc[proportionWeightDecVarDF['Sample'] == samp, 'Decision Variable'] = propNameTemp
#weights and decision variables for unique strain types, weight=0 if strain is in reference, otherwise=1. no duplications
strainWeightDecVarDF = proportionWeightDecVarDF.drop_duplicates(loci)
retainCol = loci + ['Weights', 'ST']
strainWeightDecVarDF = strainWeightDecVarDF[retainCol].reset_index(drop=True)
strainWeightDecVarDF["Decision Variable"] = ["a{}".format(i) for i in range(1, strainWeightDecVarDF.shape[0] + 1)]
'''==================================== Forming ILP here ================================================'''
#Form a CPLEX model
model = cplex.Cplex()
#Some bound on cplex solver when gap finds it hard to converge
timelim_cb = model.register_callback(TimeLimitCallback)
timelim_cb.starttime = model.get_time()
timelim_cb.timelimit = timelimit
timelim_cb.acceptablegap = gap
timelim_cb.aborted = False
#minimize problem
model.objective.set_sense(model.objective.sense.minimize)
#add the decision variables for unqiue strain types
model.variables.add(obj=strainWeightDecVarDF['Weights'].values.tolist(), names=strainWeightDecVarDF['Decision Variable'], types = [model.variables.type.binary]* len(strainWeightDecVarDF['Weights'].values.tolist()))
#add proportions decision variables
if objectiveOption == "noPropAndErr":
model.variables.add(lb=[0]*proportionWeightDecVarDF.shape[0], ub=[propFormat]*proportionWeightDecVarDF['Weights'].shape[0], names=proportionWeightDecVarDF["Decision Variable"], types=[model.variables.type.continuous] * len(proportionWeightDecVarDF['Weights'].values.tolist()))
else:
model.variables.add(obj=[i for i in proportionWeightDecVarDF['Weights'].values.tolist()],lb=[0]*proportionWeightDecVarDF.shape[0],ub=[propFormat]*proportionWeightDecVarDF['Weights'].shape[0], names=proportionWeightDecVarDF["Decision Variable"], types=[model.variables.type.continuous] * len(proportionWeightDecVarDF['Weights'].values.tolist()))
#add linear constraints such that for each sample, the sum of the proportions of its variants combination = 1
propVarSumTo1 = list()
for samp in allSamples:
temp = (proportionWeightDecVarDF.loc[proportionWeightDecVarDF['Sample'] == samp])['Decision Variable'].tolist()
propVarSumTo1.append([temp, [1]* len(temp)])
model.linear_constraints.add(lin_expr=propVarSumTo1, rhs=[propFormat]*len(propVarSumTo1), senses=["E"]*len(propVarSumTo1), names=["c{0}".format(i+1) for i in range(len(propVarSumTo1))])
#add linear constraints such that each decision variable a_i must be at least pi_jk in which pi_jk is the proportion of V_jk and V_jk=a_i
#By this, if we use any of the pi, we force a_i to be 1
indicLargerPropDF = pd.DataFrame(columns=["ST","Indicator"])
indicLargerPropDF["ST"] = strainWeightDecVarDF["ST"]
indicLargerPropDF["Indicator"] = strainWeightDecVarDF["Decision Variable"]
indicLargerPropDF = (indicLargerPropDF.merge(proportionWeightDecVarDF, indicator=True, how="left", on="ST"))[["ST","Indicator","Decision Variable"]]
indicLargerPropDF.rename(columns={"Decision Variable": "Proportion Variable"}, inplace=True)
indicMinusProp = list()
for i,pi in itertools.izip(indicLargerPropDF["Indicator"].tolist(), indicLargerPropDF["Proportion Variable"].tolist()):
indicMinusProp.append([[i, pi],[propFormat, -1]])
model.linear_constraints.add(lin_expr=indicMinusProp, rhs=[0]*len(indicMinusProp), senses=["G"]*len(indicMinusProp), names=["c{0}".format(i+1+model.linear_constraints.get_num()) for i in range(len(indicMinusProp))] )
#Also, add linear constraints such that a_i - average of pi_jk <= 0.999. Otherwise will have case that a_i=1 and for all pi_jk, pi_jk=0
indicMinusAvgPropLess1_DF = indicLargerPropDF.groupby("Indicator")["Proportion Variable"].apply(list).reset_index()
indic = indicMinusAvgPropLess1_DF["Indicator"].tolist()
pV = indicMinusAvgPropLess1_DF["Proportion Variable"].tolist()
indicMinusAvgPropLess1_LHS = list()
for i in range(len(indic)):
a_i = indic[i]
pi_i = pV[i]
temp = list()
size = len(pi_i)
temp.append(a_i)
coef = list()
coef.append(propFormat)
for j in range(size):
temp.append(pi_i[j])
coef.append(-1.0/size)
indicMinusAvgPropLess1_LHS.append([temp, coef])
tolerance = 0.01*propFormat*0.01 #how much tolerance we set for the upper bound
model.linear_constraints.add(lin_expr=indicMinusAvgPropLess1_LHS, rhs=[propFormat - tolerance]*len(indicMinusAvgPropLess1_LHS), senses=["L"]*len(indicMinusAvgPropLess1_LHS), names=["c{0}".format(i+1+model.linear_constraints.get_num()) for i in range(len(indicMinusAvgPropLess1_LHS))])
model.linear_constraints.add(lin_expr=indicMinusAvgPropLess1_LHS, rhs=[0]*len(indicMinusAvgPropLess1_LHS), senses=["G"]*len(indicMinusAvgPropLess1_LHS), names=["c{0}".format(i+1+model.linear_constraints.get_num()) for i in range(len(indicMinusAvgPropLess1_LHS))])
#add error variables and linear constraints related to error terms
#create error variable names
varAndProp["Decision Variable"] = ["d_{}_".format(samp) for samp in varAndProp["Sample"].tolist() ]
varAndProp["Decision Variable"] = varAndProp["Decision Variable"] + varAndProp["Variant"]
#add error variables
#errorThres = 0.1
model.variables.add(obj=[1]*varAndProp.shape[0], names=varAndProp["Decision Variable"].tolist(), lb= [0]*varAndProp.shape[0], ub= [errorThres]*varAndProp.shape[0], types=[model.variables.type.continuous]*varAndProp.shape[0])
#add linear constraints such that for each sample, sum of pi_ik \dot V_ik (proportion \dot matrix representation) across all combinations = Proportion matrix
piDotComb = list()
piDotComb_2 = list()
propConstrRHS = list()
for locusName in varSampToST:
temp=list()
varSampToSTDict = varSampToST[locusName][0]
for (var, sample) in varSampToSTDict:
strainTypes = varSampToSTDict[(var, sample)]
propDecVar = proportionWeightDecVarDF[(proportionWeightDecVarDF["ST"].isin(strainTypes)) & (proportionWeightDecVarDF["Sample"] == "{}".format(sample))]["Decision Variable"]
errorDecVar = varAndProp[(varAndProp["Variant"] == var) & (varAndProp["Sample"] == sample)]["Decision Variable"]
propConstrRHS.append( float( ( (data["{}".format(sample)])[locusName][0] )[var] ) )
piDotComb.append([propDecVar.tolist() + errorDecVar.tolist(), [1]*len(propDecVar) + [-1]])
piDotComb_2.append([propDecVar.tolist() + errorDecVar.tolist(), [1]*len(propDecVar) + [1]])
model.linear_constraints.add(lin_expr=piDotComb, rhs=propConstrRHS, senses=["L"]*len(propConstrRHS), names=["c{0}".format(i+1+model.linear_constraints.get_num()) for i in range(len(propConstrRHS))])
model.linear_constraints.add(lin_expr=piDotComb_2, rhs=propConstrRHS, senses=["G"]*len(propConstrRHS), names=["c{0}".format(i+1+model.linear_constraints.get_num()) for i in range(len(propConstrRHS))])
#Export some info for MATLAB use
#writeInfoToCsv()
''' ================================== Solve ILP ========================================== '''
#model.write("borreliaLP.lp")
# model.set_results_stream(None)
model.solve()
# model.write("{}.lp".format(sample))
#options for searching more optimal solutions
# model.parameters.mip.pool.capacity.set(50)
# model.parameters.mip.pool.intensity.set(4)
# model.parameters.mip.limits.populate.set(100)
# model.parameters.mip.pool.absgap.set(0)
# model.parameters.mip.pool.replace.set(1)
# model.populate_solution_pool()
objvalue = model.solution.get_objective_value()
varNames = model.variables.get_names()
varValues = model.solution.get_values(varNames)
conclusion = pd.DataFrame(columns=["Decision Variable", "Value"])
conclusion["Decision Variable"] = varNames
conclusion["Value"] = varValues
error = conclusion[conclusion["Decision Variable"].str.contains("d_")]
nonZero_error = error[(error["Value"] <= -0.001) | (error["Value"] >= 0.001)]
if nonZero_error.shape[0] != 0:
print("^^^^^^^^^^^^^^^^^^^^^^^^^^^ error ^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
print nonZero_error
# objStr = conclusion[conclusion["Decision Variable"].str.contains("^a")]["Decision Variable"].tolist()
# objProp = conclusion[conclusion["Decision Variable"].str.contains("^pi")]["Decision Variable"].tolist()
# objErr = conclusion[conclusion["Decision Variable"].str.contains("^d")]["Decision Variable"].tolist()
#
# objStr_coeff = model.objective.get_linear(objStr)
# objProp_coeff = model.objective.get_linear(objProp)
# objErr_coeff = model.objective.get_linear(objErr)
#
# sum_str = sum([val*coeff for val, coeff in itertools.izip(model.solution.get_values(objStr), objStr_coeff)])
# sum_prop = sum([val*coeff for val, coeff in itertools.izip(model.solution.get_values(objProp), objProp_coeff)] )
# sum_err = sum([val*coeff for val, coeff in itertools.izip(model.solution.get_values(objErr), objErr_coeff)] )
# print("Objective value: {}".format(objvalue))
# print("Strain component: {}".format(sum_str))
# print("Prop component: {}".format(sum_prop))
# print("Error component: {}".format(sum_err))
# print("Sum :{}".format(sum_str+sum_prop+sum_err))
return objvalue
'''
This function is needed only if compatibility and quality score filtering are not discriminative enough.
localILP returns the objective value of the ILP for a given distribution
This ILP only considers strains, without taking any proportions into consideration
Input:
sample, name of sample
loci, a list of loci
gene_solProp_dict, a dictionary with key=gene, values=dictionary where key=allele, value=proportion
reference, a dataframe of all the existing strains
Output:
solution_dict, dictionary where key=indices, value=dataframe related to that solution(information such as alleles at each locus)
objective_dict, dictionary where key=indices, value=objective value of the i-th solution
data, dictionary where key=sample name, value=dataframe which contains information about alleles and proportion at each locus
strains, dataframe of unique strains for later use of localLP
'''
def localILP(sample, loci, gene_solProp_dict, reference):
    '''
    Build and solve the strain-selection ILP for one sample (full
    input/output contract in the block comment above this function).
    Minimizes the number of novel strains needed to explain every
    observed allele, ignoring proportions.
    '''
    # Wrap the sample's {allele: proportion} dicts into the one-row
    # dataframe layout produced by readData(), then round the proportions
    genesDF = pd.DataFrame(columns=loci)
    for gene in loci:
        genesDF[gene] = [gene_solProp_dict[gene]]
    data = dict()
    data[sample] = genesDF
    data = roundProp(data)
    # Alias samples as s1, s2, ... (CPLEX variable names are length
    # limited); keep a map back to the original names for the caller
    newNameToOriName = dict()
    namingIndex=1
    for i in sorted(data.keys()):
        newNameToOriName["s{}".format(namingIndex)] = i
        data["s{}".format(namingIndex)] = data.pop(i)
        namingIndex += 1
    allSamples = data.keys()
    numLoci = len(loci)
    # Enumerate every cross-locus allele combination (candidate strain)
    strains, numOfComb = returnCombinationsAndNumComb(data, numLoci, loci)
    # Index each unique combination with an ST (strain type) number, then
    # propagate that index back onto the (possibly duplicated) rows
    uniqueStrains = strains.drop_duplicates(loci)
    uniqueStrains = (uniqueStrains[loci]).reset_index(drop=True)
    uniqueStrains["ST"] = uniqueStrains.index.values + 1
    strains = strains.merge(uniqueStrains, indicator=True, how="left")
    strains = strains.drop("_merge",1)
    # Objective weight per strain: 0 if it already exists in the
    # reference, 1 if it would be a novel strain
    strainWeightDecVarDF = strains.merge(reference, indicator=True, how="left")
    strainWeightDecVarDF["_merge"].replace(to_replace="both", value=0, inplace=True)
    strainWeightDecVarDF["_merge"].replace(to_replace="left_only", value=1, inplace=True)
    strainWeightDecVarDF = strainWeightDecVarDF.rename(columns = {"_merge":"Weights"})
    strainWeightDecVarDF = strainWeightDecVarDF.drop_duplicates(loci)
    retainCol = loci + ['Weights', 'ST']
    strainWeightDecVarDF = strainWeightDecVarDF[retainCol].reset_index(drop=True)
    # One binary decision variable a1..aN per unique strain type
    strainWeightDecVarDF["Decision Variable"] = ["a{}".format(i) for i in range(1, strainWeightDecVarDF.shape[0] + 1)]
    # Relate each (sample, combination) row to its strain decision variable
    samp_decVar_DF = strains.merge(strainWeightDecVarDF, how="left")[loci+["Sample", "Decision Variable", "ST"]]
    # For each (allele, sample), the strain types that contain that allele
    varSampToST = mapVarAndSampleToStrain(samp_decVar_DF, loci, allSamples)
    '''==================================== Forming ILP here ================================================'''
    model = cplex.Cplex()
    # minimize the total weight (i.e. the number of novel strains used)
    model.objective.set_sense(model.objective.sense.minimize)
    model.variables.add(obj=strainWeightDecVarDF['Weights'].values.tolist(), names=strainWeightDecVarDF['Decision Variable'], types = [model.variables.type.binary]* len(strainWeightDecVarDF['Weights'].values.tolist()))
    # Coverage constraints: every allele observed in every sample must be
    # contained in at least one selected strain
    # NOTE(review): the inner loop variable `sample` shadows the function
    # parameter of the same name; the parameter is no longer needed at
    # this point, but confirm before refactoring.
    descAllAlleleLHS = list()
    for locusName in varSampToST:
        varSampToSTDict = varSampToST[locusName][0]
        for (var, sample) in varSampToSTDict:
            strainTypes = varSampToSTDict[(var, sample)]
            strainDecVar = samp_decVar_DF[(samp_decVar_DF["ST"].isin(strainTypes)) & (samp_decVar_DF["Sample"] == "{}".format(sample))]["Decision Variable"].tolist()
            descAllAlleleLHS.append([strainDecVar, [1]*len(strainDecVar)])
    model.linear_constraints.add(lin_expr=descAllAlleleLHS, rhs=[1]*len(descAllAlleleLHS), senses=["G"]*len(descAllAlleleLHS), names=["c{0}".format(i+1+model.linear_constraints.get_num()) for i in range(len(descAllAlleleLHS))])
    # Populate a pool of alternative optimal solutions instead of a
    # single solve (absgap=0 keeps only optimal ones)
    model.parameters.mip.pool.intensity.set(4)
    model.parameters.mip.pool.absgap.set(0)
    model.parameters.mip.pool.replace.set(1)
    model.populate_solution_pool()
    # Extract each pooled solution: the strains selected (Value == 1)
    # plus their ST index and novelty weight
    solution_dict = dict()
    objective_dict = dict()
    for i in range(model.solution.pool.get_num()):
        objvalue = model.solution.pool.get_objective_value(i)
        objective_dict[i] = objvalue
        varNames = model.variables.get_names()
        varValues = model.solution.pool.get_values(i,varNames)
        conclusion = pd.DataFrame(columns=["Decision Variable", "Value"])
        conclusion["Decision Variable"] = varNames
        conclusion["Value"] = varValues
        strainInfo = conclusion.merge(strainWeightDecVarDF[strainWeightDecVarDF["Decision Variable"].isin(varNames)])
        strainInfo["New/Existing"] = ["Existing" if w==0 else "New" for w in strainInfo["Weights"].tolist()]
        strainsNeeded = (strainInfo[strainInfo["Value"] == 1][loci + ["ST", "Weights"]])
        strainsNeeded.reset_index(drop=True, inplace=True)
        solution_dict[i] = strainsNeeded
    return solution_dict, objective_dict, data, strains, newNameToOriName
'''
This function is needed only if compatibility and quality score filtering are not discriminative enough.
localLP returns the objective value of the LP for a given solution and the distribution.
This function takes solution from localILP and consider the effect of proportions
Input:
solution, dataframe which contains alleles at each locus for a solution
data, see localILP
strains, see localILP
reference, dataframe of existing strains
loci, a list of locus
Output:
objvalue, objective value for this solution in this LP
feasible, indicator whether this solution is feasible
'''
def localLP(solution, data, strains, reference, loci, newNameToOriName):
    '''
    Score one candidate strain set (from localILP) with an LP that assigns
    strain proportions plus per-variant error terms.

    Input:
        solution, dataframe of alleles at each locus for one ILP solution
        data, see localILP
        strains, see localILP
        reference, dataframe of existing strains (kept for interface
            compatibility; not used directly here)
        loci, list of loci
        newNameToOriName, map short sample alias -> original name (kept for
            interface compatibility; not used directly here)
    Output:
        objvalue, LP objective value, or -1 when the LP is infeasible
        feasible, True iff CPLEX reports an optimal solution (status 1)
    '''
    propFormat = 1  # proportions expressed as fractions summing to 1
    allSamples = data.keys()
    # Observed per-variant proportions for every sample
    varAndProp = returnVarAndProportions(data)
    # Keep only the combinations that belong to the candidate solution
    proportionWeightDecVarDF = strains.merge(solution, how='left', indicator=True)
    proportionWeightDecVarDF = proportionWeightDecVarDF[proportionWeightDecVarDF["_merge"] == "both"]
    proportionWeightDecVarDF.drop(["_merge"], axis=1, inplace=True)
    proportionWeightDecVarDF.reset_index(drop=True, inplace=True)
    # For each (variant, sample), the strain types that contain it
    varSampToST = mapVarAndSampleToStrain(proportionWeightDecVarDF[loci+["Sample", "ST"]], loci, allSamples)
    # Name the proportion decision variables pi_<sample>_<k>
    for samp in allSamples:
        thisSample = (proportionWeightDecVarDF.loc[proportionWeightDecVarDF['Sample'] == samp])['Sample']
        propNameTemp = ["pi_%s_%d" % t for t in zip(thisSample, range(1, 1 + thisSample.shape[0]))]
        proportionWeightDecVarDF.loc[proportionWeightDecVarDF['Sample'] == samp, 'Decision Variable'] = propNameTemp
    ''' ===================================== Forming LP here =================================================== '''
    model = cplex.Cplex()
    model.objective.set_sense(model.objective.sense.minimize)
    # Continuous proportion variables in [0, propFormat], weighted by novelty
    model.variables.add(obj=proportionWeightDecVarDF['Weights'].values.tolist(), lb=[0]*proportionWeightDecVarDF.shape[0], ub=[propFormat]*proportionWeightDecVarDF.shape[0], names=proportionWeightDecVarDF['Decision Variable'], types=[model.variables.type.continuous]*len(proportionWeightDecVarDF['Weights'].values.tolist()))
    # Per sample, the strain proportions must sum to propFormat
    propVarSumTo1 = list()
    for samp in allSamples:
        temp = (proportionWeightDecVarDF.loc[proportionWeightDecVarDF['Sample'] == samp])['Decision Variable'].tolist()
        propVarSumTo1.append([temp, [1]*len(temp)])
    model.linear_constraints.add(lin_expr=propVarSumTo1, rhs=[propFormat]*len(propVarSumTo1), senses=["E"]*len(propVarSumTo1), names=["c{0}".format(i+1) for i in range(len(propVarSumTo1))])
    # Error variables d_<sample>_<variant>, bounded by the global errorThres
    varAndProp["Decision Variable"] = ["d_{}_".format(samp) for samp in varAndProp["Sample"].tolist()]
    varAndProp["Decision Variable"] = varAndProp["Decision Variable"] + varAndProp["Variant"]
    model.variables.add(obj=[1]*varAndProp.shape[0], names=varAndProp["Decision Variable"].tolist(), lb=[0]*varAndProp.shape[0], ub=[errorThres]*varAndProp.shape[0], types=[model.variables.type.continuous]*varAndProp.shape[0])
    # |sum of proportions of strains containing a variant - observed
    # proportion| <= error, encoded as a pair of L/G constraints
    piDotComb = list()
    piDotComb_2 = list()
    propConstrRHS = list()
    for locusName in varSampToST:
        varSampToSTDict = varSampToST[locusName][0]
        for (var, sample) in varSampToSTDict:
            strainTypes = varSampToSTDict[(var, sample)]
            propDecVar = proportionWeightDecVarDF[(proportionWeightDecVarDF["ST"].isin(strainTypes)) & (proportionWeightDecVarDF["Sample"] == "{}".format(sample))]["Decision Variable"]
            errorDecVar = varAndProp[(varAndProp["Variant"] == var) & (varAndProp["Sample"] == sample)]["Decision Variable"]
            propConstrRHS.append(float(((data["{}".format(sample)])[locusName][0])[var]))
            piDotComb.append([propDecVar.tolist() + errorDecVar.tolist(), [1]*len(propDecVar) + [-1]])
            piDotComb_2.append([propDecVar.tolist() + errorDecVar.tolist(), [1]*len(propDecVar) + [1]])
    model.linear_constraints.add(lin_expr=piDotComb, rhs=propConstrRHS, senses=["L"]*len(propConstrRHS), names=["c{0}".format(i+1+model.linear_constraints.get_num()) for i in range(len(propConstrRHS))])
    model.linear_constraints.add(lin_expr=piDotComb_2, rhs=propConstrRHS, senses=["G"]*len(propConstrRHS), names=["c{0}".format(i+1+model.linear_constraints.get_num()) for i in range(len(propConstrRHS))])
    ''' ==== Solve ==== '''
    model.set_problem_type(0)  # force continuous LP
    model.solve()
    # BUG FIX: previously the objective value was re-read unconditionally
    # after this if/else, clobbering the -1 infeasible sentinel and
    # potentially raising a CPLEX error when no solution exists.
    if model.solution.get_status() == 1:  # 1 == optimal
        objvalue = model.solution.get_objective_value()
        feasible = True
    else:
        objvalue = -1
        feasible = False
    return objvalue, feasible
'''
Return the distribution which optimizes the local MILP (If more than 1, choose the one which is returned first)
Input:
samp, sample name
aTuple, tuple representing which distribution to consider
aDict, a dictionary where key=gene, value=a dictionary where key=solution indices, value=a dictionary where key=allele, value=proportion of the allele
This is confusing but here is an example: aDict = {clpA: {0:{clpA_1:0.5, clpA_2:0.5}, 1:{clpA_1:1.0}, 1:{...} }
clpX: {...}, ...}
loci, a list of locus
reference, dataframe of existing strains
option, "all" if all objective components, "noPropAndErr" if omit proportion and error terms
Output:
comb_minObjVal_dict, a dictionary where key=gene, value=a dictionary where key=allele, value=proportion
'''
def localMinimizer(samp, aTuple, aDict, loci, reference, option, timelimit, gap):
    '''
    Run localMILP on every candidate distribution combination and return the
    best one (the first combination with the minimum objective value).

    Input:
        samp, sample name
        aTuple, list of tuples; each picks one solution index per locus
        aDict, {gene: {solution index: {allele: proportion}}}
        loci, list of loci
        reference, dataframe of existing strains
        option, "all" for all objective components, "noPropAndErr" otherwise
        timelimit/gap, solver controls forwarded to localMILP
    Output:
        {gene: {allele: proportion}} for the winning combination
    '''
    objectives = list()
    print("\nNumber of combinations to run: {}\n".format(len(aTuple)))
    for counter, combin in enumerate(aTuple, start=1):
        print("\nxxxxxxxxxxxxxxxxx Combination : {} xxxxxxxxxxxxxxxxxxxxxxxxxxxx\n".format(counter))
        chosen = {gene: aDict[gene][idx] for (gene, idx) in itertools.izip(loci, combin)}
        objectives.append(localMILP(samp, loci, chosen, reference, option, timelimit, gap))
    print("Objective Value: {}".format(objectives))
    # Indices achieving the minimum objective; ties resolved by taking the first
    best_indices = np.argwhere(objectives == np.amin(objectives)).flatten().tolist()
    if len(best_indices) > 1:
        print("@@@@@@@@@@@@@@@@@@@@@@@ You have more than 1 distribution having same objective value @@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
    winner = aTuple[best_indices[0]]
    return {gene: aDict[gene][idx] for (gene, idx) in itertools.izip(loci, winner)}
'''
Heuristic: Return the distribution which optimizes the local ILP first, then the local LP (If more than 1, choose the one which is returned first)
Input:
samp, sample name
aTuple, tuple representing which distribution to consider
aDict, a dictionary where key=gene, value=a dictionary where key=solution indices, value=a dictionary where key=allele, value=proportion of the allele
This is confusing but here is an example: aDict = {clpA: {0:{clpA_1:0.5, clpA_2:0.5}, 1:{clpA_1:1.0}, 1:{...} }
clpX: {...}, ...}
loci, a list of locus
reference, dataframe of existing strains
Output:
comb_minObjVal_dict, a dictionary where key=gene, value=a dictionary where key=allele, value=proportion
'''
def localMinimizer_sep(samp, aTuple, aDict, loci, reference):
    '''
    Heuristic scorer: for each candidate distribution combination, run the
    local ILP (strain selection) and then the local LP (proportions) on each
    ILP solution; score the combination by the best ILP+LP objective sum.

    Input:
        samp, sample name
        aTuple, list of tuples; each picks one solution index per locus
        aDict, {gene: {solution index: {allele: proportion}}}
        loci, list of loci
        reference, dataframe of existing strains
    Output:
        {gene: {allele: proportion}} for the best combination, or -1 when
        no combination has any feasible LP solution
    '''
    objValue_list = list()
    anyFeasible = False
    print("\nNumber of combinations to run: {}\n".format(len(aTuple)))
    track = 1
    for combin in aTuple:
        print("\nxxxxxxxxxxxxxxxxx Combination : {} xxxxxxxxxxxxxxxxxxxxxxxxxxxx\n".format(track))
        comb_dict = {gene: aDict[gene][i] for (gene, i) in zip(loci, combin)}
        solution_dict, ilp_objective_dict, data, strains, newNameToOriName = localILP(samp, loci, comb_dict, reference)
        feasible_sol = list()
        lp_objective_dict = dict()
        infeasibility = 0
        for i in solution_dict.keys():
            try:
                objvalue, feasible = localLP(solution_dict[i], data, strains, reference, loci, newNameToOriName)
                if feasible == False:
                    infeasibility += 1
                else:
                    feasible_sol.append(i)
                    lp_objective_dict[i] = objvalue
            except cplex.exceptions.errors.CplexSolverError:
                infeasibility += 1
        if infeasibility == len(solution_dict):
            print("This combination has no feasible solutions")
            # BUG FIX: still record a score (infinity) so that indices in
            # objValue_list stay aligned with aTuple; previously skipped
            # combinations shifted the argmin index onto the wrong tuple.
            objValue_list.append(np.inf)
        else:
            anyFeasible = True
            # Best combined ILP + LP objective over the feasible solutions
            min_obj = np.inf
            for j in feasible_sol:
                if (ilp_objective_dict[j] + lp_objective_dict[j]) < min_obj:
                    min_obj = ilp_objective_dict[j] + lp_objective_dict[j]
            objValue_list.append(min_obj)
            print("Objective value: {}".format(min_obj))
        track += 1
    if anyFeasible == False:
        return -1
    print("Objective Value: {}".format(objValue_list))
    # Pick the combination with the lowest score; ties take the first
    minObjValIndex_list = np.argwhere(objValue_list == np.amin(objValue_list)).flatten().tolist()
    if len(minObjValIndex_list) > 1:
        print("@@@@@@@@@@@@@@@@@@@@@@@ You have more than 1 distribution having same objective value @@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
    comb_minObjVal = aTuple[minObjValIndex_list[0]]
    return {gene: aDict[gene][i] for (gene, i) in zip(loci, comb_minObjVal)}
'''
Return tuples which are explainable by the existing strains
Input:
aTuple, a tuple of integer which represents which solution to consider at each locus
gene_solProp_dict, see localMinimizer_sep
loci, a list of locus
reference, dataframe of existing strains
Output:
compatible_tuples, a list of compatible tuples
'''
def compatibleFilter(aTuple, gene_solProp_dict, loci, reference):
    '''
    Keep only the combinations whose alleles can all be found in at least
    one existing reference strain.

    Input:
        aTuple, list of tuples; each picks one solution index per locus
        gene_solProp_dict, {gene: {solution index: {allele: proportion}}}
        loci, list of loci
        reference, dataframe of existing strains
    Output:
        compatible_tuples, the subset of aTuple explainable by the reference
    '''
    compatible_tuples = list()
    for combin in aTuple:
        # izip replaced with zip: identical here (consumed immediately)
        # and works on both Python 2 and 3
        comb_dict = {gene: gene_solProp_dict[gene][i] for (gene, i) in zip(loci, combin)}
        # Narrow the reference rows locus by locus; bail out early once no
        # reference strain can match
        temp_boolean = True
        for locus in loci:
            temp_boolean = temp_boolean & reference[locus].isin(comb_dict[locus].keys())
            if sum(temp_boolean) == 0:
                break
        if sum(temp_boolean) != 0:
            compatible_tuples.append(combin)
    return compatible_tuples
''' ====================================== Functions related to strain prediction ======================================================= '''
'''Create a function to read all data files and return a dictionary where keys are the sample names,
values are dataframes which contain the information about variants and proportions at different loci.
It also return total number of samples and starting sample number(based on last 3 digits)
Input
dataFilePath: File path that contains all your sample folders. dataFilePath should only contain directories of samples and
a reference csv
lociOrder: a list that contains order of columns that you want
option: "all" if running on all samples
'''
def readData(dataFilePath, lociOrder, option):
    '''
    Read every sample's per-locus "<gene>_proportions.csv" files.

    Input:
        dataFilePath, directory of sample folders (option == "all") or a
            single sample directory (any other option)
        lociOrder, list fixing the column order of each sample's dataframe
        option, "all" to scan all sample sub-directories
    Output:
        data, dict mapping sample name -> one-row dataframe whose columns
            follow lociOrder and whose cells hold {variant: proportion}
            dicts (proportions kept as strings, as read from the csv)
        numSamples, number of samples found
    '''
    data = dict()
    sampleFold = list()
    if option == "all":  # dataFilePath contains one sub-directory per sample
        for folder in os.listdir(dataFilePath):
            if not folder.endswith(".csv"):
                sampleFold.append(folder)
    else:  # dataFilePath itself is a single sample directory
        sampleFold = [dataFilePath.split("/")[-1]]
    numSamples = len(sampleFold)
    for folder in sampleFold:
        data["{}".format(folder)] = pd.DataFrame(columns=lociOrder)
        if option == "all":
            sampleFilePath = dataFilePath + "/{}".format(folder)
        else:
            sampleFilePath = dataFilePath
        dirs = os.listdir(sampleFilePath)
        csvFiles = [i for i in dirs if i.endswith("proportions.csv")]
        temp = re.compile('(.*)_proportions.csv')  # extract the gene name
        for f in csvFiles:
            gene = temp.findall(f)
            # BUG FIX: open inside a context manager so the file handle is
            # closed (previously csv.reader(open(...)) leaked the handle)
            with open(sampleFilePath + "/" + f, 'r') as fh:
                reader = csv.reader(fh)
                # store variants and respective proportions in a dictionary
                d = dict()
                for variant, proportion in reader:
                    d[variant] = proportion
            # wrap dictionary with a list for storage in the dataframe cell
            alist = [d]
            (data["{}".format(folder)])[gene[0]] = alist
    return data, numSamples
#def readDataWithoutProp(dataFilePath, lociOrder, option):
# data = dict()
# sampleFold = list()
#
# if option == "all": #dataFilePath = ...../variantsAndProp
# for folder in os.listdir(dataFilePath):
# if not folder.endswith(".csv"):
# sampleFold.append(folder)
# else: #dataFilePath = ..../variantsAndProp/SRR2034333
# sampleFold = [dataFilePath.split("/")[-1]]
#
# numSamples = len(sampleFold)
# startingSamp = min([int(i[-3:]) for i in sampleFold])
#
# #data is a dictionary in which key=sample, value=a dataframe which has entries=list and the columns are loci
# for folder in sampleFold:
# data[folder] = pd.DataFrame(columns=lociOrder)
# if option == "all":
# sampleFilePath = dataFilePath + "/{}".format(folder)
# else:
# sampleFilePath = dataFilePath
#
# dirs= os.listdir(sampleFilePath)
# csvFiles = [i for i in dirs if i.endswith("proportions.csv")]
# temp = re.compile('(.*)_proportions.csv') #grab gene name
# for f in csvFiles:
# gene = temp.findall(f)
# reader = csv.reader(open(sampleFilePath+"/"+f, 'r'))
#
# #store alleles in a list
# allele_list = list()
# for row in reader:
# allele_list.append(row[0])
#
# (data[folder])[gene[0]] = [allele_list]
#
# return data, numSamples, startingSamp
'''
Return data(dictionary) with all proportions rounded to 3 dp
Input: data(dictionary) loaded previously
'''
def roundProp(data):
    '''
    Round every variant proportion to 3 decimal places while preserving the
    per-locus total, using largest-remainder rounding on a 1000x scale.

    Input:
        data, dict mapping sample name -> one-row dataframe of
            {variant: proportion-string} dicts (see readData)
    Output:
        roundedData, same structure with proportions as "%.3f" strings
    '''
    roundedData = dict()
    # Column order taken from the first sample, as all samples share it.
    # BUG FIX (py3 compat): data.keys()[0] is Python-2-only; next(iter(...))
    # is equivalent on both versions.
    locus = list(data[next(iter(data))].columns.values)
    for sample in data:
        sampleDF = data[sample]
        roundedSampleDF = pd.DataFrame(columns=locus)
        for pos, column in enumerate(sampleDF.values[0]):  # {variant: proportion}
            keys = column.keys()
            scaled = np.array([float(p) * 1000 for p in column.values()])
            # Largest-remainder: round up exactly as many entries as the
            # fractional parts sum to, choosing the largest fractions first
            frac = scaled - np.array([int(p) for p in scaled])
            numRoundUp = int(round(sum(frac)))
            descending = frac.argsort()[::-1]
            mask = np.zeros_like(scaled, dtype=bool)
            mask[descending[0:numRoundUp]] = True
            scaled[mask] = np.ceil(scaled[mask])
            scaled[~mask] = np.floor(scaled[~mask])
            rounded = ["{:.3f}".format(v) for v in scaled / 1000.0]
            # izip replaced with zip (works on both Python 2 and 3)
            roundedSampleDF[locus[pos]] = [dict(zip(keys, rounded))]
        roundedData[sample] = roundedSampleDF
    return roundedData
'''
Check that the proportions at each locus for each sample sum to 1. Raise a SystemExit exception and report the corresponding (sample, locus) pairs which do not sum to 1
Input:
data: dictionary, data file loaded previously
'''
def checkProp(data, propFormat):
    '''
    Verify that, for every sample and locus, the variant proportions sum to
    propFormat; exit (SystemExit) with a report of offending pairs otherwise.

    Input:
        data, dict mapping sample name -> one-row dataframe of
            {variant: proportion-string} dicts (see readData)
        propFormat, expected per-locus total (1 for fractions, 100 for %)
    '''
    print("....Checking sum of proportions at each locus for each sample are equal to 1....")
    report = list()
    for sample in data:
        for column in data[sample].values[0]:
            # Locus name recovered from any variant name "<locus>_<idx>".
            # BUG FIX (py3 compat): column.keys()[0] is Python-2-only.
            match = re.search("(.*)_", next(iter(column)))
            locus = match.group(1)
            total = sum(float(p) for p in column.values())
            if round(total, 4) != float(propFormat):
                report.append((sample, locus))
    # BUG FIX: the original raised SystemExit inside a bare `except:` wrapper,
    # which would also swallow any unrelated error; exit directly instead.
    if report:
        sys.exit("The following (sample, locus) pairs have sum of proportions not equal to 1.\n {0}".format(report))
    print("Sum of proportions at each locus for each sample are equal to 1\n")
'''Return the unique combinations of variants at all loci across all samples
data: Data file preloaded previously
def uniqueCombinations(data):
uniqueStrains = list()
for sample in data:
#key = sample name, value = dataframe
sampleDF = data[sample]
variantsAtAllLoci = list()
#only one row in the dataframe
for column in sampleDF.values[0]:
variantsAtAllLoci.append(column.keys())
combination = itertools.product(*variantsAtAllLoci)
for strain in combination:
uniqueStrains.append(strain)
uniqueStrains = list(set(uniqueStrains))
return uniqueStrains
'''
'''Returns a dataframe of combinations of the variants at all loci, the matrix representation of the combination
and the sample that contains it. Also, it returns the total number of combinations that each sample has
Input
data: dictionary, information of all samples preloaded previously
numLoci: number of loci
loci: list of locus
'''
def returnCombinationsAndNumComb(data, numLoci, loci):
    '''
    Enumerate every cross-locus combination of observed variants for each
    sample.

    Input:
        data, dict mapping sample name -> one-row dataframe of
            {variant: proportion} dicts (see readData)
        numLoci, number of loci (kept for interface compatibility; unused)
        loci, list of locus names
    Output:
        strains, dataframe with columns loci + ['Sample'], one row per
            combination (duplicated across samples that share it)
        numOfComb, dict mapping sample name -> its number of combinations
    '''
    strains = list()
    numOfComb = dict()
    previousNum = 0
    for sample in data:
        sampleDF = data[sample]
        # The observed variants at each locus, in column order
        variantsAtAllLoci = list()
        for locus in sampleDF.columns.tolist():
            variantsAtAllLoci.append(sampleDF[locus][0].keys())
        # Cartesian product over loci = all candidate strains for this sample
        for strain in itertools.product(*variantsAtAllLoci):
            temp = list(strain)
            temp.append(sample)
            strains.append(temp)
        numOfComb[sample] = len(strains) - previousNum
        # BUG FIX: previousNum must track the cumulative length of `strains`,
        # not the previous sample's count; otherwise counts are wrong from
        # the third sample onward.
        previousNum = len(strains)
    strains = pd.DataFrame(strains, columns=(loci + ['Sample']))
    return strains, numOfComb
'''Returns a dictionary where keys=sample name and values=numpy matrix representation of the proportions for the sample
Input
data: dictionary, information of all samples preloaded previously
numLoci: number of loci
def returnProportions(data, numLoci):
proportions = dict()
for sample in data:
sampleDF = data[sample]
maxNumVar = 0
for column in sampleDF.values[0]:
numVar = len(column.keys())
if numVar > maxNumVar:
maxNumVar = numVar
proportions[sample] = np.zeros(shape=(maxNumVar, numLoci))
i = 0
for column in sampleDF.values[0]:
numVar = len(column.keys())
(proportions[sample])[0:numVar, i] = np.transpose(np.asarray(column.values(), dtype='float64'))
i = i+1
return proportions
'''
'''
Return a data frame with variant, locus, variant's proportions, the sample referring to as columns
Input:
data: dictionary, data loaded previously
'''
def returnVarAndProportions(data):
    '''
    Flatten all samples into one dataframe with columns
    ["Variant", "Locus", "Proportion", "Sample"], one row per observed
    variant, with proportions converted to float.

    Input:
        data, dict mapping sample name -> one-row dataframe of
            {variant: proportion} dicts (see readData)
    '''
    variants = list()
    proportions = list()
    locusCol = list()
    sampleCol = list()
    for sample in data:
        sampleDF = data[sample]
        for locusName in sampleDF:
            varPropDict = sampleDF[locusName][0]  # {variant: proportion}
            for variant in varPropDict:
                variants.append(variant)
                proportions.append(float(varPropDict[variant]))
                locusCol.append(locusName)
                sampleCol.append(sample)
    varAndProp = pd.DataFrame(columns=["Variant", "Locus", "Proportion", "Sample"])
    varAndProp["Variant"] = variants
    varAndProp["Locus"] = locusCol
    varAndProp["Proportion"] = proportions
    varAndProp["Sample"] = sampleCol
    return varAndProp
'''
Return a data frame with locus as columns, and each column contains a dictionary(wrapped by a list) which shows the indices of unique strains that each variants map to
Input:
uniqueStr: a dataframe with unique strain types
loci : a list containing locus name
def mapVarToStrain(uniqueStr, loci):
varToStr = pd.DataFrame(columns=loci)
for name in loci:
uniqueVar = uniqueStr.drop_duplicates(name)
uniqueVar = (uniqueVar[name]).reset_index(drop=True)
var = dict()
for row in uniqueVar:
print row
strList = uniqueStr[uniqueStr[name].isin([row])]["ST"]
var[row] = strList
varToStr[name] = [var]
return varToStr
'''
'''
Return a data frame with locus as columns, and each column contains a dictionary(wrapped by a list) which shows the indices of unique strains(ST) that each key=(variants, sample) maps to
Input:
strain: a dataframe with variants combination
loci : a list containing locus name
start: starting sample number
numSamp: number of samples
'''
def mapVarAndSampleToStrain(strain, loci, allSamples):
    '''
    For every locus, map each (variant, sample) pair to the strain-type
    indices (ST) that contain that variant in that sample.

    Input:
        strain, dataframe of variant combinations with "Sample" and "ST"
        loci, list of locus names
        allSamples, iterable of sample names
    Output:
        dataframe with one column per locus; each cell is a one-element
        list wrapping {(variant, sample): Series of ST indices}
    '''
    varToStr = pd.DataFrame(columns=loci)
    for locusName in loci:
        # Unique variants seen at this locus across all combinations
        uniqueVariants = strain.drop_duplicates(locusName)[locusName].reset_index(drop=True).tolist()
        mapping = dict()
        for sample in allSamples:
            for variant in uniqueVariants:
                matches = strain[(strain[locusName] == variant) & (strain["Sample"] == sample)]["ST"]
                if len(matches) != 0:  # keep only pairs that actually occur
                    mapping[(variant, sample)] = matches
        varToStr[locusName] = [mapping]
    return varToStr
#def writeInfoToCsv():
# proportionWeightDecVarDF.to_csv('proportion.csv')
# strainWeightDecVarDF.to_csv('strain.csv')
# varAndProp.to_csv('error.csv')
#
# piDotCombDF = pd.DataFrame([propDecVar[0] for propDecVar in piDotComb])
# piDotCombDF.to_csv('piDotComb.csv')
# pd.DataFrame(propConstrRHS).to_csv('propConstrRHS.csv')
#def hammingWeightsStrain(df, loci,reference):
# ref_copy = reference[:]
#
# for l in loci:
# df.loc[:,l] = df.loc[:,l].str.split('_').str[1]
# ref_copy.loc[:,l] = ref_copy.loc[:,l].str.split('_').str[1]
#
# matrix_ref = ref_copy[loci].as_matrix()
#
# index_hamming_dict = {ind:1.0 for ind in df.index}
#
# for row_weight in df[loci].itertuples():
# ind = row_weight[0]
# vec = list(row_weight[1:])
#
# for row_ref in matrix_ref:
# hd = hamming(vec, row_ref)
#
# if hd < 0.5:
# index_hamming_dict[ind] = 0.5
# break
#
# return pd.DataFrame(data=index_hamming_dict.values(), index=index_hamming_dict.keys())
def computeDist_byGene(allele1, allele2, gene, distMat_dict):
    '''
    Look up the pairwise distance between two alleles of `gene` in the
    precomputed distance-matrix dict; return -1 (and print a warning) when
    either allele is missing from the matrix.
    '''
    try:
        return distMat_dict[gene][0].loc[allele1, allele2]
    except KeyError:
        print("{0} or {1} allele is not in the distance matrix dataframe".format(allele1, allele2))
        return -1
'''
Compute the minimum distance between a strain and strains in library
Assuming strains in reference are indexed with [gene]_[index] i.e. clpA_1, clpX_5,... As we use a df.isin() method, order
does not take into account.
'''
def computeStrDist(strain, distMat_dict, loci, reference):
    '''
    Minimum summed allele distance between `strain` and any reference
    strain. Alleles are compared in sorted order on both sides (so locus
    order is ignored); a reference row is skipped entirely if any of its
    pairwise lookups fails (computeDist_byGene returns -1).
    '''
    sorted_strain = sorted(strain)
    minDist = float('Inf')
    for row in reference[loci].itertuples(index=False):
        sorted_ref = sorted(list(row))
        assert (len(sorted_ref) == len(sorted_strain)), "Strains are of different length"
        dist = 0
        for ref_allele, strain_allele in zip(sorted_ref, sorted_strain):
            # Gene name is the prefix of the allele, e.g. "clpA" in "clpA_1"
            d = computeDist_byGene(ref_allele, strain_allele, ref_allele.split("_")[0], distMat_dict)
            if d == -1:  # missing allele: ignore this reference row
                break
            dist += d
        else:
            if dist < minDist:
                minDist = dist
    return minDist
'''
Compute the weights for each strain. 0 if existing, min d(s,s') for novel strain s where s' is an existing strain
'''
def computeMinDistWeights(strainWeightDecVarDF, distMat_dict, loci, reference):
    '''
    Objective weight for each strain row: 0 when exactly one reference row
    contains all of the strain's alleles (order-insensitive, via isin),
    otherwise the minimum distance to the reference (computeStrDist).
    '''
    weights = list()
    for strain_row in strainWeightDecVarDF[loci].itertuples(index=False):
        alleles = list(strain_row)
        # NOTE(review): `== 1` means a strain matching two reference rows is
        # treated as novel; confirm that is intended (vs `>= 1`).
        if reference[loci].isin(alleles).all(axis=1).sum() == 1:
            weights.append(0)
        else:
            weights.append(computeStrDist(alleles, distMat_dict, loci, reference))
    return weights
'''
Assuming each edit distance matrix is named as: editDistanceMatrix_[gene].csv
'''
def returnDistMat_dict(pathToDistMat, loci):
    """Load the per-gene edit-distance matrices.

    Each matrix lives in pathToDistMat as editDistanceMatrix_<gene>.csv
    with a 'level_0' column holding the row allele labels.
    Returns {gene: [DataFrame]} (single-element list kept for
    compatibility with existing callers).
    """
    def _load(gene):
        csv_path = os.path.join(pathToDistMat, 'editDistanceMatrix_{}.csv'.format(gene))
        return pd.read_csv(csv_path, sep=",").set_index("level_0")
    return {gene: [_load(gene)] for gene in loci}
'''
Predict strains using MILP and output in csv file
Input:
dataPath, absolute path to directory containing samples' alleles and proportions
pathToDistMat, path to directory containing edit distances matrix for each gene
refStrains, path to strain_ref.txt
outputPath, path to output csv file
loci, list of locus
objectiveOption, "all" means all objective components and "noPropAndErr" means omitting proportion and error terms
globalILP_option, "all" if running on all samples
'''
def strainSolver(dataPath, refStrains, outputPath, objectiveOption, globalILP_option='all', timelimit=600, gap=8,
loci=["clpA","clpX","nifS","pepX","pyrG","recG","rplB","uvrA"],pathToDistMat=None):
''' ============================================== Data handling ====================================================== '''
#paramaters
propFormat = 1 #proportion in percentage or fraction
#loci = ['clpA', 'clpX', 'nifS']
numLoci = len(loci)
#read data for samples and reference
data, numSamples = readData(dataPath,loci, globalILP_option)
newNameToOriName = dict()
namingIndex=1
for i in sorted(data.keys()):
newNameToOriName["s{}".format(namingIndex)] = i
data["s{}".format(namingIndex)] = data.pop(i)
namingIndex += 1
reference = pd.read_csv(refStrains,sep="\t",usecols=range(1,numLoci+1))
lociNames = list(reference.columns.values)
numReference = reference.shape[0]
allSamples = data.keys()
#check proportions sum to 100
checkProp(data, propFormat)
#round the proportions to 3 decimal places
data = roundProp(data)
#As reference only contains numbers as entries, add gene name to the variants for better identification
for name in lociNames:
reference["%s" %name] = name + "_" + reference["%s" %name].astype(str)
#Get proportions of variants at different locus for each sample
varAndProp = returnVarAndProportions(data)
#Get the combinations at all loci across all samples
strains, numOfComb = returnCombinationsAndNumComb(data, numLoci, loci)
uniqueStrains = strains.drop_duplicates(loci)
uniqueStrains = (uniqueStrains[loci]).reset_index(drop=True)
uniqueStrains["ST"] = uniqueStrains.index.values + 1 #assign indices for strains or each unique combinations
strains = strains.merge(uniqueStrains, indicator=True, how="left") #assign the index to the combinations(as strain data frame contains duplicated rows)
strains = strains.drop("_merge",1)
#For each variants, get a mapping of which strains it maps to
varSampToST = mapVarAndSampleToStrain(strains, loci, allSamples)
#weights and decision variables for proportion of strains. weight=0 if the strain is in reference, otherwise =1. Notice there will be duplications of strain types
#here because for proportions, we consider sample by sample rather than unique strain types
proportionWeightDecVarDF = strains.merge(reference, indicator=True, how="left")
proportionWeightDecVarDF["_merge"].replace(to_replace="both", value=0, inplace=True)
proportionWeightDecVarDF["_merge"].replace(to_replace="left_only", value=1, inplace=True)
proportionWeightDecVarDF = proportionWeightDecVarDF.rename(columns = {"_merge":"Weights"})
#Add proportion decision variable names
proportionWeightDecVarDF["Decision Variable"] = np.nan
for samp in allSamples:
thisSample = (proportionWeightDecVarDF.loc[proportionWeightDecVarDF['Sample'] == samp])['Sample']
propNameTemp = ["pi_%s_%d" %t for t in itertools.izip(thisSample, range(1,1+thisSample.shape[0]))]
#shorter name as CPLEX can't hold name with >16 char. Use last 3 digits of sample name to name decision variables i.e. SRR2034333 -> use 333
propNameTemp = [ele.replace("pi_{}".format(samp), "pi_{}".format(samp[-3:])) for ele in propNameTemp]
proportionWeightDecVarDF.loc[proportionWeightDecVarDF['Sample'] == samp, 'Decision Variable'] = propNameTemp
#weights and decision variables for unique strain types, weight=0 if strain is in reference, otherwise=1. no duplications
strainWeightDecVarDF = proportionWeightDecVarDF.drop_duplicates(loci)
#temp_changeWeightDF = hammingWeightsStrain(strainWeightDecVarDF[strainWeightDecVarDF["Weights"] == 1], loci, reference)
#strainWeightDecVarDF.loc[temp_changeWeightDF.index, "Weights"] = temp_changeWeightDF.values
retainCol = loci + ['Weights', 'ST']
strainWeightDecVarDF = strainWeightDecVarDF[retainCol].reset_index(drop=True)
if pathToDistMat == None:
pass
else:
distMat_dict = returnDistMat_dict(pathToDistMat, loci)
minDist_W = computeMinDistWeights(strainWeightDecVarDF, distMat_dict, loci, reference)
max_ed = max(minDist_W)
minDist_W = [float(i)/max_ed for i in minDist_W]
strainWeightDecVarDF["Weights"] = minDist_W
strainWeightDecVarDF["Decision Variable"] = ["a{}".format(i) for i in range(1, strainWeightDecVarDF.shape[0] + 1)]
'''==================================== Forming ILP here ================================================'''
#Form a CPLEX model
model = cplex.Cplex()
#Some bound on cplex solver when gap finds it hard to converge
timelim_cb = model.register_callback(TimeLimitCallback)
timelim_cb.starttime = model.get_time()
timelim_cb.timelimit = timelimit
timelim_cb.acceptablegap = gap
timelim_cb.aborted = False
model.parameters.mip.strategy.file.set(3) #node file storage on disk and compressed
model.parameters.mip.strategy.variableselect.set(3) #Strong branching for variable selection
#minimize problem
model.objective.set_sense(model.objective.sense.minimize)
#add the decision variables for unqiue strain types
model.variables.add(obj=strainWeightDecVarDF['Weights'].values.tolist(), names=strainWeightDecVarDF['Decision Variable'], types = [model.variables.type.binary]* len(strainWeightDecVarDF['Weights'].values.tolist()))
#add proportions decision variables
if objectiveOption == "noPropAndErr" or "noProp":
model.variables.add(lb=[0]*proportionWeightDecVarDF.shape[0], ub=[propFormat]*proportionWeightDecVarDF['Weights'].shape[0], names=proportionWeightDecVarDF["Decision Variable"], types=[model.variables.type.continuous] * len(proportionWeightDecVarDF['Weights'].values.tolist()))
else:
model.variables.add(obj=[i for i in proportionWeightDecVarDF['Weights'].values.tolist()],lb=[0]*proportionWeightDecVarDF.shape[0],ub=[propFormat]*proportionWeightDecVarDF['Weights'].shape[0], names=proportionWeightDecVarDF["Decision Variable"], types=[model.variables.type.continuous] * len(proportionWeightDecVarDF['Weights'].values.tolist()))
#add linear constraints such that for each sample, the sum of the proportions of its variants combination = 1
propVarSumTo1 = list()
for samp in allSamples:
temp = (proportionWeightDecVarDF.loc[proportionWeightDecVarDF['Sample'] == samp])['Decision Variable'].tolist()
propVarSumTo1.append([temp, [1]* len(temp)])
model.linear_constraints.add(lin_expr=propVarSumTo1, rhs=[propFormat]*len(propVarSumTo1), senses=["E"]*len(propVarSumTo1), names=["c{0}".format(i+1) for i in range(len(propVarSumTo1))])
#add linear constraints such that each decision variable a_i must be at least pi_jk in which pi_jk is the proportion of V_jk and V_jk=a_i
#By this, if we use any of the pi, we force a_i to be 1
indicLargerPropDF = pd.DataFrame(columns=["ST","Indicator"])
indicLargerPropDF["ST"] = strainWeightDecVarDF["ST"]
indicLargerPropDF["Indicator"] = strainWeightDecVarDF["Decision Variable"]
indicLargerPropDF = (indicLargerPropDF.merge(proportionWeightDecVarDF, indicator=True, how="left", on="ST"))[["ST","Indicator","Decision Variable"]]
indicLargerPropDF.rename(columns={"Decision Variable": "Proportion Variable"}, inplace=True)
indicMinusProp = list()
for i,pi in itertools.izip(indicLargerPropDF["Indicator"].tolist(), indicLargerPropDF["Proportion Variable"].tolist()):
indicMinusProp.append([[i, pi],[propFormat, -1]])
model.linear_constraints.add(lin_expr=indicMinusProp, rhs=[0]*len(indicMinusProp), senses=["G"]*len(indicMinusProp), names=["c{0}".format(i+1+model.linear_constraints.get_num()) for i in range(len(indicMinusProp))] )
#Also, add linear constraints such that a_i - average of pi_jk <= 0.999. Otherwise will have case that a_i=1 and for all pi_jk, pi_jk=0
indicMinusAvgPropLess1_DF = indicLargerPropDF.groupby("Indicator")["Proportion Variable"].apply(list).reset_index()
indic = indicMinusAvgPropLess1_DF["Indicator"].tolist()
pV = indicMinusAvgPropLess1_DF["Proportion Variable"].tolist()
indicMinusAvgPropLess1_LHS = list()
for i in range(len(indic)):
a_i = indic[i]
pi_i = pV[i]
temp = list()
size = len(pi_i)
temp.append(a_i)
coef = list()
coef.append(propFormat)
for j in range(size):
temp.append(pi_i[j])
coef.append(-1.0/size)
indicMinusAvgPropLess1_LHS.append([temp, coef])
tolerance = 0.01*propFormat*0.01 #how much tolerance we set for the upper bound
model.linear_constraints.add(lin_expr=indicMinusAvgPropLess1_LHS, rhs=[propFormat - tolerance]*len(indicMinusAvgPropLess1_LHS), senses=["L"]*len(indicMinusAvgPropLess1_LHS), names=["c{0}".format(i+1+model.linear_constraints.get_num()) for i in range(len(indicMinusAvgPropLess1_LHS))])
model.linear_constraints.add(lin_expr=indicMinusAvgPropLess1_LHS, rhs=[0]*len(indicMinusAvgPropLess1_LHS), senses=["G"]*len(indicMinusAvgPropLess1_LHS), names=["c{0}".format(i+1+model.linear_constraints.get_num()) for i in range(len(indicMinusAvgPropLess1_LHS))])
#add error variables and linear constraints related to error terms
#create error variable names
varAndProp["Decision Variable"] = ["d_{}_".format(samp[-3:]) for samp in varAndProp["Sample"].tolist() ]
varAndProp["Decision Variable"] = varAndProp["Decision Variable"] + varAndProp["Variant"]
#add error variables
#errorThres = 0.1
if objectiveOption == "noPropAndErr":
model.variables.add(names=varAndProp["Decision Variable"].tolist(), lb= [0]*varAndProp.shape[0], ub= [errorThres]*varAndProp.shape[0], types=[model.variables.type.continuous]*varAndProp.shape[0])
else:
model.variables.add(obj=[1]*varAndProp.shape[0], names=varAndProp["Decision Variable"].tolist(), lb= [0]*varAndProp.shape[0], ub= [errorThres]*varAndProp.shape[0], types=[model.variables.type.continuous]*varAndProp.shape[0])
#add linear constraints such that for each sample, sum of pi_ik \dot V_ik (proportion \dot matrix representation) across all combinations = Proportion matrix
piDotComb = list()
piDotComb_2 = list()
propConstrRHS = list()
for locusName in varSampToST:
temp=list()
varSampToSTDict = varSampToST[locusName][0]
for (var, sample) in varSampToSTDict:
strainTypes = varSampToSTDict[(var, sample)]
propDecVar = proportionWeightDecVarDF[(proportionWeightDecVarDF["ST"].isin(strainTypes)) & (proportionWeightDecVarDF["Sample"] == "{}".format(sample))]["Decision Variable"]
errorDecVar = varAndProp[(varAndProp["Variant"] == var) & (varAndProp["Sample"] == sample)]["Decision Variable"]
propConstrRHS.append( float( ( (data["{}".format(sample)])[locusName][0] )[var] ) )
piDotComb.append([propDecVar.tolist() + errorDecVar.tolist(), [1]*len(propDecVar) + [-1]])
piDotComb_2.append([propDecVar.tolist() + errorDecVar.tolist(), [1]*len(propDecVar) + [1]])
model.linear_constraints.add(lin_expr=piDotComb, rhs=propConstrRHS, senses=["L"]*len(propConstrRHS), names=["c{0}".format(i+1+model.linear_constraints.get_num()) for i in range(len(propConstrRHS))])
model.linear_constraints.add(lin_expr=piDotComb_2, rhs=propConstrRHS, senses=["G"]*len(propConstrRHS), names=["c{0}".format(i+1+model.linear_constraints.get_num()) for i in range(len(propConstrRHS))])
#Export some info for MATLAB use
#writeInfoToCsv()
''' ================================== Solve ILP ========================================== '''
#model.write("borreliaLP.lp")
# model.set_results_stream(None)
#Some bound on cplex solver when gap finds it hard to converge
timelim_cb = model.register_callback(TimeLimitCallback)
timelim_cb.starttime = model.get_time()
timelim_cb.timelimit = int(timelimit)
timelim_cb.acceptablegap = float(gap)
timelim_cb.aborted = False
model.solve()
#options for searching more optimal solutions
#model.parameters.mip.pool.capacity.set(10)
# model.parameters.mip.pool.intensity.set(4)
#model.parameters.mip.limits.populate.set(2100000000)
# model.parameters.mip.pool.absgap.set(0)
# model.parameters.mip.pool.replace.set(1)
# model.populate_solution_pool()
objvalue = model.solution.get_objective_value()
varNames = model.variables.get_names()
varValues = model.solution.get_values(varNames)
conclusion = pd.DataFrame(columns=["Decision Variable", "Value"])
conclusion["Decision Variable"] = varNames
conclusion["Value"] = varValues
print conclusion
strainInfo = conclusion.merge(strainWeightDecVarDF[strainWeightDecVarDF["Decision Variable"].isin(varNames)])
strainInfo["New/Existing"] = ["Existing" if w==0 else "New" for w in strainInfo["Weights"].tolist()]
strainsNeeded = (strainInfo[strainInfo["Value"] > 0.9][loci + ["ST", "New/Existing"]])
errorVariables = conclusion[conclusion["Decision Variable"].str.startswith("d_")]
errorVariables = errorVariables[errorVariables["Value"] != 0.0]
strainVariables = conclusion[conclusion["Decision Variable"].str.startswith("a")]
strainVariables = strainVariables[strainVariables["Value"] > 0.0]
strainVariables = strainVariables.merge(strainWeightDecVarDF)
strainVariables = strainVariables[strainVariables["Weights"] > 0]
propVariables = conclusion[conclusion["Decision Variable"].str.startswith("pi_")]
propVariables = propVariables[propVariables["Value"] > 0.0]
propVariables = propVariables.merge(proportionWeightDecVarDF)
propVariables = propVariables[propVariables["Weights"] > 0]
print("(Strain, Proportion, Error): ({0},{1},{2})".format(strainVariables["Value"].sum(), propVariables["Value"].sum(), errorVariables["Value"].sum()))
print("Total: {}".format(objvalue))
#output indices of all strains (New/Existing)
allStr = strainWeightDecVarDF[["ST", "Weights"] + loci]
allStr["New/Existing"] = ["Existing" if w==0 else "New" for w in allStr["Weights"].tolist()]
allStr.drop("Weights", 1, inplace=True)
allStr.to_csv("{0}/indexedStrains.csv".format(outputPath))
# for i in range(model.solution.pool.get_num()):
# objvalue = model.solution.pool.get_objective_value(i)
# varNames = model.variables.get_names()
# varValues = model.solution.pool.get_values(i,varNames)
# conclusion = pd.DataFrame(columns=["Decision Variable", "Value"])
# conclusion["Decision Variable"] = varNames
# conclusion["Value"] = varValues
# strainInfo = conclusion.merge(strainWeightDecVarDF[strainWeightDecVarDF["Decision Variable"].isin(varNames)])
# strainInfo["New/Existing"] = ["Existing" if w==0 else "New" for w in strainInfo["Weights"].tolist()]
# strainsNeeded = (strainInfo[strainInfo["Value"] > 0.9][loci + ["ST", "New/Existing"]])
# print strainsNeeded
output_dict = dict()
for samp in allSamples:
output = proportionWeightDecVarDF[proportionWeightDecVarDF["Sample"] == samp].merge(strainsNeeded).drop(["Weights", "Sample"],1)
output["Proportion"] = model.solution.get_values(output["Decision Variable"].tolist())
output = output[output["Proportion"] > 0.0]
output.drop("Decision Variable", axis=1, inplace=True)
output = output[["ST", "New/Existing"]+loci+["Proportion"]]
#print output
temp = output
output_dict[samp] = pd.DataFrame(temp)
output.to_csv("{0}/{1}_strainsAndProportions.csv".format(outputPath, newNameToOriName[samp]))
return output
'''
Solve the pure ILP instance of strain prediction
Input:
dataPath, path to directory containing samples' alleles and proportions
refStrains, path to strain_ref.txt
loci, a list of locus
globalILP_option, "all" if all samples
Output:
solution_dict, dictionary where key=indices, value=dataframe related to that solution(information such as alleles at each locus)
objective_dict, dictionary where key=indices, value=objective value of the i-th solution
data, dictionary where key=sample name, value=dataframe which contains information about alleles and proportion at each locus
strains, dataframe of unique strains for later use of localLP
'''
def minNewStrain(dataPath, refStrains, loci, globalILP_option):
    """Solve the pure ILP: choose a minimum-weight set of strains (new
    strains cost 1, known reference strains cost 0) that covers every
    allele observed in every sample, and enumerate alternative optima
    from the CPLEX solution pool.

    Returns (solution_dict, objective_dict, data, strains,
    newNameToOriName) as described in the module-level notes above.
    """
    #paramaters
    #loci = ['clpA', 'clpX', 'nifS']
    numLoci = len(loci)
    #read data for samples and reference
    data, numSamples = readData(dataPath,loci, globalILP_option)
    # Rename samples to short aliases s1, s2, ... (CPLEX limits variable
    # name length); keep a map back to the original sample names.
    newNameToOriName = dict()
    namingIndex=1
    for i in sorted(data.keys()):
        newNameToOriName["s{}".format(namingIndex)] = i
        data["s{}".format(namingIndex)] = data.pop(i)
        namingIndex += 1
    allSamples = data.keys()
    reference = pd.read_csv(refStrains,sep="\t",usecols=range(1,numLoci+1))
    lociNames = list(reference.columns.values)
    #As reference only contains numbers as entries, add gene name to the variants for better identification
    for name in lociNames:
        reference["%s" %name] = name + "_" + reference["%s" %name].astype(str)
    #Get the combinations at all loci across all samples
    strains, numOfComb = returnCombinationsAndNumComb(data, numLoci, loci)
    uniqueStrains = strains.drop_duplicates(loci)
    uniqueStrains = (uniqueStrains[loci]).reset_index(drop=True)
    uniqueStrains["ST"] = uniqueStrains.index.values + 1 #assign indices for strains or each unique combinations
    strains = strains.merge(uniqueStrains, indicator=True, how="left") #assign the index to the combinations(as strain data frame contains duplicated rows)
    strains = strains.drop("_merge",1)
    # Weight = 0 when the combination matches a reference strain (merge hits
    # "both"), 1 when it would be a novel strain ("left_only").
    strainWeightDecVarDF = strains.merge(reference, indicator=True, how="left")
    strainWeightDecVarDF["_merge"].replace(to_replace="both", value=0, inplace=True)
    strainWeightDecVarDF["_merge"].replace(to_replace="left_only", value=1, inplace=True)
    strainWeightDecVarDF = strainWeightDecVarDF.rename(columns = {"_merge":"Weights"})
    strainWeightDecVarDF = strainWeightDecVarDF.drop_duplicates(loci)
    retainCol = loci + ['Weights', 'ST']
    strainWeightDecVarDF = strainWeightDecVarDF[retainCol].reset_index(drop=True)
    strainWeightDecVarDF["Decision Variable"] = ["a{}".format(i) for i in range(1, strainWeightDecVarDF.shape[0] + 1)]
    #Relate sample and strain decision variable
    samp_decVar_DF = strains.merge(strainWeightDecVarDF, how="left")[loci+["Sample", "Decision Variable", "ST"]]
    #For each allele, get a mapping of which strains it maps to
    varSampToST = mapVarAndSampleToStrain(samp_decVar_DF, loci, allSamples)
    '''==================================== Forming ILP here ================================================'''
    #Form a CPLEX model
    model = cplex.Cplex()
    #minimize problem
    model.objective.set_sense(model.objective.sense.minimize)
    #add the decision variables for unqiue strain types
    model.variables.add(obj=strainWeightDecVarDF['Weights'].values.tolist(), names=strainWeightDecVarDF['Decision Variable'], types = [model.variables.type.binary]* len(strainWeightDecVarDF['Weights'].values.tolist()))
    # Coverage constraints: for every (allele, sample) pair at least one of
    # the strains containing that allele must be selected (sum >= 1).
    descAllAlleleLHS = list()
    for locusName in varSampToST:
        varSampToSTDict = varSampToST[locusName][0]
        for (var, sample) in varSampToSTDict:
            strainTypes = varSampToSTDict[(var, sample)]
            strainDecVar = samp_decVar_DF[(samp_decVar_DF["ST"].isin(strainTypes)) & (samp_decVar_DF["Sample"] == "{}".format(sample))]["Decision Variable"].tolist()
            descAllAlleleLHS.append([strainDecVar, [1]*len(strainDecVar)])
    model.linear_constraints.add(lin_expr=descAllAlleleLHS, rhs=[1]*len(descAllAlleleLHS), senses=["G"]*len(descAllAlleleLHS), names=["c{0}".format(i+1+model.linear_constraints.get_num()) for i in range(len(descAllAlleleLHS))])
    # Solution-pool settings: enumerate up to 50 alternative optimal
    # solutions with zero absolute objective gap.
    model.parameters.mip.pool.intensity.set(4)
    model.parameters.mip.limits.populate.set(50)
    model.parameters.mip.pool.absgap.set(0)
    model.parameters.mip.pool.replace.set(1)
    model.populate_solution_pool()
    solution_dict = dict()
    objective_dict = dict()
    for i in range(model.solution.pool.get_num()):
        objvalue = model.solution.pool.get_objective_value(i)
        objective_dict[i] = objvalue
        varNames = model.variables.get_names()
        varValues = model.solution.pool.get_values(i,varNames)
        conclusion = pd.DataFrame(columns=["Decision Variable", "Value"])
        conclusion["Decision Variable"] = varNames
        conclusion["Value"] = varValues
        strainInfo = conclusion.merge(strainWeightDecVarDF[strainWeightDecVarDF["Decision Variable"].isin(varNames)])
        strainInfo["New/Existing"] = ["Existing" if w==0 else "New" for w in strainInfo["Weights"].tolist()]
        # Binary variables equal to 1 are selected; 0.9 guards against
        # floating-point noise in the solver's reported values.
        strainsNeeded = (strainInfo[strainInfo["Value"] > 0.9][loci + ["ST", "Weights"]])
        strainsNeeded.reset_index(drop=True, inplace=True)
        solution_dict[i] = strainsNeeded
    return solution_dict, objective_dict, data, strains, newNameToOriName
'''
Solve the LP instance of strain prediction
Input:
solution, dataframe which contains alleles at each locus for a solution
data, see minNewStrain
strains, see minNewStrain
reference, dataframe of existing strains
loci, a list of locus
Output:
objvalue, objective value of the LP for this solution
errObj, value of error component
propObj, value of proportion component
sampleAndStrainProp, a dictionary where key=sample name and value=dataframe which contains information about the strains and their proportions
feasible, indicator whether this solution is feasible
'''
def minNewStrainProp(solution, data, strains, refStrains, loci, newNameToOriName):
    """Solve the LP that assigns proportions to the strains of one
    candidate solution from minNewStrain, minimising weighted proportions
    of novel strains plus per-allele error terms.

    Returns (objvalue, errObj, propObj, sampleAndStrainProp, feasible);
    all numeric values are -1 and sampleAndStrainProp is an empty list
    when the LP is infeasible.
    """
    #paramaters
    propFormat = 1    #proportion in percentage or fraction
    numLoci = len(loci)
    #read data for samples and reference
    reference = pd.read_csv(refStrains,sep="\t",usecols=range(1,numLoci+1))
    lociNames = list(reference.columns.values)
    numReference = reference.shape[0]
    allSamples = data.keys()
    #check proportions sum to 100
    checkProp(data, propFormat)
    #round the proportions to 3 decimal places
    data = roundProp(data)
    #As reference only contains numbers as entries, add gene name to the variants for better identification
    for name in lociNames:
        reference["%s" %name] = name + "_" + reference["%s" %name].astype(str)
    #Get proportions of variants at different locus for each sample
    varAndProp = returnVarAndProportions(data)
    # Keep only the (strain, sample) rows that belong to this solution.
    proportionWeightDecVarDF = strains.merge(solution, how='left', indicator=True)
    proportionWeightDecVarDF = proportionWeightDecVarDF[proportionWeightDecVarDF["_merge"] == "both"]
    proportionWeightDecVarDF.drop(["_merge"], axis=1, inplace=True)
    proportionWeightDecVarDF.reset_index(drop=True, inplace=True)
    #For each variants, get a mapping of which strains it maps to. Only consider those strains in given solution
    varSampToST = mapVarAndSampleToStrain(proportionWeightDecVarDF[loci+["Sample", "ST"]], loci, allSamples)
    #Add proportion variables names
    for samp in allSamples:
        thisSample = (proportionWeightDecVarDF.loc[proportionWeightDecVarDF['Sample'] == samp])['Sample']
        propNameTemp = ["pi_%s_%d" %t for t in itertools.izip(thisSample, range(1,1+thisSample.shape[0]))]
        # NOTE(review): this replace() maps the name to itself (a no-op) —
        # strainSolver uses samp[-3:] here; presumably the truncation is
        # unnecessary since samples were already renamed to s1, s2, ...
        propNameTemp = [ele.replace("pi_{}".format(samp), "pi_{}".format(samp)) for ele in propNameTemp]
        proportionWeightDecVarDF.loc[proportionWeightDecVarDF['Sample'] == samp, 'Decision Variable'] = propNameTemp
    ''' ===================================== Forming LP here =================================================== '''
    #Form a CPLEX model
    model = cplex.Cplex()
    #minimize problem
    model.objective.set_sense(model.objective.sense.minimize)
    #add the decision variables for unqiue strain types
    model.variables.add(obj=proportionWeightDecVarDF['Weights'].values.tolist(), lb=[0]*proportionWeightDecVarDF.shape[0], ub=[propFormat]*proportionWeightDecVarDF.shape[0], names=proportionWeightDecVarDF['Decision Variable'], types = [model.variables.type.continuous]* len(proportionWeightDecVarDF['Weights'].values.tolist()))
    #add linear constraints such that for each sample, the sum of the proportions of its variants combination = 1
    propVarSumTo1 = list()
    for samp in allSamples:
        temp = (proportionWeightDecVarDF.loc[proportionWeightDecVarDF['Sample'] == samp])['Decision Variable'].tolist()
        propVarSumTo1.append([temp, [1]* len(temp)])
    model.linear_constraints.add(lin_expr=propVarSumTo1, rhs=[propFormat]*len(propVarSumTo1), senses=["E"]*len(propVarSumTo1), names=["c{0}".format(i+1) for i in range(len(propVarSumTo1))])
    #add error variables and linear constraints related to error terms
    #create error variable names: d_<sample>_<variant>
    varAndProp["Decision Variable"] = ["d_{}_".format(samp) for samp in varAndProp["Sample"].tolist() ]
    varAndProp["Decision Variable"] = varAndProp["Decision Variable"] + varAndProp["Variant"]
    #add error variables
    # NOTE(review): errorThres is not defined in this function — presumably a
    # module-level constant; verify it is set before this is called.
    model.variables.add(obj=[1]*varAndProp.shape[0], names=varAndProp["Decision Variable"].tolist(), lb=[0]*varAndProp.shape[0], ub= [errorThres]*varAndProp.shape[0], types=[model.variables.type.continuous]*varAndProp.shape[0])
    # Two inequalities per (allele, sample) bound |sum(pi) - observed
    # proportion| by the error variable d:
    #   sum(pi) - d <= observed   and   sum(pi) + d >= observed
    piDotComb = list()
    piDotComb_2 = list()
    propConstrRHS = list()
    for locusName in varSampToST:
        temp=list()
        varSampToSTDict = varSampToST[locusName][0]
        for (var, sample) in varSampToSTDict:
            strainTypes = varSampToSTDict[(var, sample)]
            propDecVar = proportionWeightDecVarDF[(proportionWeightDecVarDF["ST"].isin(strainTypes)) & (proportionWeightDecVarDF["Sample"] == "{}".format(sample))]["Decision Variable"]
            errorDecVar = varAndProp[(varAndProp["Variant"] == var) & (varAndProp["Sample"] == sample)]["Decision Variable"]
            propConstrRHS.append(  float( ( (data["{}".format(sample)])[locusName][0] )[var] )  )
            piDotComb.append([propDecVar.tolist() + errorDecVar.tolist(), [1]*len(propDecVar) + [-1]])
            piDotComb_2.append([propDecVar.tolist() + errorDecVar.tolist(), [1]*len(propDecVar) + [1]])
    model.linear_constraints.add(lin_expr=piDotComb, rhs=propConstrRHS, senses=["L"]*len(propConstrRHS), names=["c{0}".format(i+1+model.linear_constraints.get_num()) for i in range(len(propConstrRHS))])
    model.linear_constraints.add(lin_expr=piDotComb_2, rhs=propConstrRHS, senses=["G"]*len(propConstrRHS), names=["c{0}".format(i+1+model.linear_constraints.get_num()) for i in range(len(propConstrRHS))])
    ''' ==== Solve ==== '''
    model.set_problem_type(0) #set to LP problem
    model.solve()
    feasible = False
    # Status 1 is CPLEX "optimal"; anything else is treated as infeasible.
    if model.solution.get_status() == 1:
        objvalue = model.solution.get_objective_value()
        varNames = model.variables.get_names()
        varValues = model.solution.get_values(varNames)
        conclusion = pd.DataFrame(columns=["Decision Variable", "Value"])
        conclusion["Decision Variable"] = varNames
        conclusion["Value"] = varValues
        conclusion = conclusion[conclusion["Value"] != 0.0]
        print conclusion
        print conclusion[conclusion["Decision Variable"].str.startswith("d_")]["Value"].sum()
        # Split the objective into its error and novel-proportion parts.
        errObj = conclusion[conclusion["Decision Variable"].str.startswith("d_")]["Value"].sum()
        propVariables = conclusion[conclusion["Decision Variable"].str.startswith("pi_")]
        propVariables = propVariables.merge(proportionWeightDecVarDF)
        propVariables = propVariables[propVariables["Weights"] != 0]
        propObj = propVariables["Value"].sum()
        sampleAndStrainProp = dict()
        for samp in allSamples:
            output = proportionWeightDecVarDF[proportionWeightDecVarDF["Sample"] == samp].merge(conclusion)
            output["New/Existing"] = ["Existing" if w==0 else "New" for w in output["Weights"].tolist()]
            output.rename(columns={"Value":"Proportion"}, inplace=True)
            output.drop("Decision Variable", axis=1, inplace=True)
            output = output[["ST", "New/Existing"]+loci+["Proportion"]]
            sampleAndStrainProp[newNameToOriName[samp]] = output
        feasible = True
    else:
        # Infeasible or non-optimal: return sentinel values.
        objvalue= -1
        errObj = -1
        propObj = -1
        sampleAndStrainProp = list()
    return objvalue, errObj, propObj, sampleAndStrainProp, feasible
|
988,721 | de49820749cdbe62fd62084a6d1bca95ebe41461 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def trimBST(self, root: TreeNode, low: int, high: int) -> TreeNode:
        """Trim the BST so every remaining value lies in [low, high].

        Relies on the BST property: if a node's value is below the range,
        its whole left subtree is too, so the trimmed result comes from
        the right subtree (and symmetrically for values above the range).
        """
        if not root:
            return None
        if root.val < low:
            return self.trimBST(root.right, low, high)
        if root.val > high:
            return self.trimBST(root.left, low, high)
        root.left = self.trimBST(root.left, low, high)
        root.right = self.trimBST(root.right, low, high)
        return root
|
988,722 | a9deddd9c156e494bc3fe1685dae76e50e69324f | from __future__ import print_function
import numpy as np
import extract
def swave(h,name="SWAVE.OUT",rep=1):
    """Write the s-wave pairing of a Hamiltonian to three files.

    AMPLITUDE_<name> holds (x, y, |Delta|), PHASE_<name> holds
    (x, y, arg(Delta)) and <name> holds (x, y, Re Delta, Im Delta),
    one site per row.

    h: Hamiltonian with spin and electron-hole sectors.
    name: base output filename.
    rep: unused; kept for backward compatibility.
    Raises ValueError when h lacks the spin or electron-hole sector
    (the original bare ``raise`` outside an except clause was itself an
    error with no message).
    """
    if not h.has_spin:
        raise ValueError("Hamiltonian must have the spin degree of freedom")
    if not h.has_eh:
        raise ValueError("Hamiltonian must have the electron-hole sector")
    ds = extract.swave(h.intra)  # on-site pairing amplitude per site
    rs = h.geometry.r  # site positions
    # Context managers guarantee the files are closed even on error.
    with open("AMPLITUDE_"+name,"w") as f1, open("PHASE_"+name,"w") as f2, \
         open(name,"w") as f3:
        f1.write("# x y |Delta|\n")
        f2.write("# x y |phi|\n")
        f3.write("# x y ReD ImD\n")
        for ri, di in zip(rs, ds):
            xy = str(ri[0]) + " " + str(ri[1]) + " "
            f1.write(xy + str(np.abs(di)) + " \n")
            f2.write(xy + str(np.angle(di)) + " \n")
            f3.write(xy + str(di.real) + " " + str(di.imag) + " \n")
def hopping(h,name="HOPPING.OUT",reps=0):
    """Write the hoppings of the intra-cell Hamiltonian to a file.

    Each row holds x1 y1 x2 y2 t for a pair of connected sites.
    h: Hamiltonian without an electron-hole sector.
    name: output filename.
    reps: unused; kept for backward compatibility.
    Raises ValueError for electron-hole Hamiltonians (the original bare
    ``raise`` outside an except clause was itself an error).
    """
    if h.has_eh:
        raise ValueError("electron-hole Hamiltonians are not supported")
    # Pick the extractor matching the spin structure of the matrix.
    if h.has_spin:
        (ii, jj, ts) = extract.hopping_spinful(h.intra)
    else:
        (ii, jj, ts) = extract.hopping_spinless(h.intra)
    r = h.geometry.r
    with open(name, "w") as f:  # closed automatically, even on error
        for (i, j, t) in zip(ii, jj, ts):
            f.write(str(r[i][0]) + " " + str(r[i][1]) + " ")
            f.write(str(r[j][0]) + " " + str(r[j][1]) + " ")
            f.write(str(t) + "\n")
def mz(h,name="MZ.OUT"):
    """Write the z magnetization to a file, one row per site: index mz."""
    if h.has_eh: raise  # not implemented with an electron-hole sector
    if h.has_spin: ms = extract.mz(h.intra)
    else: raise  # magnetization requires a spinful Hamiltonian
    # np.matrix is deprecated; build the (index, mz) columns explicitly
    np.savetxt(name, np.column_stack((np.arange(len(ms)), ms)))
def magnetization(h):
    """Write the x, y and z magnetizations to MAGNETIZATION_{X,Y,Z}.OUT.

    Each output row is: x y m, one row per site.
    """
    if h.has_eh: raise  # not implemented with an electron-hole sector
    if h.has_spin:
        mx = extract.mx(h.intra)
        my = extract.my(h.intra)
        mz = extract.mz(h.intra)
    else: raise  # magnetization requires a spinful Hamiltonian
    x = h.geometry.x
    y = h.geometry.y
    # np.matrix is deprecated; stack the columns directly instead
    np.savetxt("MAGNETIZATION_X.OUT", np.column_stack((x, y, mx)))
    np.savetxt("MAGNETIZATION_Y.OUT", np.column_stack((x, y, my)))
    np.savetxt("MAGNETIZATION_Z.OUT", np.column_stack((x, y, mz)))
|
988,723 | e9719c49f82099bdd49f0ac274d7d540b85204d1 | from tkinter import *
from tkinter import messagebox
def clickImage(event):
    # Show an info dialog whenever the image label is clicked.
    messagebox.showinfo("마우스","사막에 마우스가 클릭됨")
window = Tk()
window.geometry("400x400")
# Keep a module-level reference to the image so it is not garbage collected.
photo=PhotoImage(file="gif/1.gif")
label1=Label(window,image=photo)
# "<Button>" fires on any mouse-button press over the label.
label1.bind("<Button>",clickImage)
label1.pack(expand=1,anchor=CENTER)
# Enter the Tk event loop; blocks until the window is closed.
window.mainloop()
|
988,724 | ff4c16822ced15ed90cf67983175a5ae6ee9b488 | import datetime
from flask import (
request,
jsonify,
Blueprint
)
from flask_request_validator import(
Param,
PATH,
GET,
JSON,
validate_params
)
from decorator import login_required
from connection import get_connection
from internal_code_sheets import internal_code_sheet
from exceptions import (
NotFoundError,
InvalidDataError,
OutofStockError,
PriceDoesNotMatchError,
InvalidSalesQuantityError
)
def create_order_endpoints(order_service):
    """Build and return the /orders blueprint bound to the given order service."""
    order_bp = Blueprint('orders', __name__, url_prefix = '/orders')
    @order_bp.route('/status-updates', methods=['POST'])
    @validate_params(
        Param('order_status_id',JSON,int,required=True),
        Param('order_id',JSON,list,required=True)
    )
    @login_required
    def change_order_status(*args):
        """
        Change the status of orders.
        Args:
            order_id : list of order ids,
            order_status_id : id of the new order status,
        Returns:
            {'message':'SUCCESS','code':200}
        Authors:
            jisunn0130@gmail.com (Choi Jiseon)
        History:
            2020.11.03 (Choi Jiseon) : initial creation
        """
        try:
            db_connection = get_connection()
            # The request body carries the orders to update.
            order_lists = request.json
            result = order_service.change_order_status(db_connection,order_lists)
            if result:
                db_connection.commit()
                return jsonify(internal_code_sheet['S100']), (internal_code_sheet['S100']['code'])
        except NotFoundError as e:
            db_connection.rollback()
            message = internal_code_sheet[e.code]
            return jsonify(message), (message['code'])
        except InvalidDataError as e:
            db_connection.rollback()
            message = internal_code_sheet[e.code]
            return jsonify(message), (message['code'])
        except Exception as e:
            db_connection.rollback()
            return jsonify({'error': f'{e}'}), 500
        finally:
            # NOTE(review): if get_connection() itself raises, db_connection is
            # unbound here and this line raises NameError — confirm and guard.
            db_connection.close()
    @order_bp.route('/lists/<int:order_status_id>', methods=['GET'])
    @validate_params(
        Param('order_status_id',PATH,int,required=False), # order status
        Param('searching_category',GET,str,required=False), # search category
        Param('searching', GET,str,required=False), # search term
        Param('filter_ordering',GET,str,required=False), # sort: order date asc or desc
        Param('filter_date_from',GET,str,required=False), # order date range start
        Param('filter_date_to',GET,str,required=False), # order date range end
        Param('offset',GET,int,required=False), # pagination start
        Param('limit',GET,int,required=False), # pagination size
        Param('seller_attribute_id',GET,list,required=False), # seller attribute
    )
    @login_required
    def get_order_lists(*args):
        """
        List orders.
        Args:
            order_status_id : order status id
            searching_category : search category
            searching : search term
            filter_ordering : sort criterion
            filter_date_from : filter start date (YYYY-MM-DD)
            filter_date_to : filter end date (YYYY-MM-DD)
            offset : pagination start
            limit : pagination size
            seller_attribute_id : seller attribute ids
        Returns:
            total_count : number of matching orders
            {'message':'SUCCESS','code':200}
        Authors:
            jisunn0130@gmail.com (Choi Jiseon)
        History:
            2020.11.03 (Choi Jiseon) : initial creation
        """
        try:
            db_connection = get_connection()
            # Parse the date filters when present; empty string means "no filter".
            if args[4]:
                filter_date_from = datetime.datetime.strptime(args[4],'%Y-%m-%d')
            else:
                filter_date_from = ""
            if args[5]:
                filter_date_to = datetime.datetime.strptime(args[5], '%Y-%m-%d')
            else:
                filter_date_to = ""
            # args are positional, in the same order as the @validate_params list.
            filter_dict = {
                'order_status_id' : args[0],
                'searching_category' : args[1],
                'searching': args[2],
                'filter_ordering': args[3],
                'filter_date_from': filter_date_from,
                'filter_date_to': filter_date_to,
                'offset': args[6],
                'limit': args[7],
                'seller_attribute_id': args[8]
            }
            #filter_dict = {f'{key}':f'{value}' for (key, value) in filters.items() if value}
            result = order_service.create_order_lists(db_connection, filter_dict)
            return jsonify(result),(internal_code_sheet['S100']['code'])
        except Exception as e:
            db_connection.rollback()
            return jsonify({'error': f'{e}'}), 500
        finally:
            db_connection.close()
    @order_bp.route('/make-orders/<int:product_id>', methods=['GET'])
    @validate_params(
        Param('product_id',PATH,int,required=True)
    )
    @login_required
    def get_product_options(*args):
        """
        Fetch the option list of a product for placing an order.
        Args:
            product_id : product id
        Returns:
            product option lists (JSON), 200
        Authors:
            jisunn0130@gmail.com (Choi Jiseon)
        History:
            2020.11.08 (Choi Jiseon) : initial creation
        """
        try:
            db_connection = get_connection()
            product = {
                'product_id' : args[0]
            }
            product_options = order_service.get_product_option_lists(db_connection, product)
            return jsonify(product_options),200
        except OutofStockError as e:
            db_connection.rollback()
            message = internal_code_sheet[e.code]
            return jsonify(message), (message['code'])
        except Exception as e:
            db_connection.rollback()
            return jsonify({'error': f'{e}'}), 500
        finally:
            db_connection.close()
    @order_bp.route('/make-orders', methods=['POST'])
    @validate_params(
        # Validate the incoming parameters.
        # Product order info:
        Param('product_id',JSON,int,required=True),
        Param('color_id',JSON,int,required=True),
        Param('size_id',JSON,int,required=True),
        Param('quantity',JSON,int,required=True),
        Param('discount_status_id',JSON,int,required=True),
        Param('sales_price',JSON,int,required=True),
        Param('discount_price',JSON,int,required=True),
        Param('paid_total',JSON,int,required=True),
        # Customer info:
        Param('customer_name',JSON,str,required=True),
        Param('phone_number',JSON,str,required=True),
        Param('postal_code',JSON,str,required=True),
        Param('address_1',JSON,str,required=True),
        Param('address_2',JSON,str,required=True),
    )
    @login_required
    def make_orders(*args):
        """
        Create a new order.
        Args:
            product_id : product id
            color_id : color id
            size_id : size id
            quantity : order quantity
            discount_status_id : whether a discount applies
            sales_price : sales price
            discount_price : discounted price
            paid_total : total amount paid
            customer_name : customer name
            phone_number : phone number
            postal_code : postal code
            address_1 : street address
            address_2 : detailed address
        Returns:
            {'message':'SUCCESS','code':200}
            {'message':'PRICE_DOES_NOT_MATCH','code':400}
            {'message':'INVALID_REQUEST','client_message':'최소 구매수량을 확인하세요','code':400}  (check minimum purchase quantity)
            {'message':'INVALID_REQUEST','client_message':'최대 구매수량을 확인하세요','code':400}  (check maximum purchase quantity)
        Authors:
            jisunn0130@gmail.com (Choi Jiseon)
        History:
            2020.11.03 (Choi Jiseon) : initial creation
        """
        try:
            # Connect to the database.
            db_connection = get_connection()
            # Order payload from the request body.
            order_info = request.json
            # NOTE(review): seller_id is read but never used — confirm whether
            # it should be passed to create_new_order.
            seller_id = request.seller_id
            result = order_service.create_new_order(db_connection, order_info)
            # Order created successfully.
            if result == 'S100':
                db_connection.commit()
                return jsonify(internal_code_sheet[result]), (internal_code_sheet[result]['code'])
        except OutofStockError as e:
            db_connection.rollback()
            message = internal_code_sheet[e.code]
            return jsonify(message), (message['code'])
        except InvalidSalesQuantityError as e:
            db_connection.rollback()
            message = internal_code_sheet[e.code]
            return jsonify(message), (message['code'])
        except PriceDoesNotMatchError as e:
            db_connection.rollback()
            message = internal_code_sheet[e.code]
            return jsonify(message), (message['code'])
        except Exception as e:
            db_connection.rollback()
            return jsonify({'error': f'{e}'}), 500
        finally:
            db_connection.close()
    return order_bp
|
988,725 | d81a0dad717b2964134b46660ab7810dca1e000f | # 匿名函数 lambda 函数表达式 (语法糖)
# lambda函数表达式用来创建一些简单的函数,是函数创建的又一种方式
# 语法:lambda 参数列表 : 返回值
# 匿名函数一般都是作为参数使用,其他地方一般不会使用
print((lambda a, b: a + b)(10, 20))
func = lambda a, b: a + b  # a lambda can also be bound to a name, though this is rarely done
print(func(10, 20))
# ============================================ applications of lambdas =====================================================
l = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]  # sample list
r = filter(lambda i: i > 5, l)
print('filter -- ', list(r))
# map()
# applies the given operation to every element of an iterable and returns the results as a new iterator
l = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
r = map(lambda i: i ** 2, l)
print('map -- ', list(r))
# sort()
# sorts the elements of a *list* in place
# by default sort() compares the elements directly
# in-place sorting mutates the original list
# sort() accepts a keyword argument: key
# key takes a function; each element is passed to it and the return values are compared to order the list
l = ['bb', 'aaaa', 'c', 'ddddddddd', 'fff']
l.sort(key=len)  # len() returns the length of a sequence
print('sort -- ', l)
l = [2, 5, '1', 3, '6', '4']
l.sort(key=int)
print('sort -- ', l)
# sorted()
# same usage as sort(), but sorted() works on *any* sequence
# sorted() does not modify the original object; it returns a new sorted list
l = (2, 5, '1', 3, '6', '4')
print('sorted()排序前:', l)
print('sorted()排序后:', sorted(l, key=int))
print(l)
|
988,726 | bf7191c30e171e13681f895b4173ab1976d2cc13 | s = input()
s_1 = int(s[0:2])
s_2 = int(s[2:4])
# A two-digit group can be a month only if it is in 1..12.
first_is_month = 1 <= s_1 <= 12
second_is_month = 1 <= s_2 <= 12
if first_is_month and second_is_month:
    print('AMBIGUOUS')
elif first_is_month:
    print('MMYY')
elif second_is_month:
    print('YYMM')
else:
    print('NA')
988,727 | b4ba63fab216ff011099ccad28271bad75fe2fb8 | #! /usr/bin/env python
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2013. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt. Contact: ctb@msu.edu
#
import sys
import math
from screed.fasta import fasta_iter
import khmer
K = 32  # k-mer size
HASHTABLE_SIZE = int(1e9)  # size of each counting hash table
N_HT = 4  # number of hash tables
ABUND_THRESHOLD = 65  # k-mers with count >= this are ablated from sequence ends
infile = sys.argv[1]
outfile = sys.argv[2]
outfp = open(outfile, 'w')
print 'making hashtable'
ht = khmer.new_counting_hash(K, HASHTABLE_SIZE, N_HT)
print 'eating', infile
ht.consume_fasta(infile)
print 'counting'
for n, record in enumerate(fasta_iter(open(infile))):
    if n % 10000 == 0:
        print>>sys.stderr, '...', n
    seq = record['sequence']
    if len(seq) < K:
        continue
    # ablate end: walk back from the last k-mer while its count is high
    pos = len(seq) - K + 1
    while pos >= 0:
        if ht.get(seq[pos:pos + K]) < ABUND_THRESHOLD:
            break
        pos -= 1
    if pos == -1:
        # every k-mer was high-abundance; drop the whole read
        continue
    seq = seq[:pos + K]
    # ablate beginning: walk forward from the first k-mer while its count is high
    pos = 0
    while pos < len(seq) - K + 1:
        if ht.get(seq[pos:pos + K]) < ABUND_THRESHOLD:
            break
        pos += 1
    if pos == len(seq) - K + 1:
        continue
    seq = seq[pos:]
    # skip reads that still contain any high-abundance k-mer
    if ht.get_max_count(seq) >= ABUND_THRESHOLD:
        continue
    # save!
    print >>outfp, '>%s\n%s' % (record['name'], seq)
|
988,728 | 8e5a69b47dc1fd2b2a5ba2f10f17cde35e607dcf | # -*- coding: utf-8 -*-
# MNIST Images Extractor
# Python code for extracting MNIST dataset images.
# MNIST Dataset:
# http://yann.lecun.com/exdb/mnist/
# Repository:
# https://github.com/amir-saniyan/MNISTImagesExtractor
import os
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
import imageio
mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)
def _save_split(split, split_name):
    """Save every image of a dataset split as ./images/<split_name>/<label>/<i>.png."""
    for i in range(split.num_examples):
        image = split.images[i].reshape(28, 28)
        label = split.labels[i]
        directory_name = './images/' + split_name + '/' + str(label)
        if not os.path.exists(directory_name):
            os.makedirs(directory_name)
        file_name = directory_name + '/' + str(i) + '.png'
        print('Saving', file_name, '...')
        # Pixels are floats in [0, 1]; rescale to 8-bit grayscale for PNG.
        imageio.imwrite(file_name, (image * 255).astype(np.uint8))
# Train, test and validation sets (same logic, previously triplicated).
_save_split(mnist.train, 'train')
_save_split(mnist.test, 'test')
_save_split(mnist.validation, 'validation')
print('OK')
|
988,729 | b96ae0c1a66773671c40f838cc9e99a03b35a9c9 | """ CorporateActions:1.2.2 """
"""----------------------------------------------------------------------------
MODULE
FCAExecute - Module which executes the Corp Actions listed in the
CorpAction table.
(c) Copyright 2003 by Front Capital Systems AB. All rights reserved.
DESCRIPTION
This module executes the Corporate Actions listed in the Corporate Action
table which have not already been executed, and which have an Ex Date set
which is on or before the date specified on the command line, or in the
Macro variables window of this module.
NOTE
The Acquire Day of any trade has to be the same or less than the Record
Date entered in the Corporate Action table, or they will not be included.
Furthermore, the Ex Date has to be on or before the 'CorpAct Date' provided
in the macro variables window in order for the script to include the
Corporate Action. Portfolios with a total position of zero, are not
included. Trades which are Void, Confirmed Void, Simulated, Reserved or
Terminated are not included. Only Corporate Actions with one or both of
the Statuses Instrument or Trade set to 'Script Update' are included.
----------------------------------------------------------------------------"""
try:
import string
except ImportError:
print 'The module string was not found.'
print
try:
import math
except ImportError:
print 'The module math was not found.'
print
try:
import time
except ImportError:
print 'The module time was not found.'
print
try:
from os import environ
except ImportError:
print 'The module os was not found.'
print
try:
import FCARollback
except ImportError:
print 'The module FCARollback was not found.'
print
import ael
try:
import FCAVariables
reload(FCAVariables)
import FCAAction
reload(FCAAction)
import FCAGeneral
reload(FCAGeneral)
except AttributeError, msg:
print 'WARNING! All FCAVariables have to be defined. Undefined:', msg
print 'Maybe you need to merge FCAVariablesTemplate and FCAVariables.'
from FCAVariables import *
#if log:
#logfile = FCAVariables.logfile #'c:\\temp\\corpact.txt'
#lf = open(logfile, 'a')
#FCAGeneral.close_log(lf)
#lf = open(logfile, 'a')
#else: lf = None
try: Default_Portfolio
except NameError: Default_Portfolio = None
"""----------------------------------------------------------------------------
Main
----------------------------------------------------------------------------"""
if __name__ == "__main__":
    # Command-line mode: parse -u/-p/-d/-f options, connect to ADS and run.
    import sys, getopt
    sys.path.append('/tmp')
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'u:p:d:f:')
        if len(opts) < 2: raise getopt.error, ''
    except getopt.error, msg:
        print msg
        print 'Usage: ael <config name> FCAExecute.py -u <ARENA user> '\
              '-p <ARENA password>\n-d <for how many days before today '\
              'corp actions should be run> -f <"portf name">',
        sys.exit(2)
    ### Default Values:
    user = 0
    passw = 0
    days = 0
    pf = 0
    for o, a in opts:
        if o == '-p': passw = a
        if o == '-u': user = a
        if o == '-d': days = a
        if o == '-f': pf = a
    ### Recommend: Run in -archive mode. Please rearchive using the ExDate as
    ### date.
    ael.connect(environ['ADS_ADDRESS'], user, passw, '', 1)
    # Run corp actions with Ex Date on or before today minus <days>.
    a = ael.date_today()
    d = -int(days)
    date = a.add_days(d)
    if not pf: pf = None
    if verb:
        s = '\nFind Corporate Actions to be performed on or before %s.\n'\
            % (date)
        FCAGeneral.logme(s)
    try:
        for ca in FCAGeneral.get_corp_actions(date, corpacts):
            FCAAction.perform_actions(verb, commit, ca, pf, date,
                                      DefaultMethod)
    except:
        #if log: FCAGeneral.close_log(lf)
        raise
else:
    # Module mode: expose the macro-variables window definition for AEL.
    #"""
    pfs = FCAGeneral.pf()
    cas = FCAGeneral.scr_upd('Script Update')
    CorpActError = '\nNOTE! ALL Corporate Actions listed will be changed. '\
                   'Please select from the Corporate Action application which'\
                   ' Corporate Actions should be performed.'
    ael_variables = [('date', 'CorpAct Date', 'string',\
                     [str(ael.date_today()), 'Today'],
                     FCAVariables.defaultCorpActDate, 0, 0),
                     ('cod', 'CutOff Date', 'string',\
                     ['2001-05-22', '2001-05-15'],
                     '1970-01-01',),
                     #('pf', 'Portfolio to place Trades', 'string', pfs,
                     #Default_Portfolio, 0,0),
                     ('commit', 'Commit', 'string', ['0', '1'], `commit`),
                     ('verb', 'Verbose Printouts', 'string', ['0', '1'], `verb`),
                     ('method', 'Method', 'string',
                     ['Close', 'Adjust'], FCAVariables.DefaultMethod, 0, 0),
                     ('corpacts', 'CorpActs to be Performed', 'string', cas,\
                     '', 0, 1)]
def ael_main(ael_variables_dict):
    # Entry point called by AEL with the values from the macro window.
    series_difference = ael_variables_dict.get('series_difference')
    if series_difference:
        series_difference = int(series_difference)
    _date = ael_variables_dict.get('date')
    # Accept any prefix of 'TODAY' (case-insensitive) as today's date.
    try:
        if _date and 'TODAY'.find(_date.upper()) <> -1:
            _date = str(ael.date_today())
    except AttributeError: # Old Python Version.
        if _date and string.find('TODAY', string.upper(_date)) <> -1:
            _date = str(ael.date_today())
    _pf = None
    _commit = int(ael_variables_dict.get('commit'))
    _verb = int(ael_variables_dict.get('verb'))
    corpacts = ael_variables_dict.get('corpacts')
    if not corpacts:
        corpacts = ['All listed']
    #CA_Method = str(ael_variables_dict.get('method'))
    if ael_variables_dict.get('method'):
        CA_Method = ael_variables_dict.get('method')
    else:
        CA_Method = FCAVariables.DefaultMethod
    if _verb and _date:
        _s = 'Find Corporate Actions to be performed on or before %s.'\
             % (_date)
        FCAGeneral.logme(_s)
    #def run(e, pfid, corpacts, _commit, *rest):
    ### If want to run from ASQL-query.
    #_pf = ael.Portfolio[pfid] ### If want to run from ASQL-query.
    #_date = '2222-02-22'
    #CA_Method = FCAVariables.DefaultMethod
    try:
        old_CA_Method = CA_Method
        for ca in FCAGeneral.get_corp_actions(_date, corpacts):
            if ca.seqnbr > 0:
                ca = ca.clone()
            CA_Method = old_CA_Method #So default will be kept.
            ### Maybe in the future we want to let the value in the
            ### macro window override the value in the GUI. But since the
            ### reason for adding a method in the GUI is to be able to have
            ### different methods per each corpact, for now the GUI method
            ### will override the macro variable.
            try:
                if ca.method != 'None':
                    CA_Method = ca.method
            except AttributeError, msg:
                _string = 'CorpAct GUI field %s does not exist.' % msg
                FCAGeneral.logme(_string)
            if CA_Method == 'Adjust':
                # Adjust requires the positions to aggregate cleanly first.
                AggError = FCAGeneral.aggregate_check(ca)
                if AggError != None:
                    FCAGeneral.logme(AggError)
                else:
                    FCAAction.perform_actions(ca, _verb, _commit, _pf,
                                              _date, CA_Method)
            else:
                FCAAction.perform_actions(ca, _verb, _commit, _pf, _date,
                                          CA_Method)
            #if log:
            #FCAGeneral.close_log(lf)
    except:
        #if log: FCAGeneral.close_log(lf)
        raise
    #return 'Successful' ### If want to run from ASQL-query.
|
988,730 | bc336af7a7c9ce69ca81c5cfc7fa4df35334b713 | import gzip
import math
import matplotlib.pyplot as plt
from tqdm import tqdm
def parse(x):
    """Return the helpfulness ratio (helpful votes / total votes) for each review in x."""
    new_x = []
    for k in tqdm(x):
        # k["helpful"] is a pair: (helpful votes, total votes) — TODO confirm against data source
        new_x.append(k["helpful"][0]/(1.0*k["helpful"][1]))
    print len(new_x)
    return new_x
def show_histogram(x, xlabel, ylabel, title = ""):
    """Plot and display a 30-bin histogram of x with the given axis labels."""
    plt.hist(x, bins = 30)
    plt.ylabel(ylabel)
    plt.xlabel(xlabel)
    plt.title(title)
    plt.show()
def show_scatter_plot(x,y):
    """Plot and display a scatter plot of y against x."""
    plt.scatter(x,y)
    plt.show()
988,731 | 0b28811a5e0e9eff8d9d80e1ac4aaa0add27dbb0 |
import numpy as np
import heapq
from Model.Ant import Ant
from Model.Sensor import Sensor
def get_neighbors(c):
    """Return the 4-connected neighbours of cell c: up, down, left, right."""
    row, col = c
    return [(row - 1, col), (row + 1, col), (row, col - 1), (row, col + 1)]
# calculate our H metric
def heuristic_distance(a, b):
    """Euclidean distance between points a and b (the A* heuristic)."""
    dx = b[0] - a[0]
    dy = b[1] - a[1]
    return np.sqrt(dx ** 2 + dy ** 2)
def reconstructPath(start,goal,came_from):
    """Rebuild the start->goal path by walking came_from links backwards."""
    path = [goal]
    node = goal
    while node != start:
        node = came_from[node]
        path.append(node)
    # Links were followed goal->start, so flip into start->goal order.
    path.reverse()
    return path
class Controller:
    """Plans a drone tour over sensors: A* shortest paths between sensors plus
    an ant-colony optimisation (ACO) over the resulting sensor graph."""
    def __init__(self, mapM, dronePos, droneEnergy, sensorsPos):
        self.__mapM = mapM  # grid map used for path finding
        self.__dronePos = dronePos  # drone start cell (x, y)
        self.__droneEnergy = droneEnergy  # energy budget for the tour
        self.__sensorsPos = sensorsPos  # raw sensor positions
        self.__sensors = [Sensor(pos) for pos in sensorsPos]
        self.__sensors.insert(0, Sensor(dronePos)) # add drone start
        self.__problemSize = len(self.__sensors)
        self.__sensorsPaths = dict()  # (sensor1, sensor2) -> A* path between them
        self.__noEpoch = 0
        self.__noAnts = 0
        self.__alpha = 0.0  # pheromone importance
        self.__beta = 0.0  # heuristic importance
        self.__rho = 0.0  # pheromone evaporation rate
        self.__q0 = 0.0  # exploitation-vs-exploration threshold
        self.__pheromoneTrails = dict()  # (sensor1, sensor2) -> pheromone intensity
    def setACOParameters(self, noEpoch, noAnts, alpha, beta, rho, q0):
        # Configure the ACO hyper-parameters before runACO().
        self.__noEpoch = noEpoch
        self.__noAnts = noAnts
        self.__alpha = alpha
        self.__beta = beta
        self.__rho = rho
        self.__q0 = q0
    def epoch(self):
        """Run one ACO epoch and return the best ant of this epoch."""
        # initialize ant population
        for s in self.__sensors[1:]:
            s.setGoodEnergyArea()
        ants = [Ant(self.__sensors, self.__sensorsPaths, self.__droneEnergy) for _ in range(self.__noAnts)]
        # While the no. of steps required to identify the optimal solution is not performed
        for i in range(self.__problemSize):
            for ant in ants:
                ant.addMove(self.__q0, self.__pheromoneTrails, self.__alpha, self.__beta)
        fitnesses = [ants[i].fitness() for i in range(len(ants))]
        # deposited pheromone is inversely proportional to fitness (guard against 0)
        deltaTrace = [1.0 / (fitnesses[i] if fitnesses[i] > 0 else 1) for i in range(len(ants))]
        # evaporate existing pheromone on every edge
        for key in self.__pheromoneTrails.keys():
            self.__pheromoneTrails[key] = (1 - self.__rho) * self.__pheromoneTrails[key]
        # deposit each ant's pheromone along the edges of its path
        for i in range(len(ants)):
            currentAntPath = ants[i].getPath()
            for j in range(len(currentAntPath)-1):
                s1 = currentAntPath[j]
                s2 = currentAntPath[j+1]
                self.__pheromoneTrails[s1,s2] = self.__pheromoneTrails[s1,s2] + deltaTrace[i]
        f = [[ants[i].fitness(), i] for i in range(len(ants))]
        bestAntIndex = max(f, key=lambda el: el[0])[1]
        return ants[bestAntIndex]
    def runACO(self):
        """Run the configured number of ACO epochs; return the best path found."""
        # set intensity and quantity of pheromone of every path between the sensors to 0 at time 0
        # self.__pheromoneTrails[sensor1,sensor2] = [intensity] deltaTrace = quantity = 0
        for path_key in self.__sensorsPaths.keys():
            self.__pheromoneTrails[path_key] = 1 # trace ?
        # print(self.__pheromoneTrails)
        best_ant = Ant([Sensor()],{},0)  # sentinel; replaced by the first better solution
        for i in range(self.__noEpoch):
            solution = self.epoch()
            if solution.fitness() > best_ant.fitness():
                best_ant = solution
        print("Left energy:", best_ant.droneEnergy)
        return best_ant.getPath() # get the best path
    def getSensorsPositions(self):
        return self.__sensorsPos
    def getDroneEnergy(self):
        return self.__droneEnergy
    def calcSensorsSquaresPerEnergy(self):
        # Precompute, for every real sensor, how many squares it sees per energy level.
        for s in self.__sensors[1:]:
            s.computeSquaresPerEnergy(self.__mapM)
    def calculateMinDistanceBetweenSensors(self):
        # Precompute the A* shortest path between every ordered pair of sensors.
        for s1 in self.__sensors:
            for s2 in self.__sensors:
                posS1 = s1.getPosition()
                posS2 = s2.getPosition()
                if (s1,s2) not in self.__sensorsPaths.keys():
                    self.__sensorsPaths[(s1,s2)] = self.searchAStar(posS1[0],posS1[1],posS2[0],posS2[1])
    def getMinDistanceSensorsPaths(self):
        return self.__sensorsPaths
    # returns a list of moves as a list of pairs [x,y]
    def searchAStar(self, initialX, initialY, finalX, finalY):
        """A* search on the grid map; returns the cell path or None if unreachable."""
        frontier = [] # priority queue
        heapq.heappush(frontier, (0, (initialX, initialY)))
        came_from = dict()
        cost_so_far = dict() # g(n) <-> g score
        came_from[(initialX,initialY)] = None
        cost_so_far[(initialX,initialY)] = 0
        while len(frontier):
            current = heapq.heappop(frontier)[1]
            if current == (finalX, finalY):
                return reconstructPath((initialX, initialY), (finalX, finalY), came_from)
            for neighbor in get_neighbors(current):
                new_cost = cost_so_far[current] + 1 # graph.cost(current, neighbor) = 1
                if (neighbor not in cost_so_far or new_cost < cost_so_far[neighbor]) and \
                        self.__mapM.checkValidMapPosition(neighbor[0], neighbor[1]):
                    cost_so_far[neighbor] = new_cost
                    priority = new_cost + heuristic_distance(neighbor, (finalX, finalY)) # h(n)
                    heapq.heappush(frontier, (priority, neighbor))
                    came_from[neighbor] = current
        return None
|
988,732 | d4d3401d0afd28a828d2a869c442f01abf7f458c | from random_walk import *
import time
def main_serial(N, a, v, delta_t, t_simulasi, z_pusat, x_pusat, r):
    """Run the serial random-walk escape simulation.

    Returns [particle history per step, inside-count per step,
    time stamps, elapsed wall-clock seconds].
    """
    start = time.time()
    arr_r = bangkitkan_random()
    arr_particle = inisialisasi_partikel(N, a, v, arr_r)
    step = t_simulasi / delta_t
    # hole in the wall y = a, centred at (x_pusat, z_pusat) with radius r
    lingkaran = Hole(x_pusat, z_pusat, r)
    hasil = []
    t = []
    p_hist = []
    i = 0
    while i <= step:
        snapshot = []
        for j in range(N):
            particle = arr_particle[j]
            if particle.isInside == 1:
                # move the particle, then redraw a random velocity direction
                update_posisi2(particle, delta_t, a, lingkaran)
                arr_r = bangkitkan_random()
                particle.kecepatan = generate_kecepatan(v, arr_r[3], arr_r[4])
            snapshot.append(Particle(particle.posisi, particle.kecepatan))
        hasil.append(cekJumlahInside(arr_particle))
        t.append(delta_t * i)
        p_hist.append(snapshot)
        i = i + 1
    elapsed = time.time() - start
    return [p_hist, hasil, t, elapsed]
# ======================= Parameter Simulasi =======================================================
N = 1000  # number of particles
a = 12  # box edge length
v = 6  # particle speed
delta_t = 0.1  # time step
t_simulasi = 1000  # total simulated time
z_pusat = 1 / 2 * a  # hole centre (z) on the wall y = a
x_pusat = 1/2 * a  # hole centre (x)
r = 3  # hole radius
# # ==================================================================================================
#
# # =================== Serial =======================================================================
[p_hist, hasil, t, elapsed] = main_serial(N, a, v, delta_t, t_simulasi, z_pusat, x_pusat, r)
# ==================================================================================================
print('=============== Serial Result ============')
print('N awal = ' + str(N))
print('sisa partikel = ' + str(hasil[len(hasil) - 1]))
print('time serial = ' + str(elapsed) + ' s')
print('=========================================')
988,733 | bc062cfc4b883a4c39ab14a84ebdb4cf54768c49 | print("Простий калькулятор")
a = float(input("Введіть перше число: "))
b = float(input("Введіть друге число: "))
operation = input("ВВедіть необхідну операцію:")
# Dispatch table: each supported operator maps to its implementation.
operations = {
    "+": lambda x, y: x + y,
    "-": lambda x, y: x - y,
    "*": lambda x, y: x * y,
    "/": lambda x, y: x / y,
}
result = None
if operation in operations:
    result = operations[operation](a, b)
else:
    print("Помилка")
if result is not None:
    print("Результат:", result)
|
988,734 | 0272ffd4bb7e52e8b52e28388179a8eaf80e23ca | # Copyright 2021 Garena Online Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for atari envpool and speed benchmark."""
import os
import time
import numpy as np
from absl import logging
from absl.testing import absltest
from envpool.atari import AtariDMEnvPool, AtariEnvSpec, AtariGymEnvPool
from envpool.atari.atari_envpool import _AtariEnvPool, _AtariEnvSpec
try:
import cv2
except ImportError:
cv2 = None
class _AtariEnvPoolTest(absltest.TestCase):
  # Unit tests and speed benchmarks for the Atari envpool bindings.
  def test_raw_envpool(self) -> None:
    # Drive the low-level _AtariEnvPool directly via _recv/_send.
    conf = dict(
      zip(_AtariEnvSpec._config_keys, _AtariEnvSpec._default_config_values)
    )
    conf["task"] = b"pong"
    conf["num_envs"] = num_envs = 3
    conf["batch_size"] = batch = 3
    conf["num_threads"] = 3  # os.cpu_count()
    # conf["episodic_life"] = True
    # conf["zero_discount_on_life_loss"] = True
    env_spec = _AtariEnvSpec(tuple(conf.values()))
    env = _AtariEnvPool(env_spec)
    env._reset(np.arange(num_envs, dtype=np.int32))
    state_keys = env._state_keys
    total = 2000
    actions = np.random.randint(6, size=(total, batch))
    t = time.time()
    for i in range(total):
      state = dict(zip(state_keys, env._recv()))
      obs = state["obs"]
      if cv2:
        # Dump a frame per step for visual inspection (cv2 is optional).
        cv2.imwrite(f"/tmp/log/raw{i}.png", obs[0, 1:].transpose(1, 2, 0))
      action = {
        "env_id": state["info:env_id"],
        "players.env_id": state["info:players.env_id"],
        "action": actions[i],
      }
      env._send(tuple(action.values()))
    duration = time.time() - t
    # frameskip of 4 is folded into the FPS figure
    fps = total * batch / duration * 4
    logging.info(f"Raw envpool FPS = {fps:.6f}")
  def test_align(self) -> None:
    """Make sure gym's envpool and dm_env's envpool generate the same data."""
    num_envs = 4
    config = AtariEnvSpec.gen_config(task="space_invaders", num_envs=num_envs)
    spec = AtariEnvSpec(config)
    env0 = AtariGymEnvPool(spec)
    env1 = AtariDMEnvPool(spec)
    obs0 = env0.reset()
    obs1 = env1.reset().observation.obs  # type: ignore
    np.testing.assert_allclose(obs0, obs1)
    for i in range(1000):
      # Feed both pools the same actions; observations must stay identical.
      action = np.random.randint(6, size=num_envs)
      obs0 = env0.step(action)[0]
      obs1 = env1.step(action).observation.obs  # type: ignore
      np.testing.assert_allclose(obs0, obs1)
      if cv2:
        cv2.imwrite(f"/tmp/log/align{i}.png", obs0[0, 1:].transpose(1, 2, 0))
  def test_partial_step(self) -> None:
    # Step only a subset of env ids and check TimeLimit truncation per subset.
    num_envs = 5
    max_episode_steps = 10
    config = AtariEnvSpec.gen_config(
      task="defender", num_envs=num_envs, max_episode_steps=max_episode_steps
    )
    spec = AtariEnvSpec(config)
    env = AtariGymEnvPool(spec)
    for _ in range(3):
      print(env)
      env.reset()
      # Split env ids into even and odd halves; the odd half gets one extra step.
      partial_ids = [np.arange(num_envs)[::2], np.arange(num_envs)[1::2]]
      env.step(np.zeros(len(partial_ids[1]), dtype=int), env_id=partial_ids[1])
      for _ in range(max_episode_steps - 2):
        info = env.step(
          np.zeros(num_envs, dtype=int), env_id=np.arange(num_envs)
        )[-1]
        assert np.all(~info["TimeLimit.truncated"])
      info = env.step(
        np.zeros(num_envs, dtype=int), env_id=np.arange(num_envs)
      )[-1]
      env_id = np.array(info["env_id"])
      # Only the odd half should have hit the step limit at this point.
      done_id = np.array(sorted(env_id[info["TimeLimit.truncated"]]))
      assert np.all(done_id == partial_ids[1])
      info = env.step(
        np.zeros(len(partial_ids[0]), dtype=int),
        env_id=partial_ids[0],
      )[-1]
      assert np.all(info["TimeLimit.truncated"])
  def test_benchmark(self) -> None:
    # Throughput benchmark; larger settings on a 256-core machine.
    if os.cpu_count() == 256:
      num_envs = 645
      batch = 248
      num_threads = 248
      total = 50000
    else:
      num_envs = 8
      batch = 3
      num_threads = 3
      total = 1000
    config = AtariEnvSpec.gen_config(
      task="pong",
      num_envs=num_envs,
      batch_size=batch,
      num_threads=num_threads,
      thread_affinity_offset=0,
    )
    spec = AtariEnvSpec(config)
    env = AtariGymEnvPool(spec)
    env.async_reset()
    action = np.ones(batch, dtype=np.int32)
    t = time.time()
    for _ in range(total):
      info = env.recv()[-1]
      env.send(action, info["env_id"])
    duration = time.time() - t
    # frameskip of 4 is folded into the FPS figure
    fps = total * batch / duration * 4
    logging.info(f"Python envpool FPS = {fps:.6f}")
# Allow running this test module directly.
if __name__ == "__main__":
  absltest.main()
|
988,735 | 152cdd2f649d3594f6b2403ddfc8124655c5e592 | import os
import shutil
def main():
    """Rename every file in the two dataset directories to <index>.csv.

    Any error (e.g. a missing directory) is printed and swallowed, matching
    the original script's best-effort behavior.
    """
    try:
        path = 'C:\\Users\\ankan\\Desktop\\Datasets_Healthy_Older_People\\S1_dataset'
        path1 = 'C:\\Users\\ankan\\Desktop\\Datasets_Healthy_Older_People\\S2_dataset'
        for directory in (path, path1):
            for index, name in enumerate(os.listdir(directory)):
                # Bug fix: the original tested os.path.exists on the SOURCE
                # path (always true for a listdir entry), so no rename ever
                # ran; check the TARGET instead so re-runs are safe.
                target = os.path.join(directory, ''.join([str(index), '.csv']))
                if not os.path.exists(target):
                    os.rename(os.path.join(directory, name), target)
    except Exception as e:
        print(e)


if __name__ == "__main__":
    main()
988,736 | 322308ef3083a2d794d02a39fb6ba5b093afb248 | from setuptools import setup
import sys
import aws_with
# Runtime dependencies of the package.
install_requires = [
    "boto3 >= 1.4.6",
    "pyyaml",
]

# argparse only joined the stdlib in Python 2.7; vendor it for older runtimes.
if sys.version_info[:2] < (2, 7):
    install_requires += [
        "argparse",
    ]

# All package metadata is sourced from the aws_with package itself.
setup(
    name=aws_with.__title__,
    version=aws_with.__version__,
    description=aws_with.__summary__,
    long_description=open("README.rst").read(),
    license=aws_with.__license__,
    url=aws_with.__uri__,
    author=aws_with.__author__,
    author_email=aws_with.__email__,
    packages=["aws_with"],
    install_requires=install_requires,
    extras_require={},
    data_files = [("", ["LICENSE.txt"])],
    # Install the aws_with console script pointing at aws_with.main:main.
    entry_points={'console_scripts': ['aws_with = aws_with.main:main']},
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: MacOS :: MacOS X',
        'Topic :: System :: Systems Administration',
        'Topic :: Utilities',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7'
    ],
)
|
988,737 | 82e9839f910d23791bb0c19470e02c0be2de39dd | import os
import zipfile
import tensorflow as tf
from tensorflow import keras
import tensorflow_addons as tfa
from tensorflow.keras import layers
import pprint
import numpy as np
print(tf.__version__)
# Locations of the TSV data splits on mounted Google Drive (Colab).
gdrive_data_directory = "/content/drive/My Drive/Data"
data_tmp_directory = gdrive_data_directory
data_tmp_training = data_tmp_directory + "/training.csv"
data_tmp_test = data_tmp_directory + "/test.csv"
data_tmp_validation = data_tmp_directory + "/validation.csv"
batch_size = 512
# One string default per CSV column (460 columns); used by tf.io.decode_csv.
default_values = ["0.0" for a in range (460)]
# Row counts were precomputed once and hard-coded to avoid rescanning the files.
training_data_length = 8066567 # get_data_length(data_tmp_training)
test_data_length = 2558554 #get_data_length(data_tmp_test)
validation_data_length = 2016641 #2016648# get_data_length(data_tmp_validation)
print ('traininglength:', training_data_length)
print('test:', test_data_length)
print('validation', validation_data_length)
def write_text(path, text):
    """Write *text* to *path* as UTF-8, terminated with a newline."""
    with open(path, "w", encoding="utf-8") as handle:
        handle.write(str(text) + "\n")
def _parse_line(line):
    """Parse one tab-separated CSV line into (features dict, label).

    The row has 460 columns (see ``default_values``): column 0 is the label,
    the rest are grouped into fixed slices per input head of the model.
    """
    # Decode the line into its fields
    fields = tf.io.decode_csv(line, default_values, field_delim='\t')
    # Pack the result into a dictionary
    features = [tf.strings.to_number(i, out_type=tf.dtypes.float32) for i in fields] #dict(zip(columns,fields))
    label = features[0] #.pop('0matching')
    # Fixed column ranges; slice widths vary (45, 43, 35 ...) by feature group.
    categories = features[1:21]
    count = features[21:22]
    structure_basic_01 = features[22:67]
    structure_basic_02 = features[67:112]
    structure_basic_03 = features[112:157]
    structure_basic_05 = features[157:200]
    structure_template_01 = features[200:245]
    structure_template_02 = features[245:290]
    structure_template_03 = features[290:335]
    structure_template_05 = features[335:370]
    structure_template_06 = features[370:415]
    structure_template_08 = features[415:460]
    # Alternative layout kept from an earlier 475-column schema:
    # label = features[0:15] #.pop('0matching')
    # categories = features[15:35]
    # count = features[35:36]
    # structure_basic_01 = features[36:81]
    # structure_basic_02 = features[81:126]
    # structure_basic_03 = features[126:171]
    # structure_basic_05 = features[171:215]
    # structure_template_01 = features[215:260]
    # structure_template_02 = features[260:305]
    # structure_template_03 = features[305:350]
    # structure_template_05 = features[350:385]
    # structure_template_06 = features[385:430]
    # structure_template_08 = features[430:475]
    # Keys must match the model's named input layers.
    features = {
        'categories':categories,
        'templatecount':count,
        'inputStructureBasic01':structure_basic_01,
        'inputStructureBasic02':structure_basic_02,
        'inputStructureBasic03':structure_basic_03,
        'inputStructureBasic05':structure_basic_05,
        'inputStructureTemplate01':structure_template_01,
        'inputStructureTemplate02':structure_template_02,
        'inputStructureTemplate03':structure_template_03,
        'inputStructureTemplate05':structure_template_05,
        'inputStructureTemplate06':structure_template_06,
        'inputStructureTemplate08':structure_template_08}
    # Separate the label from the features
    return features, label
def createDataTensor(filePath, dataLength):
    """Build a batched tf.data pipeline over the CSV at *filePath*.

    ``dataLength`` is currently unused (shuffling is disabled below).
    """
    dataset = tf.data.TextLineDataset(filePath).skip(1)  # skip the CSV header
    dataset = dataset.map(_parse_line, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    # dataset = dataset.repeat()
    #dataset = dataset.shuffle(dataLength, reshuffle_each_iteration=True)
    # drop_remainder keeps every batch exactly batch_size rows.
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
    print(dataset)
    return dataset
# Run the saved model over the test set and dump (label, prediction) pairs.
testDataset = createDataTensor(data_tmp_test, test_data_length)
print("load and predict")
model = tf.keras.models.load_model('/content/drive/My Drive/Data/model.h5')
test_steps = test_data_length // batch_size
results = []
count = 0
for features, label in testDataset:
    label = label.numpy()
    predictions = model.predict(features)
    # Each prediction row is a 1-element vector; keep the scalar.
    predictions = [a[0] for a in predictions]
    # NOTE: the loop variable deliberately shadows the batch's `label` array.
    for label, prediction in zip(label,predictions):
        combined = {"label":label.item(),"prediction":prediction.item()}
        results.append(combined)
    count += 1
    print (str(count) + "/" + str(test_steps))
print("Write Data")
import json
jsonstring = json.dumps(results)
result_file_path = '/content/drive/My Drive/Data/Prediction.txt'
write_text(result_file_path, jsonstring)
|
988,738 | ebc91fa75c4baa467a0842acc56c35195dfdebc6 | #test
import scrapy
class DmozSpider(scrapy.spiders.Spider):
    """Minimal Scrapy spider stub; only the mandatory spider name is set."""
    name = "dmoz"
|
988,739 | a1025224fff76aee836ecf09e5384f9e919891e0 | from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User
from rest_framework import serializers
def verification_password(value: str) -> str:
    """Validate password strength and return its Django hash.

    Rules: at least 8 characters, at least one ASCII uppercase letter and
    at least one digit.

    Raises:
        serializers.ValidationError: when any rule is violated (same
            check order as before: length, then uppercase, then digit).
    """
    if len(value) < 8:
        # Message grammar fixed: previously read "must have to have".
        raise serializers.ValidationError('Password must have at least 8 characters')
    if not any(c in 'QAZWSXEDCRFVTGBYHNUJMIKOLP' for c in value):
        raise serializers.ValidationError('Password must contain at least 1 uppercase letter')
    if not any(c in '1234567890' for c in value):
        raise serializers.ValidationError('Password must contain at least 1 number')
    return make_password(value)
def verification_unique_email(value: str) -> str:
    """Ensure no existing user already has *value* as email.

    Raises:
        serializers.ValidationError: if the email is taken.
    """
    # .exists() issues a cheap EXISTS query instead of fetching all rows.
    if User.objects.filter(email=value).exists():
        raise serializers.ValidationError('User with given credentials already exist')
    return value
def verification_unique_username(value: str) -> str:
    """Ensure no existing user already has *value* as username.

    Raises:
        serializers.ValidationError: if the username is taken.
    """
    # .exists() issues a cheap EXISTS query instead of fetching all rows.
    if User.objects.filter(username=value).exists():
        raise serializers.ValidationError('User with given credentials already exist')
    return value
def verification_exist_email(value: str) -> str:
    """Ensure a user with email *value* exists.

    Raises:
        serializers.ValidationError: if no such user exists.
    """
    # .exists() issues a cheap EXISTS query instead of fetching all rows.
    if not User.objects.filter(email=value).exists():
        raise serializers.ValidationError('User with given credentials are not found')
    return value
def verification_email_and_return_username(value: str) -> str:
    """Look up the user with email *value* and return their username.

    Raises:
        serializers.ValidationError: if no such user exists.
    """
    # .first() fetches a single row (LIMIT 1) instead of the whole queryset.
    user = User.objects.filter(email=value).first()
    if user is None:
        raise serializers.ValidationError('User with given credentials are not found')
    return user.username
|
988,740 | 4955d98a1da3163fda2bbe65aa99e895fb45f702 | from __future__ import print_function
import sys
def eprint(*args, **kwargs):
    """Like print(), but writes to standard error."""
    stream = sys.stderr
    print(*args, file=stream, **kwargs)
# NOTE(review): Python 2 script — raw_input and integer "/" division are
# relied on throughout; do not run under Python 3 without porting.
nCase = int(raw_input())
eprint("There are %d cases" % nCase)
for caseI in range(1, nCase + 1):
    # n stalls, k-th person to be placed; simulate the halving pattern.
    n, k = [int(s) for s in raw_input().split(" ") ]
    eprint("Input : n: {}, k: {}".format(n, k))
    r = n
    l = n
    while(k > 1):
        # Split the current gap of size n around its middle position.
        pos = n / 2 + n%2
        l = pos - 1
        r = pos - n%2
        k -= 1 # One has been placed
        if(k%2):
            n = max(l,r)
            k = k/2 + 1
        else:
            n= min(l,r)
            k = k/2
    # Final gap: report the larger and smaller remaining distances.
    pos = n / 2 + n%2
    l = pos - 1
    r = pos - n%2
    sol = "Case #{}: {} {}".format(caseI, max(l,r), min(r,l))
    eprint(sol)
    print(sol)
|
988,741 | e8834bf404c48ed7f9278fd9a9346ffe5e76f386 | import numpy as np
import matplotlib.pylab as plt
# 1000 samples from a uniform distribution over [-10, 10).
unif = np.random.random(1000)*20 -10
fig0 = plt.figure(figsize=(8,4))
ax = fig0.add_subplot(111)
ax.hist(unif, bins = 100)
ax.set_xlabel('Números aleatorios')
ax.set_ylabel('Repeticiones')
ax.set_title('Distribución Uniforme de Números Aleatorios')
plt.savefig('uniforme.pdf')
# 1000 samples from a Gaussian with mean 17 and std 5.
gauss = np.random.normal(17,5,1000)
fig1 = plt.figure(figsize=(8,4))
ax = fig1.add_subplot(111)
ax.hist(gauss, bins = 100)
ax.set_xlabel('Números aleatorios')
ax.set_ylabel('Repeticiones')
ax.set_title('Distribución Gaussiana de Números Aleatorios')
plt.savefig('gaussiana.pdf')
# Uniform (x, y) points inside a square of side 30.5 starting at the origin.
cuad1 = np.random.random(1000)*30.5 -0
cuad2 = np.random.random(1000)*30.5 -0
fig2 = plt.figure(figsize=(8,4))
ax = fig2.add_subplot(111)
ax.plot(cuad1, cuad2,'.r')
ax.set_xlabel('Números aleatorios $X$')
ax.set_ylabel('Números aleatorios $Y$')
ax.set_title('Distribución Aleatoria en Cuadrado de Lado 30.5')
plt.savefig('cuadrado.pdf')
# Points inside a circle of radius 23: pick x, then y uniform in [-y_max, y_max]
# where y_max = sqrt(r^2 - x^2). (This is denser near the circle's edges,
# not uniform over the disk's area.)
r = 23
circX = np.random.random(1000)*46 -23
circY = np.empty([1000])
for i in range(len(circX)):
    y = np.sqrt((r**2 - np.abs(circX[i])**2))
    circY[i] = np.random.random(1)*np.abs(y*2) -y
fig3 = plt.figure(figsize=(8,8))
ax = fig3.add_subplot(111)
ax.plot(circX, circY,'.r')
ax.set_xlabel('Números aleatorios $X$')
ax.set_ylabel('Números aleatorios $Y$')
ax.set_title('Distribución Aleatoria en Circulo de Radio 23')
plt.savefig('circulo.pdf')
|
988,742 | f0c60fedf1bf8f79a98ae6b42962e4cc7f88ff52 | """ Views fro dutch postalcodes """
from __future__ import absolute_import
import json
from django.http import HttpResponse
from planner.nlpostalcode.models import Street
def get_streets(fourpp, chars):
    """Return Street rows matching a postal code's digits (fourpp) and letters (chars)."""
    return Street.objects.filter(postcode__fourpp=fourpp).filter(chars=chars).all()
def get_info_on_postalcode(_, postalcode):
    """ Returns street and town for a complete postalcode """
    # A full Dutch postal code is 4 digits followed by 2 letters.
    fourpp = int(postalcode[0:4])
    chars = postalcode[4:6]
    streets = get_streets(fourpp, chars)
    data = {'found': False}
    if streets:
        first = streets[0]
        data = {
            'found': True,
            'address': first.street,
            'town': first.postcode.city.get_official_name(),
        }
    return HttpResponse(json.dumps(data), content_type='application/json')
|
988,743 | cdf93628aa5432539fdb991a039b25a8cfa0e618 | from dataclasses import dataclass
import os
import traceback
from utils.ncbi_database import NCBIDatabase
from utils.gene_util import get_opposite_dna
from utils.str_util import StrConverter
@dataclass
class GeneExtract:
    """Extract DNA sequences from an NCBI data file, either by gene name
    or by explicit coordinate ranges listed in ``rna_path``.

    Fixes over the previous revision: input files are now closed
    deterministically via ``with`` blocks, and the bare ``except:`` no
    longer swallows SystemExit/KeyboardInterrupt.
    """
    data_path: str              # NCBI source file
    rna_path: str               # query file (gene names or coordinate table)
    output_directory: str
    gene_extract_based: str = 'gene'   # 'gene' or 'range'
    left_idx: int = -2          # column holding the range start (range mode)
    right_idx: int = -1         # column holding the range end (range mode)

    def __post_init__(self):
        # Derive the result file name from the query file name.
        file_name = os.path.basename(self.rna_path)
        file_prefix = StrConverter.extract_file_name(file_name)
        self.result_path = os.path.join(self.output_directory, '%s_extract_result.txt' % file_prefix)
        self.gene_reader = NCBIDatabase(self.data_path)
        self.headers = {}       # column name -> column index
        self.inv_headers = []   # column index -> column name

    def generate_header(self, items):
        """Populate the header lookup tables from a tab-separated header line."""
        for idx, col_name in enumerate(items.strip().split('\t')):
            self.headers[col_name] = idx
            self.inv_headers.append(col_name)

    def run(self):
        """Dispatch to the configured extraction mode and write the result file."""
        dna_code = self.gene_reader.dna_code
        with open(self.result_path, 'w', encoding='utf8') as fw:
            if self.gene_extract_based == 'gene':
                self.extract_sequence_based_on_gene(dna_code, fw)
            elif self.gene_extract_based == 'range':
                self.extract_sequence_based_on_range(dna_code, fw)

    def extract_sequence_based_on_gene(self, dna_code, fw):
        """Write one row per CDS of every gene listed in ``rna_path``."""
        fw.write('No\tgene\tfrom\t\tend\tproduct\tsequence\n')
        # `with` closes the query file (previously left to the GC).
        with open(self.rna_path) as fr:
            for gene_idx, gene in enumerate(fr):
                gene = gene.strip()
                succ = False
                for idx in self.gene_reader.gene_name_segment_map.get(gene, []):
                    gene_segment = self.gene_reader.gene_segments[idx]
                    succ = True
                    start = gene_segment.cds[0]
                    end = gene_segment.cds[1]
                    product = gene_segment.product
                    # CDS coordinates are 1-based inclusive.
                    sequence = dna_code[start - 1:end]
                    fw.write('d%d\t%s\t%s\t%s\t%s\t%s\n' % (
                        gene_idx + 1, gene, start, end, product, sequence))
                if not succ:
                    print('%s not found in %s' % (gene, self.data_path))

    def extract_sequence_based_on_range(self, dna_code, fw):
        """Fill the 'sequence' column for each coordinate row of ``rna_path``."""
        with open(self.rna_path, 'r', encoding='utf8') as fr:
            lines = [line.strip() for line in fr]
        self.generate_header(lines[0])
        fw.write(lines[0] + '\n')
        for line in lines[1:]:
            result = {}
            infos = line.strip().split('\t')
            for idx, info in enumerate(infos):
                result[self.inv_headers[idx]] = info
            if result.get('sequence', '') == '':
                try:
                    a, b = map(int, [infos[self.left_idx], infos[self.right_idx]])
                    left = min(a, b)
                    right = max(a, b)
                    direction = a < b   # False => reverse strand
                    # id start from 0
                    left -= 1
                    right -= 1
                    if not direction:
                        left += 1
                        right += 1
                    dna = dna_code[left:right]
                    if not direction:
                        # Reverse strand: reverse-complement the slice.
                        result['sequence'] = get_opposite_dna(dna[::-1])
                    else:
                        result['sequence'] = dna
                except Exception:
                    # Was a bare `except:`; keep best-effort row handling
                    # but do not trap SystemExit/KeyboardInterrupt.
                    print(infos)
                    traceback.print_exc()
            fw.write(self.extract_output(result) + '\n')

    def extract_output(self, result):
        """Serialize *result* back into the original column order."""
        output = []
        for name in self.inv_headers:
            output.append(result.get(name, ''))
        return '\t'.join(output)
|
988,744 | d71e9b38b8964336d750bcbc4071487a434db335 | __author__ = 'nsonepa'
|
988,745 | 35b7f749766a2363b6cd815b1b2ec39c221c8be3 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2017-07-31 04:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Oneplus5 and OnePlusNav tables."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Oneplus5',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('h2', models.CharField(max_length=30)),
                ('title', models.CharField(max_length=30)),
                ('title_desc', models.CharField(max_length=200)),
                ('img_url', models.ImageField(upload_to=b'uploads/')),
            ],
            options={
                'verbose_name': 'OnePlus5',
                'verbose_name_plural': 'OnePlus5',
            },
        ),
        migrations.CreateModel(
            name='OnePlusNav',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # verbose_name/options hold \u-escaped Chinese labels (py2-era file).
                ('name', models.CharField(max_length=16, verbose_name='\u914d\u7f6e\u4ecb\u7ecd')),
                ('index', models.IntegerField(default=999, verbose_name='\u6392\u5e8f')),
            ],
            options={
                'verbose_name': 'oneplus\u914d\u7f6e',
                'verbose_name_plural': 'oneplus\u914d\u7f6e',
            },
        ),
    ]
|
988,746 | 00dfcd11008f038c65499662362ceaead3cdf417 | #Create a string and print its length using the len() function
# Exercise 1: length of a string via len()
word = "Harrison"
print(len(word))

# Exercise 2: concatenate two strings and print the combination
word1 = "Ha"
word2 = "rry"
word3 = "{}{}".format(word1, word2)
print(word3)

# Exercise 3: combine two strings with ", " between them and a trailing "."
word1 = "Ha"
word2 = "rry"
word4 = "{}, {}.".format(word1, word2)
print(word4)

# Exercise 4: slice "bazinga" to print just "zing"
target = "bazinga"
print(target[2:6])
|
988,747 | 11a1e06ddfb456bc086cf60e8faf6f64ae22ad26 | import datetime
import enum
import os
from itertools import chain
from django.views.generic import TemplateView
from django.shortcuts import render
from django.db.models import Q
from django.http import JsonResponse
from django.http.response import HttpResponseBadRequest
from rest_framework import status, generics
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.response import Response
from rest_framework.views import APIView
from . import models, serializers
from .permissions import *
from .managers.time_table import TimeTable
from .serializers import PlanItemSerializer, PlanSerializer, GlobalSearchSerializer, AdvancedSearchSerializer, \
SavePostSerializer, ShowPostSerializer, FollowingsSerializer, TopPostSerializer, LocationPostSerializer, \
UserPlansSerializer, ImageSerializer, TopPlannerSerializer, CustomUserDetailsSerializer
class ActionOnFollowRequestType(enum.Enum):
    """Possible decisions on a pending follow request."""
    # Bug fix: a trailing comma previously made `accept`'s value the tuple (1,).
    accept = 1
    reject = 2
class FollowingState(enum.Enum):
    """Relationship between the viewing user and a post's author."""
    # Bug fix: trailing commas previously made the first three values tuples.
    Follow = 1
    Following = 2
    Requested = 3
    Own = 4
class HomePageView(TemplateView):
    """Serve the front-end entry point (index.html) for any GET."""
    def get(self, request, **kwargs):
        return render(request, 'index.html', context=None)
class CitiesListView(generics.ListAPIView):
    """List of cities in database, include name and id"""
    # Publicly readable; no pagination or filtering applied.
    queryset = models.City.objects.all()
    serializer_class = serializers.CitySerializer
    permission_classes = [AllowAny]
class SuggestListView(generics.ListAPIView):
    """List of some suggestion according to selected city"""
    serializer_class = serializers.SuggestSerializer
    permission_classes = [AllowAny]

    def get_queryset(self):
        # Top three best-rated entries per category for the requested city,
        # in the order: restaurants, recreational places, museums.
        city = get_object_or_404(models.City, pk=self.kwargs.get('id'))
        per_category = (
            category.objects.filter(city=city).order_by('-rating')[0:3]
            for category in (models.Restaurant, models.RecreationalPlace, models.Museum)
        )
        return list(chain.from_iterable(per_category))
class SuggestPlanView(generics.RetrieveAPIView):
    """Get a plan suggestion to user"""
    permission_classes = [AllowAny]

    def get(self, request, *args, **kwargs):
        # Dates arrive as ISO-like strings, e.g. 2020-01-31T10:30Z.
        dest_city = models.City.objects.get(pk=self.kwargs.get('id'))
        start_day = datetime.datetime.strptime(self.request.query_params.get('start_date'), "%Y-%m-%dT%H:%MZ")
        finish_day = datetime.datetime.strptime(self.request.query_params.get('finish_date'), "%Y-%m-%dT%H:%MZ")
        result = self.get_plan(dest_city, start_day, finish_day)
        return JsonResponse(data=result, status=status.HTTP_200_OK)

    def get_plan(self, dest_city, start_date, finish_date):
        """Build a suggested itinerary via the TimeTable manager."""
        time_table = TimeTable(start_date, finish_date)
        # 120/60 presumably are slot/gap durations in minutes — confirm in TimeTable.
        time_table.create_table(120, 60)
        time_table.tagging()
        time_table.set_places(dest_city)
        plan = time_table.get_json_table()
        return plan
class PlansView(generics.ListCreateAPIView):
    """List the current user's plans (GET) or create a plan + its post (POST)."""
    serializer_class = serializers.PlanSerializer
    permission_classes = (IsAuthenticated,)

    def get(self, request, *args, **kwargs):
        plans = models.Plan.objects.filter(user=self.request.user)
        data = serializers.MyPlansSerializer(instance=plans, many=True).data
        return Response(data=data, status=status.HTTP_200_OK)

    def post(self, request, *args, **kwargs):
        # A plan post requires items, a cover image and a description.
        if not request.data.get('plan_items'):
            return HttpResponseBadRequest("Error : The plan items doesn't exist.")
        if not request.data.get('image'):
            return HttpResponseBadRequest("Error : The cover image doesn't exist.")
        if not request.data.get('description'):
            return HttpResponseBadRequest("Error : The description doesn't exist.")
        plan = self.create_plan(request.data)
        self.create_plan_items(request.data['plan_items'], plan.id)
        request.data['content'] = request.data['description']
        post = self.save_post(request.data, plan.id)
        post_id = post.pk
        image = request.data['image']
        modified_data = {'post': post_id, 'image': image}
        serializer = ImageSerializer(data=modified_data)
        # is_valid(True) == raise_exception=True: invalid input raises a 400.
        if serializer.is_valid(True):
            serializer.save()
        return Response(status=status.HTTP_200_OK)

    def create_plan_items(self, plan_items, plan_id):
        """Persist each submitted plan item, attached to *plan_id*."""
        for item in plan_items:
            item['plan'] = plan_id
            serializer = PlanItemSerializer(data=item)
            if serializer.is_valid(True):
                serializer.save()

    def create_plan(self, plan_dict):
        """Create and return the Plan owned by the requesting user."""
        plan_dict['user'] = self.request.user.id
        plan_dict['creation_date'] = datetime.datetime.now()
        serializer = PlanSerializer(data=plan_dict)
        if serializer.is_valid(True):
            plan = serializer.save()
            return plan
        return None

    def save_post(self, data, plan_id):
        """Create the feed post that represents this plan."""
        data['type'] = 'plan_post'
        data['creation_date'] = datetime.datetime.now()
        data['user'] = self.request.user.id
        data['plan_id'] = plan_id
        serializer = SavePostSerializer(data=data)
        if serializer.is_valid(True):
            return serializer.save()
class GetUpdateDeletePlanView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, partially update, or delete a single plan and its items."""
    permission_classes = [AllowAny, GetUpdateDeletePlanPermission]

    def get(self, request, *args, **kwargs):
        plan_id = self.kwargs.get('id')
        plan = get_object_or_404(models.Plan, pk=plan_id)
        plan_details = serializers.PlanSerializer(instance=plan).data
        plan_details['plan_items'] = []
        plan_items = models.PlanItem.objects.filter(plan=plan)
        for plan_item in plan_items:
            plan_item_details = serializers.PlanItemSerializer(plan_item).data
            # The parent plan id is implicit in the response envelope.
            plan_item_details.pop('plan')
            plan_details['plan_items'].append(plan_item_details)
        return Response(data=plan_details)

    def patch(self, request, *args, **kwargs):
        if not self.request.data.get('plan_items'):
            return HttpResponseBadRequest("error: field 'plan_items' is required.")
        plan_items = self.request.data['plan_items']
        plan_id = self.kwargs.get('id')
        plan = get_object_or_404(models.Plan, id=plan_id)
        # Optional cover replacement: drop the old image file and record first.
        if self.request.data.get('cover'):
            post = get_object_or_404(models.Post, type='plan_post', plan_id=plan_id)
            image = get_object_or_404(models.Image, post=post.id)
            os.remove(image.image.path)
            image.delete()
            data = {'image': self.request.data.get('cover'), 'post': post.id}
            serializer = ImageSerializer(data=data)
            if serializer.is_valid(True):
                serializer.save()
        plan_detail = self.request.data
        plan_detail['id'] = plan_id
        plan_detail.pop('plan_items')
        plan_serializer = serializers.UpdatePlanSerializer(instance=plan, data=plan_detail, partial=True)
        if plan_serializer.is_valid(True):
            plan_serializer.save()
        # Split submitted items: with id -> update, without id -> create;
        # anything not resubmitted is deleted below.
        plan_items_create_data = []
        plan_items_update_data = []
        plan_items_update_id = []
        instances = []
        for plan_item in plan_items:
            plan_item['plan'] = plan_id
            plan_item_id = plan_item.get('id')
            if plan_item_id is None:
                plan_items_create_data.append(plan_item)
            else:
                plan_items_update_data.append(plan_item)
                plan_items_update_id.append(plan_item_id)
                instances.append(get_object_or_404(models.PlanItem, pk=plan_item_id))
        serializer = serializers.PatchPlanItemSerializer(instance=instances,
                                                         data=plan_items_update_data, many=True)
        if serializer.is_valid(True):
            serializer.save()
        models.PlanItem.objects.filter(plan=plan_id).exclude(id__in=plan_items_update_id).delete()
        serializer = serializers.PatchPlanItemSerializer(data=plan_items_create_data, many=True)
        if serializer.is_valid(True):
            serializer.save()
        return Response()

    def delete(self, request, *args, **kwargs):
        models.Plan.objects.filter(pk=self.kwargs.get('id')).delete()
        return Response()
class GlobalSearchList(generics.ListAPIView):
    serializer_class = GlobalSearchSerializer
    permission_classes = [AllowAny]

    def get_queryset(self):
        """Case-insensitive name search across every place category of one city."""
        city = get_object_or_404(models.City, pk=self.kwargs.get('id'))
        query = self.request.query_params.get('query', None)
        # Category order is part of the response contract.
        place_models = (models.Restaurant, models.Museum, models.Cafe,
                        models.RecreationalPlace, models.TouristAttraction,
                        models.Hotel, models.ShoppingMall)
        matches = []
        for place_model in place_models:
            matches.extend(place_model.objects.filter(Q(name__icontains=query) & Q(city=city)))
        return matches
class LocationTypes(enum.Enum):
    """Numeric codes clients use to select place categories in searches."""
    Restaurant = 1
    Museum = 2
    Cafe = 3
    Hotel = 4
    RecreationalPlace = 5
    TouristAttraction = 6
    ShoppingMall = 7
class AdvancedSearch(generics.CreateAPIView):
    """Filter places of the requested categories in a city by minimum rating."""
    serializer_class = AdvancedSearchSerializer
    permission_classes = [AllowAny]

    def post(self, request, *args, **kwargs):
        all_result = self.get_queryset(request.data)
        return Response(data=all_result)

    def get_queryset(self, data):
        """Return all places of the requested types rated >= data['rate'].

        Bug fix: `all_results` was only initialized when the Restaurant type
        happened to be processed, so any other request order raised
        UnboundLocalError (and a late Restaurant entry discarded earlier
        results). A dispatch table plus an upfront empty list fixes both.
        """
        city_id = self.kwargs.get('id')
        city = models.City.objects.get(pk=city_id)
        rate = data['rate']
        type_to_model = {
            LocationTypes.Restaurant.value: models.Restaurant,
            LocationTypes.Museum.value: models.Museum,
            LocationTypes.Hotel.value: models.Hotel,
            LocationTypes.Cafe.value: models.Cafe,
            LocationTypes.RecreationalPlace.value: models.RecreationalPlace,
            LocationTypes.TouristAttraction.value: models.TouristAttraction,
            LocationTypes.ShoppingMall.value: models.ShoppingMall,
        }
        all_results = []
        for type_loc in data['types']:
            model = type_to_model.get(type_loc)
            if model is not None:  # unknown codes are ignored, as before
                all_results += model.objects.filter(Q(rating__gte=rate) & Q(city=city))
        return all_results
class ShowPostView(generics.ListAPIView):
    """Paginated feed: own posts, followed users' posts, and public posts."""
    permission_classes = [AllowAny]
    serializer_class = ShowPostSerializer

    def get_queryset(self):
        # Ids of users the requester follows.
        user = self.request.user.id
        following_users = [item['following_user_id'] for item in
                           models.UserFollowing.objects.filter(user_id=user).values('following_user_id')]
        return following_users

    def get(self, request, *args, **kwargs):
        user = self.request.user.id
        following_users = self.get_queryset()
        if not self.request.query_params.get('page').isdigit():
            return HttpResponseBadRequest("Error : the page number is not correct.")
        page_number = int(self.request.query_params.get('page'))
        # Pages are 20 posts wide, newest first.
        posts = models.Post.objects.filter(Q(user=user) | Q(user__in=following_users) |
                                           Q(user__is_public=True)).order_by('-creation_date')[(page_number - 1)
                                                                                               * 20:page_number * 20]
        posts_data = serializers.ShowPostSerializer(instance=posts, many=True).data
        # NOTE(review): this enrichment loop issues several queries per post
        # (an N+1 pattern) — consider select_related/annotate if it gets slow.
        for data in posts_data:
            data['destination_city'] = get_object_or_404(models.Plan, id=data['plan_id']).destination_city.name
            data['user_name'] = get_object_or_404(models.BegardUser, id=data['user']).email
            data['user_profile_image'] = get_object_or_404(models.BegardUser, id=data['user']).profile_img.url
            data['number_of_likes'] = models.Like.objects.filter(post=data['id']).count()
            data['number_of_comments'] = models.Comment.objects.filter(post=data['id']).count()
            data['is_liked'] = models.Like.objects.filter(Q(user=user) & Q(post=data['id'])).exists()
            images = models.Image.objects.filter(post=data['id'])
            data['images'] = [image.image.url for image in images]
            if data['user'] == user:
                data['following_state'] = FollowingState.Own.name
            elif data['user'] in following_users:
                data['following_state'] = FollowingState.Following.name
            else:
                data['following_state'] = FollowingState.Follow.name
        return Response(posts_data, status=status.HTTP_200_OK)
class SearchPostView(generics.ListAPIView):
    """Posts about plans for a given city, visible to the requesting user."""
    permission_classes = [IsAuthenticated]
    serializer_class = ShowPostSerializer

    def get(self, request, *args, **kwargs):
        user_following = self.get_queryset()
        # Bug fix: `.isdigit()` was called on a possibly-None query param.
        city = self.request.query_params.get('city', '')
        if not city.isdigit():
            return Response(data={"error: ": "the page number is not correct."}, status=status.HTTP_400_BAD_REQUEST)
        plans = models.Plan.objects.filter(destination_city=city)
        queryset = models.Post.objects.filter((Q(plan_id__in=plans) & Q(user__id__in=user_following)) |
                                              (Q(plan_id__in=plans) & Q(user__is_public=True)))
        # Bug fix: the queryset was previously returned unserialized as
        # Response data, which is not JSON-renderable.
        data = self.serializer_class(instance=queryset, many=True).data
        return Response(data=data, status=status.HTTP_200_OK)

    def get_queryset(self):
        """Ids of users the requester follows (matches ShowPostView).

        Bug fix: previously returned UserFollowing rows, so
        `user__id__in=...` compared against UserFollowing primary keys
        instead of followed-user ids.
        """
        user = self.request.user.id
        return [item['following_user_id'] for item in
                models.UserFollowing.objects.filter(user_id=user).values('following_user_id')]
class CommentsOnPostView(generics.ListCreateAPIView):
    """List comments of a post (GET) or add a comment to it (POST)."""
    permission_classes = [IsAuthenticated, LikeAndCommentOnPostPermission]
    serializer_class = serializers.CreateCommentSerializer

    def get(self, request, *args, **kwargs):
        post_id = self.kwargs.get('id')
        comments = models.Comment.objects.filter(post=post_id)
        serializer_data = serializers.CreateCommentSerializer(instance=comments, many=True).data
        # Enrich each comment with author identity for the client.
        for data in serializer_data:
            user = models.BegardUser.objects.get(id=data['user'])
            data['user_name'] = user.email
            data['user_profile_img'] = user.profile_img.url
        return Response(data=serializer_data, status=status.HTTP_200_OK)

    def post(self, request, *args, **kwargs):
        data = self.request.data
        data['post'] = self.kwargs.get('id')
        data['user'] = self.request.user.id
        # 404 early if the target post does not exist.
        post = get_object_or_404(models.Post, id=data['post'])
        comment_serializer = serializers.CreateCommentSerializer(data=data)
        comment = None
        # is_valid(True) raises on invalid input, producing a 400 response.
        if comment_serializer.is_valid(True):
            comment = comment_serializer.save()
        comment_data = serializers.CreateCommentSerializer(instance=comment).data
        user = get_object_or_404(models.BegardUser, id=comment_data['user'])
        comment_data['user_name'] = user.email
        comment_data['user_profile_img'] = user.profile_img.url
        return Response(data=comment_data, status=status.HTTP_201_CREATED)
class ListOfFollowingsView(generics.ListAPIView):
    """List the users that the requesting user follows."""
    permission_classes = (IsAuthenticated,)
    serializer_class = FollowingsSerializer

    def get_queryset(self):
        user = self.request.user.id
        return models.UserFollowing.objects.filter(user_id=user)
class DeleteFollowingsView(generics.DestroyAPIView):
    """Unfollow: delete the requester's follow relation to user `id`."""
    permission_classes = (IsAuthenticated,)

    def delete(self, request, *args, **kwargs):
        user_id = self.request.user.id
        following_user_id = self.kwargs.get('id')
        # 404 if the relation does not exist (nothing to unfollow).
        instance = get_object_or_404(models.UserFollowing, user_id=user_id, following_user_id=following_user_id)
        instance.delete()
        return Response()
class FollowersView(generics.ListAPIView):
    permission_classes = (IsAuthenticated,)
    serializer_class = FollowingsSerializer

    def get_queryset(self):
        """Follow relations where the requesting user is the one being followed."""
        current_user_id = self.request.user.id
        followers = models.UserFollowing.objects.filter(following_user_id=current_user_id)
        return followers
class LikeOnPostView(generics.ListCreateAPIView, generics.DestroyAPIView):
    """Like counters (GET), like (POST) and unlike (DELETE) for one post."""
    permission_classes = [IsAuthenticated, LikeAndCommentOnPostPermission]
    serializer_class = serializers.CreateLikeSerializer

    def get(self, request, *args, **kwargs):
        post_id = self.kwargs.get('id')
        user_id = self.request.user.id
        like_numbers = models.Like.objects.filter(post=post_id).count()
        is_liked = models.Like.objects.filter(Q(user=user_id) & Q(post=post_id)).exists()
        return Response(data={'like_numbers': like_numbers, 'is_liked': is_liked}, status=status.HTTP_200_OK)

    def post(self, request, *args, **kwargs):
        data = {
            'user': self.request.user.id,
            'post': self.kwargs.get('id')
        }
        # Lookup only validates existence (404 otherwise); result unused.
        post = get_object_or_404(models.Post, id=data['post'])
        exist_like = models.Like.objects.filter(Q(user=data['user']) & Q(post=data['post'])).exists()
        if exist_like:
            return HttpResponseBadRequest("this post is liked by you.now you are trying to like again.")
        serializer = serializers.CreateLikeSerializer(data=data)
        if serializer.is_valid(True):
            serializer.save()
        return Response()

    def delete(self, request, *args, **kwargs):
        data = {
            'user': self.request.user.id,
            'post': self.kwargs.get('id')
        }
        post = get_object_or_404(models.Post, id=data['post'])
        like = models.Like.objects.filter(Q(user=data['user']) & Q(post=post.id))
        # Deleting a non-existent like is a silent no-op (still 200).
        if like.exists():
            like.delete()
        return Response()
class FollowingRequestView(generics.CreateAPIView):
    """Follow a public user immediately, or file a request for a private one."""
    permission_classes = (IsAuthenticated,)

    def post(self, request, *args, **kwargs):
        data = self.request.data
        data['request_from'] = self.request.user.id
        if not (data.get('request_to') and isinstance(data['request_to'], int)):
            return HttpResponseBadRequest("field 'request_to' with a digit required.")
        if models.UserFollowing.objects.filter(user_id=data['request_from'],
                                               following_user_id=data['request_to']).exists():
            return HttpResponseBadRequest('this user is followed by you, you can not request to follow this user')
        request_to_user = get_object_or_404(models.BegardUser, id=data['request_to'])
        if request_to_user.is_public:
            # Public profile: follow immediately, no approval round-trip.
            follow_user_data = {"user_id": data['request_from'], "following_user_id": data['request_to']}
            serializer = serializers.FollowingsSerializer(data=follow_user_data)
            if serializer.is_valid(True):
                serializer.save()
                return Response(data={"status": "Followed", "follow_request_id": None},
                                status=status.HTTP_201_CREATED)
        else:
            # Private profile: record a pending request for the target to answer.
            serializer = serializers.FollowingRequestSerializer(data=data)
            if serializer.is_valid(True):
                follow_request = serializer.save()
                return Response(data={"status": "Requested", "follow_request_id": follow_request.id},
                                status=status.HTTP_201_CREATED)
        # Bug fix: the status code was passed positionally (as the response
        # body), which yielded a 200 response whose payload was 406.
        return Response(status=status.HTTP_406_NOT_ACCEPTABLE)
class FollowersRequestsView(generics.ListAPIView):
    """get list of followers requests"""
    permission_classes = (IsAuthenticated,)
    serializer_class = serializers.FollowersRequestsSerializer

    def get_queryset(self):
        # Only requests addressed to the current user.
        user = self.request.user
        return models.FollowRequest.objects.filter(request_to=user)
class AnswerFollowRequestView(generics.UpdateAPIView):
    """Accept or reject a follow request"""
    permission_classes = [IsAuthenticated, AnswerFollowRequestPermission]

    def get_object(self):
        follow_request = get_object_or_404(models.FollowRequest, id=self.kwargs.get('id'))
        self.check_object_permissions(request=self.request, obj=follow_request)
        return follow_request

    def patch(self, request, *args, **kwargs):
        # ?action=accept|reject selects the outcome.
        follow_request = self.get_object()
        action = self.request.query_params.get('action')
        if not ((action == ActionOnFollowRequestType.accept.name) or (action == ActionOnFollowRequestType.reject.name)):
            return HttpResponseBadRequest("error: problem in query params.")
        if action == ActionOnFollowRequestType.accept.name:
            # Accept: materialize the follow relation (requester -> target).
            data = {'user_id': follow_request.request_from_id, 'following_user_id': follow_request.request_to_id}
            serializer = serializers.FollowingsSerializer(data=data)
            if serializer.is_valid(True):
                serializer.save()
        # Both outcomes consume the request.
        follow_request.delete()
        return Response()
class DeleteFollowRequestView(generics.DestroyAPIView):
    """Delete a follow request"""
    permission_classes = [IsAuthenticated, DeleteFollowRequestPermission]

    def get_object(self):
        # Permission check ensures only an authorized party can withdraw it.
        follow_request = get_object_or_404(models.FollowRequest, id=self.kwargs.get('id'))
        self.check_object_permissions(request=self.request, obj=follow_request)
        return follow_request
class TopPostsView(generics.ListAPIView):
    """Top 20 highest-rated public plan posts."""
    serializer_class = TopPostSerializer
    permission_classes = [AllowAny]

    def get_queryset(self):
        queryset = models.Post.objects.filter(Q(user__is_public=True) & Q(type='plan_post')).order_by('-rate')[0:20]
        return queryset

    def get(self, request, *args, **kwargs):
        posts = self.get_queryset()
        posts_data = serializers.TopPostSerializer(instance=posts, many=True).data
        # Enrich each post with author/city/cover (several queries per post).
        for data in posts_data:
            data['city'] = get_object_or_404(models.Plan, id=data['plan_id']).destination_city.name
            data['user_name'] = get_object_or_404(models.BegardUser, id=data['user']).email
            data['profile_image'] = get_object_or_404(models.BegardUser, id=data['user']).profile_img.url
            data['cover'] = get_object_or_404(models.Image, post__pk=data['id']).image.url
        return Response(posts_data, status=status.HTTP_200_OK)
class LocationPostView(generics.CreateAPIView):
    """Create a location post with one or more attached images."""
    permission_classes = [IsAuthenticated]
    serializer_class = LocationPostSerializer

    def post(self, request, *args, **kwargs):
        if not self.request.data.get('image'):
            return HttpResponseBadRequest("the images does not exists.")
        images = request.data['image']
        post = self.save_post(request.data)
        post_id = post.pk
        # Persist each uploaded image linked to the new post.
        for image in images:
            modified_data = self.modify_input_for_multiple_files(image['image'], post_id)
            serializer = ImageSerializer(data=modified_data)
            if serializer.is_valid(True):
                serializer.save()
        response_data = self.get_last_post(post)
        return Response(data=response_data, status=status.HTTP_200_OK)

    def get_last_post(self, post):
        """Build the enriched feed representation of the freshly created post."""
        data = serializers.ShowPostSerializer(instance=post).data
        data['user_name'] = post.user.email
        data['user_profile_image'] = post.user.profile_img.url
        data['destination_city'] = post.plan_id.destination_city.name
        data['number_of_likes'] = models.Like.objects.filter(post=post.id).count()
        data['is_liked'] = models.Like.objects.filter(post=post.id, user=self.request.user).exists()
        images = models.Image.objects.filter(post=post.id)
        data['images'] = [image.image.url for image in images]
        # The creator always sees their own post as "Own".
        data['following_state'] = FollowingState.Own.name
        data['number_of_comments'] = models.Comment.objects.filter(post=post.id).count()
        return data

    def modify_input_for_multiple_files(self, image, post):
        """Shape one uploaded image into ImageSerializer input."""
        list_element = {'post': post, 'image': image}
        return list_element

    def save_post(self, data):
        """Create and return the Post owned by the requesting user."""
        data['creation_date'] = datetime.datetime.now()
        data['user'] = self.request.user.id
        serializer = LocationPostSerializer(data=data)
        if serializer.is_valid(True):
            return serializer.save()
class ProfileDetailsView(generics.RetrieveAPIView):
    """Get profile details of a user (counts plus the viewer's follow state)."""
    permission_classes = [AllowAny]

    def get(self, request, *args, **kwargs):
        source_user = self.request.user
        target_user = get_object_or_404(models.BegardUser, id=self.kwargs.get('id'))
        data = dict()
        data['username'] = target_user.email
        data['profile_image'] = target_user.profile_img.url
        data['posts_count'] = models.Post.objects.filter(user=target_user).count()
        data['followings_count'] = models.UserFollowing.objects.filter(user_id=target_user).count()
        data['followers_count'] = models.UserFollowing.objects.filter(following_user_id=target_user).count()
        data['follow_request_id'] = None
        # The viewer's relationship to this profile drives the follow button.
        if source_user == target_user:
            following_state = FollowingState.Own.name
        elif models.UserFollowing.objects.filter(user_id=source_user.id, following_user_id=target_user).exists():
            following_state = FollowingState.Following.name
        elif models.FollowRequest.objects.filter(request_from=source_user.id, request_to=target_user).exists():
            following_state = FollowingState.Requested.name
            # Expose the pending request id so the client can cancel it.
            data['follow_request_id'] = get_object_or_404(models.FollowRequest, request_from=source_user.id,
                                                          request_to=target_user).id
        else:
            following_state = FollowingState.Follow.name
        data['following_state'] = following_state
        return Response(data=data, status=status.HTTP_200_OK)
class UserPostsView(generics.ListAPIView):
    """List of posts of a user"""
    serializer_class = serializers.ShowPostSerializer
    permission_classes = [AllowAny, AllowGetUserPosts]

    def get_queryset(self):
        owner = get_object_or_404(models.BegardUser, id=self.kwargs.get('id'))
        return models.Post.objects.filter(user=owner)

    def get(self, request, *args, **kwargs):
        target_user = get_object_or_404(models.BegardUser, id=self.kwargs.get('id'))
        source_user = self.request.user
        self.queryset = self.get_queryset()
        posts = list(self.queryset)
        serializer_data = ShowPostSerializer(instance=self.queryset, many=True).data
        # Walk post and serialized entry side by side instead of indexing.
        for post, entry in zip(posts, serializer_data):
            entry['user_name'] = post.user.email
            entry['user_profile_image'] = post.user.profile_img.url
            entry['destination_city'] = post.plan_id.destination_city.name
            entry['number_of_likes'] = models.Like.objects.filter(post=post.id).count()
            entry['number_of_comments'] = models.Comment.objects.filter(post=post.id).count()
            entry['is_liked'] = models.Like.objects.filter(post=post.id, user=source_user.id).exists()
            entry['images'] = [img.image.url for img in models.Image.objects.filter(post=post.id)]
            # Viewer's relationship to the post owner.
            if source_user == target_user:
                entry['following_state'] = FollowingState.Own.name
            elif models.UserFollowing.objects.filter(user_id=source_user.id, following_user_id=target_user).exists():
                entry['following_state'] = FollowingState.Following.name
            else:
                entry['following_state'] = FollowingState.Follow.name
        return Response(serializer_data, status.HTTP_200_OK)
class TopPlannerView(generics.ListAPIView):
    """Suggest up to 20 planners (by total likes) the viewer does not follow."""
    permission_classes = [AllowAny]
    serializer_class = TopPlannerSerializer

    def get_queryset(self):
        user_auth = self.request.user.id
        followers = models.UserFollowing.objects.filter(user_id=user_auth)
        follow_requests = models.FollowRequest.objects.filter(request_from=user_auth)
        following_id = [item.following_user_id.id for item in followers]
        follow_requests_id = [item.request_to.id for item in follow_requests]
        # Candidates: everyone except ourselves and users already followed/requested.
        users = models.BegardUser.objects.exclude(Q(pk__in=following_id) | Q(pk=user_auth) |
                                                  Q(pk__in=follow_requests_id))
        users_list = list(users)
        # Score each candidate by the total likes over all of their posts.
        for person in users_list:
            posts = models.Post.objects.filter(user_id=person.id)
            number_of_likes = 0
            for item in posts:
                number_of_likes += models.Like.objects.filter(post_id=item.id).count()
            person.average_rate = number_of_likes
        # BUG FIX: the original sorted ascending and sliced [0:20], which keeps
        # the 20 LOWEST-rated users; sort descending to keep the top 20.
        sorted_list = sorted(users_list, key=lambda x: x.average_rate, reverse=True)[0:20]
        # BUG FIX: snapshot the divisor before normalizing — the original
        # divided by sorted_list[0].average_rate inside the loop, so once the
        # top user was rescaled, every later user was divided by 9.9 instead
        # of the true maximum.
        max_rate = sorted_list[0].average_rate if sorted_list else 0
        for person in users_list:
            if max_rate != 0:
                person.average_rate = (person.average_rate / max_rate) * 9.9
            else:
                person.average_rate = 0
        return sorted_list
class UserPlansView(generics.ListAPIView):
    """Plans of a given user that the requester is allowed to see."""
    serializer_class = UserPlansSerializer
    permission_classes = [AllowAny]

    def get(self, request, *args, **kwargs):
        user_pk = self.kwargs.get('id')
        self_user = self.request.user.id
        followings = models.UserFollowing.objects.filter(user_id=self_user)
        followings_list = list(followings)
        following_id = []
        for item in followings_list:
            following_id.append(item.following_user_id.id)
        # Visible plans: the target's plans if we follow them, or if they are
        # public. (& binds tighter than |, so this is (followed) | (public).)
        plans = models.Plan.objects.filter(Q(user_id__in=following_id) & Q(user_id=user_pk) |
                                           Q(user__is_public=True) & Q(user_id=user_pk))
        data = serializers.UserPlansSerializer(instance=plans, many=True).data
        return Response(data=data, status=status.HTTP_200_OK)
class LocationsOfPlanView(generics.ListAPIView):
    """List of locations of a plan according to 'id' (owner only)."""
    permission_classes = [IsAuthenticated, IsPlanOwner]
    serializer_class = serializers.LocationOfPlanSerializer

    def get_queryset(self):
        return models.PlanItem.objects.filter(plan=self.kwargs.get('id'))
class UserSearchView(generics.ListAPIView):
    """Case-insensitive user search by e-mail substring (``?query=...``)."""
    permission_classes = [AllowAny]
    serializer_class = CustomUserDetailsSerializer

    def get_queryset(self):
        # Default to '' (matches everything) instead of None: the original
        # produced email__icontains=None when the parameter was omitted,
        # which Django rejects at query time.
        query = self.request.query_params.get('query', '')
        result = models.BegardUser.objects.filter(email__icontains=query)
        return result
class UserFollowingView(generics.ListAPIView):
    """Accounts that the user identified by ``id`` is following."""
    permission_classes = [AllowAny, IsPublicOrFollowing]
    serializer_class = serializers.ListOfFollowingsSerializer

    def get_queryset(self):
        user = self.kwargs.get('id')
        return models.UserFollowing.objects.filter(Q(user_id=user))
class UserFollowerView(generics.ListAPIView):
    """Accounts that follow the user identified by ``id``."""
    permission_classes = [AllowAny, IsPublicOrFollowing]
    serializer_class = serializers.ListOfFollowersSerializer

    def get_queryset(self):
        user = self.kwargs.get('id')
        return models.UserFollowing.objects.filter(Q(following_user_id=user))
|
988,748 | 30922b06ccf05d0831c3c4c89e2fc626312e2e59 | """Trivial example: minimize x**2 from any start value"""
import lbfgs
import sys
from scipy.optimize import minimize, rosen, rosen_der
import numpy as np
x0 = np.array([1.3, 0.7])
def f(x, g):
    """Rosenbrock objective for lbfgs.fmin_lbfgs.

    Writes the gradient at *x* into *g* in place and returns the function
    value, as the pylbfgs callback protocol requires.
    """
    g[:] = rosen_der(x)
    # BUG FIX: the original used the Python 2 print statement
    # (`print "one call"`), a SyntaxError in Python 3 — the rest of the file
    # already uses print().
    print("one call")
    return rosen(x)
def progress(x, g, f_x, xnorm, gnorm, step, k, ls):
    """No-op progress callback handed to lbfgs.fmin_lbfgs."""
    # Hook point: print iteration k, f_x and gnorm here to trace a run.
    return None
print("Minimum found", lbfgs.fmin_lbfgs(f, x0, progress))
|
988,749 | eb0e24ce149891cbc81d4e3ea374fe1c99823312 | import json
import hmac
import hashlib
import requests
def main():
    """Read notifications.json, HMAC-sign it, and POST it to the local webhook."""
    with open("notifications.json", "rb") as fd:
        payload = fd.read()
    # The receiver verifies this SHA-256 HMAC against the shared secret.
    signature = hmac.new(b"1234", msg=payload, digestmod=hashlib.sha256).hexdigest()
    requests.post(
        "http://localhost:5000/webhooks/",
        data=payload,
        headers={
            "X-Sqreen-Integrity": signature,
            "Content-Type": "application/json",
        },
    )
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
988,750 | 70da3e252e456f219e9bd4a0a5df5f88cb083378 | import datetime
from nt import replace
from xml.etree.ElementTree import tostring
import psycopg2
from flask import Flask, json, request
from flask_restful import Resource, reqparse
from sqlalchemy.dialects.postgresql import json
from sqlalchemy.dialects.postgresql.dml import insert
from storyboard_root.models.writer_model import WriterModel
# Request parser shared by the Writer resource.
# NOTE(review): every help text says "writer name" even for the id fields —
# looks like copy-paste; confirm the intended messages before changing them.
parser = reqparse.RequestParser()
parser.add_argument("writer_userid", type = int, help ="Please pass a valid writer name")
parser.add_argument("first_name", type = str, help ="Please pass a valid writer name")
parser.add_argument("last_name", type = str, help ="Please pass a valid writer name")
parser.add_argument("writerid", type = str, help ="Please pass a valid writer name")
class Writer(Resource):
    """REST resource for looking up writers."""

    def get(self):
        """Return a single writer by user id when request_code == "1".

        The writer-list branch (any other request_code) is not implemented yet.
        """
        try:
            if request.args["request_code"] == "1":
                print(request.args["writer_userid"])
                writer = WriterModel.get_writer_by_userid(request.args["writer_userid"])
                if writer:
                    return writer.json()
                else:
                    return {"message" : "writer doesn't exist"}
            else:
                pass  # to be implemented: writer list
        except Exception as exc:
            # Narrowed from a bare except: (which also swallowed
            # KeyboardInterrupt/SystemExit) and surface the error instead of
            # silently returning None; also fixes the "occured" typo.
            print("exception occurred:", exc)
            return {"message": "internal server error"}, 500
|
988,751 | 0e14d529ebe7c0479c307fcc87ae6b26ae0e3b5e | from django.conf.urls import url
from . import api
# Routes for the converter API endpoints.
urlpatterns = [
    url("convert/url/$", api.ConverterUrlAPI.as_view()),    # convert from a remote URL
    url("convert/file/$", api.ConverterFileAPI.as_view()),  # convert an uploaded file
]
|
988,752 | 1d04b5cb9c95891a499fb88d96de0012a1dd698b | # coding:utf-8
__author__ = '123woscc'
__date__='2017/7/19'
import os
from multiprocessing.dummy import Pool
import requests
from bs4 import BeautifulSoup
session = requests.session()
domain_url = 'https://www.pixiv.net'
headers = {
'Referer': 'https://www.pixiv.net/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'
}
# Log in and keep the authenticated state on the module-level session.
def pixiv_login(pixiv_id, password):
    """Authenticate against pixiv and return the shared requests session."""
    login_url = 'https://accounts.pixiv.net/login?lang=zh&source=pc&view_type=page&ref=wwwtop_accounts_index'
    soup = BeautifulSoup(session.get(login_url).content, 'lxml')
    # The login form embeds a one-time post_key that must be echoed back.
    post_key = soup.find(id='old-login').input.get('value')
    formdata = {
        'pixiv_id': pixiv_id,
        'password': password,
        'captcha': '',
        'g_recaptcha_response': '',
        'post_key': post_key,
        'source': 'pc',
        'ref': 'wwwtop_accounts_index',
        'return_to': 'http://www.pixiv.net/'
    }
    session.post('https://accounts.pixiv.net/api/login',data=formdata)
    # Print the account nickname from the settings page as a login sanity check.
    print(BeautifulSoup(session.get('http://www.pixiv.net/setting_profile.php').content,'lxml').find('input', id='nick').get('value'))
    return session
# Collect the detail-page links from the international TOP-100 ranking.
def get_top_urls():
    """Return absolute URLs for every entry on the ranking page."""
    ranking_url = 'https://www.pixiv.net/ranking_area.php?type=detail&no=6'
    page = BeautifulSoup(session.get(ranking_url).content, 'lxml')
    # The second <a> inside each ranking item links to the artwork page.
    return [domain_url + entry.find_all('a')[1].get('href')
            for entry in page.find_all('div', class_='ranking-item')]
# Collect the full-size image URLs of one ranking entry.
def get_page_imgs(url):
    """Return the list of original image URLs for the artwork page *url*.

    Returns [] on any scraping failure; the original returned None, which
    made the downstream ``save_imgs`` crash on ``len(None)``.
    """
    try:
        html = session.get(url, headers=headers)
        soup = BeautifulSoup(html.content, 'lxml')
        if soup.find('a', class_='read-more'):
            # Multi-image post: follow "read more" and resolve each sub-page.
            more_url = domain_url + soup.find('a', class_='read-more').get('href')
            html2 = session.get(more_url, headers=headers)
            soup2 = BeautifulSoup(html2.content, 'lxml')
            page = soup2.find_all('a', class_='full-size-container')
            urls = [domain_url + img.get('href') for img in page]
            imgs = []
            for page_url in urls:
                img = BeautifulSoup(session.get(page_url, headers=headers).content, 'lxml').find('img').get('src')
                imgs.append(img)
            return imgs
        else:
            imgs = soup.find_all('img', class_='original-image')
            return [img.get('data-src') for img in imgs]
    except Exception as exc:  # narrowed from a bare except:
        print('获取图片链接失败', url, exc)
        return []
# Download the (single) image of one entry into dir_path.
def save_imgs(urls):
    """Save the image referenced by *urls* (a one-element list) into dir_path.

    Multi-image entries (len > 1) are skipped, as in the original; empty or
    None inputs (a failed scrape) are now skipped too instead of raising
    IndexError/TypeError.
    """
    # makedirs(exist_ok=True) is race-free, unlike exists()+mkdir().
    try:
        os.makedirs(dir_path, exist_ok=True)
    except Exception as e:
        print('文件夹创建失败:', e)
    if not urls or len(urls) > 1:
        return
    url = urls[0]
    file_name = url.split('/')[-1]
    try:
        with open('{}/{}'.format(dir_path, file_name), 'wb') as f:
            f.write(session.get(url, headers=headers).content)
        print('[下载完成]:', file_name)
    except Exception as e:
        print('保存图片失败', url, e)
if __name__ == '__main__':
    # pixiv account e-mail and password (placeholders — fill in real values).
    pixiv_id = 'xxxxx@xx.com'
    password = 'password'
    pixiv_login(pixiv_id, password)
    # Output directory for downloaded images.
    dir_path = './full'
    pages = get_top_urls()
    # Resolve image links concurrently (thread pool, network-bound work).
    pool = Pool(32)
    imgs = pool.map(get_page_imgs, pages)
    pool.close()
    pool.join()
    print(imgs)
    print('开始下载')
    # Download images concurrently.
    pool2 = Pool(16)
    pool2.map(save_imgs, imgs)
    pool2.close()
    pool2.join()
    print('下载完成')
|
988,753 | 8fc95719ecaebb63f472eb327de1fecbf67f9c36 |
# -*- coding: utf8 -*-
import dateparser
from datetime import datetime
from scrapy.http import Request
from alascrapy.spiders.base_spiders.ala_spider import AlaSpider
import alascrapy.lib.dao.incremental_scraping as incremental_utils
from alascrapy.items import ProductItem, ReviewItem
class Mobiltelefon_ruSpider(AlaSpider):
    """Incremental review/product spider for mobiltelefon.ru.

    Only reviews newer than the latest stored PRO review date are scraped.
    """
    name = 'mobiltelefon_ru'
    allowed_domains = ['mobiltelefon.ru']
    base_url = 'https://mobiltelefon.ru/contents_obzor_{}.html'
    start_urls = ['https://mobiltelefon.ru/contents_obzor_0.html']
    page = 0  # current listing page index

    def __init__(self, *args, **kwargs):
        # BUG FIX: the original called super(...).__init__(self, *args, **kwargs),
        # passing `self` twice (once implicitly via super(), once explicitly).
        super(Mobiltelefon_ruSpider, self).__init__(*args, **kwargs)
        # Newest review date already stored; used as the incremental cutoff.
        self.stored_last_date = incremental_utils.get_latest_pro_review_date(
            self.mysql_manager, self.spider_conf["source_id"])
        # In order to test another stored_last_date
        # self.stored_last_date = datetime(2018, 2, 8)

    def parse(self, response):
        """Walk one listing page, yielding requests for reviews newer than
        the stored cutoff, then follow the next page while new items appear."""
        review_sections_xpath = '//section[@class="block_news"]'
        review_sections = response.xpath(review_sections_xpath)
        date = None
        for r_sec in review_sections:
            date_xpath = './/p[@class="line2"]/a[1]/following-'\
                         'sibling::text()[1]'
            date = r_sec.xpath(date_xpath).get()
            # date looks like: | 10 февраля 2019, 14:29 |
            date = date.split(', ')[0]
            date = date.replace('|', '')
            date = date.strip()
            date = dateparser.parse(date,
                                    date_formats=['%d %B %Y'],
                                    languages=['ru', 'es'])
            # BUG FIX: guard against dateparser returning None (unparseable
            # date); the original raised TypeError on the comparison.
            if date and date > self.stored_last_date:
                url_xpath = './@onclick'
                url = r_sec.xpath(url_xpath).get()
                # url looks like:
                # "url_loc='https://mobiltelefon.ru/post_1555664522.html'"
                url = url.split("loc='")[-1]
                url = url.split("';")[0]
                yield Request(url=url,
                              callback=self.parse_product_review,
                              meta={'date': date.strftime("%Y-%m-%d")})
        # Checking whether we should scrape the next page
        if date and date > self.stored_last_date:
            self.page += 1
            yield Request(url=self.base_url.format(self.page),
                          callback=self.parse)

    def get_product_name_based_on_title(self, title):
        """'Clean' the review title to extract the product name."""
        p_name = title
        # Keep only the part before the first colon.
        if u':' in p_name:
            p_name = p_name.split(u':')[0]
        # Strip common Russian review-prefix phrases.
        words_to_remove = [u'Тест',
                           u'Распаковка и обзор',
                           u'Распаковка',
                           u'Обзор',
                           u'обзор',
                           u'Распаковка, игровой тест и первые впечатления от',
                           u'Знакомимся с',
                           u'Тест камеры',
                           u'Как снимает камера',
                           u'Быстрый']
        for w in words_to_remove:
            if w in title:
                p_name = p_name.replace(w, '')
        # Collapse the double spaces left by the removals.
        p_name = p_name.replace('  ', ' ')
        p_name = p_name.strip()
        return p_name

    def parse_product_review(self, response):
        """Yield one ReviewItem and one ProductItem for a review page."""
        title = response.xpath('//meta[@property="og:title"]/@content').get()
        # Skip comparison pieces and off-topic hardware.
        drop_review_words = ['PS4',
                             u'Сравнение',
                             'PocketBook']
        for word in drop_review_words:
            if word in title:
                return
        # Coarse category from title keywords; default is Smartphone.
        cat = None
        if ("Watch" in title) or ("watch" in title):
            cat = "Smartwatch"
        elif u'планшет' in title:
            cat = "Tablet"
        else:
            cat = "Smartphone"
        # REVIEW ITEM -------------------------------------------------------
        review_xpaths = {
            'TestTitle': '//meta[@property="og:title"]/@content',
            'TestSummary': '//meta[@name="description"]/@content',
        }
        review = self.init_item_by_xpaths(response, "review", review_xpaths)
        title = review['TestTitle']
        review['ProductName'] = self.get_product_name_based_on_title(title)
        # Author appears as "© Имя Фамилия." before the Mobiltelefon link.
        author_xpath = '//a[text()="Mobiltelefon"]/preceding-sibling::text()'
        author = response.xpath(author_xpath).get()
        author = author.replace(u'©', '')
        author = author.replace('.', '')
        author = author.strip()
        review['Author'] = author
        review['TestDateText'] = response.meta.get('date')
        review['DBaseCategoryName'] = 'PRO'
        # source_internal_id is the numeric part of .../post_<id>.html
        sid = response.url.split('post_')[-1]
        sid = sid.split('.html')[0]
        review['source_internal_id'] = sid
        # PRODUCT ITEM ------------------------------------------------------
        product = ProductItem()
        product['source_internal_id'] = review['source_internal_id']
        product['OriginalCategoryName'] = cat
        product['ProductName'] = review['ProductName']
        pic_url_xpath = '//meta[@property="og:image"]/@content'
        product['PicURL'] = response.xpath(pic_url_xpath).get()
        product['TestUrl'] = response.url
        yield review
        yield product
|
988,754 | ff480f75d9b23bf4fe93810ba357f74b941bd435 | # Generated by Django 3.0.7 on 2020-06-25 05:06
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: removes the (no longer used) Apply model.

    dependencies = [
        ('app', '0003_auto_20200622_0609'),
    ]

    operations = [
        migrations.DeleteModel(
            name='Apply',
        ),
    ]
|
988,755 | f06bfb4327fe1798bb7ee5f653c4985093aaf022 | from django.contrib.auth.models import User
from rest_framework import serializers
from .models import Post
class UserSerializer(serializers.ModelSerializer):
    """User representation exposing the ids of the user's posts."""
    posts = serializers.PrimaryKeyRelatedField(many=True, queryset=Post.objects.all())

    class Meta:
        model = User
        fields = ('id', 'username', 'email', 'password', 'posts')
        # BUG FIX: `write_only_fields` was deprecated and removed from DRF's
        # Meta options — it was silently ignored, so the password field was
        # serialized into responses. `extra_kwargs` is the supported spelling.
        extra_kwargs = {'password': {'write_only': True}}
class PostSerializer(serializers.Serializer):
    """Manual (non-Model) serializer for creating and updating posts."""
    id = serializers.IntegerField(read_only=True)
    title = serializers.CharField(required=False, allow_blank=True, max_length=100)
    content = serializers.CharField(required=False, allow_blank=True, max_length=1000)
    # Read-only echo of the owning user's username.
    user = serializers.ReadOnlyField(source='user.username')

    def create(self, validated_data):
        """
        Create a new Post.
        """
        return Post.objects.create(**validated_data)

    def update(self, instance, validated_data):
        """
        Update an existing Post; fields absent from the input are kept.
        """
        instance.title = validated_data.get('title', instance.title)
        instance.content = validated_data.get('content', instance.content)
        instance.save()
        return instance
|
988,756 | b1169ea1ad81d81ac5c4d89d31ee979d044761e5 | from nets import alexnet_224
from nets import alexnet_cifar
# Registry mapping network names to their implementing modules.
nets_map = {
    'alexnet_224' : alexnet_224,
    'alexnet_cifar' : alexnet_cifar,
}
def get_network(name):
    """Return the network module registered in ``nets_map`` under *name*.

    Raises:
        ValueError: if *name* is not a registered network.
    """
    if name not in nets_map:
        # Fixed the garbled original message ("Name of net unkonw %s").
        raise ValueError('Unknown net name: %s' % name)
    return nets_map[name]
988,757 | fd31c4db53b972b9d9a84db2cc34c104361d1edd | #! /usr/bin/env python
"""
A helper executable for xgui; creates one GUI display window
"""
import sys
from gui import gui
if __name__ == "__main__":
gui.display(sys.argv[1]) |
988,758 | b245a429c86d223ab7b785ffa798e9a3071090b2 | número = 0
contador = 0
soma = 0
while True:
número=int(input('Digite um número:'))
if número == 999:
break
contador = contador + 1
soma = soma + número
print('Na lista existem {} números e a soma entre eles vale {}.'.format(contador,soma))
print('Fim! Até breve!') |
988,759 | 7db7182cf93a0c5b96d702c2c60a8c84d9b32d5b | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Functional tests using WebTest."""
import datetime as dt
from rest_framework import status as http_status
import logging
import unittest
import markupsafe
import mock
import pytest
from nose.tools import * # noqa: F403
import re
from django.utils import timezone
from addons.wiki.utils import to_mongo_key
from framework.auth import exceptions as auth_exc
from framework.auth.core import Auth
from tests.base import OsfTestCase
from tests.base import fake
from osf_tests.factories import (
fake_email,
AuthUserFactory,
NodeFactory,
PreprintFactory,
PreprintProviderFactory,
PrivateLinkFactory,
ProjectFactory,
RegistrationFactory,
SubjectFactory,
UserFactory,
UnconfirmedUserFactory,
UnregUserFactory,
)
from osf.utils import permissions
from addons.wiki.models import WikiPage, WikiVersion
from addons.wiki.tests.factories import WikiFactory, WikiVersionFactory
from website import settings, language
from addons.osfstorage.models import OsfStorageFile
from website.util import web_url_for, api_url_for
from api_tests import utils as test_utils
logging.getLogger('website.project.model').setLevel(logging.ERROR)
def assert_in_html(member, container, **kwargs):
    """Assert that *member*, once HTML-escaped, appears in *container*."""
    return assert_in(markupsafe.escape(member), container, **kwargs)
def assert_not_in_html(member, container, **kwargs):
    """Assert that *member*, once HTML-escaped, does NOT appear in *container*."""
    return assert_not_in(markupsafe.escape(member), container, **kwargs)
class TestDisabledUser(OsfTestCase):
    """A disabled account's profile must return 410 Gone, not be served."""

    def setUp(self):
        super(TestDisabledUser, self).setUp()
        self.user = UserFactory()
        self.user.set_password('Korben Dallas')
        self.user.is_disabled = True
        self.user.save()

    def test_profile_disabled_returns_401(self):
        # NOTE(review): the name says 401 but the asserted status is 410 (Gone).
        res = self.app.get(self.user.url, expect_errors=True)
        assert_equal(res.status_code, 410)
class TestAnUnregisteredUser(OsfTestCase):
    """Anonymous visitors are redirected to login for protected pages."""

    def test_cant_see_profile_if_not_logged_in(self):
        url = web_url_for('profile_view')
        res = self.app.get(url)
        res = res.follow()
        # Permanent redirect chain ends at the login page.
        assert_equal(res.status_code, 308)
        assert_in('/login/', res.headers['Location'])
@pytest.mark.enable_bookmark_creation
class TestAUser(OsfTestCase):
    """End-to-end (WebTest) checks for a logged-in user's core pages:
    profile, dashboard, project permissions buttons, and wiki views."""

    def setUp(self):
        super(TestAUser, self).setUp()
        self.user = AuthUserFactory()
        self.auth = self.user.auth

    def test_can_see_profile_url(self):
        res = self.app.get(self.user.url).maybe_follow()
        assert_in(self.user.url, res)

    # `GET /login/` without parameters is redirected to `/dashboard/` page which has `@must_be_logged_in` decorator
    # if user is not logged in, she/he is further redirected to CAS login page
    def test_is_redirected_to_cas_if_not_logged_in_at_login_page(self):
        res = self.app.get('/login/').follow()
        assert_equal(res.status_code, 302)
        location = res.headers.get('Location')
        assert_in('login?service=', location)

    def test_is_redirected_to_dashboard_if_already_logged_in_at_login_page(self):
        res = self.app.get('/login/', auth=self.user.auth)
        assert_equal(res.status_code, 302)
        assert 'dashboard' in res.headers.get('Location')

    def test_register_page(self):
        res = self.app.get('/register/')
        assert_equal(res.status_code, 200)

    def test_is_redirected_to_dashboard_if_already_logged_in_at_register_page(self):
        res = self.app.get('/register/', auth=self.user.auth)
        assert_equal(res.status_code, 302)
        assert 'dashboard' in res.headers.get('Location')

    def test_sees_projects_in_her_dashboard(self):
        # the user already has a project
        project = ProjectFactory(creator=self.user)
        project.add_contributor(self.user)
        project.save()
        res = self.app.get('/myprojects/', auth=self.user.auth)
        assert_in('Projects', res)  # Projects heading

    def test_does_not_see_osffiles_in_user_addon_settings(self):
        res = self.app.get('/settings/addons/', auth=self.auth, auto_follow=True)
        assert_not_in('OSF Storage', res)

    def test_sees_osffiles_in_project_addon_settings(self):
        project = ProjectFactory(creator=self.user)
        project.add_contributor(
            self.user,
            permissions=permissions.ADMIN,
            save=True)
        res = self.app.get('/{0}/addons/'.format(project._primary_key), auth=self.auth, auto_follow=True)
        assert_in('OSF Storage', res)

    def test_sees_correct_title_on_dashboard(self):
        # User goes to dashboard
        res = self.app.get('/myprojects/', auth=self.auth, auto_follow=True)
        title = res.html.title.string
        assert_equal('OSF | My Projects', title)

    def test_can_see_make_public_button_if_admin(self):
        # User is a contributor on a project
        project = ProjectFactory()
        project.add_contributor(
            self.user,
            permissions=permissions.ADMIN,
            save=True)
        # User goes to the project page
        res = self.app.get(project.url, auth=self.auth).maybe_follow()
        assert_in('Make Public', res)

    def test_cant_see_make_public_button_if_not_admin(self):
        # User is a contributor on a project
        project = ProjectFactory()
        project.add_contributor(
            self.user,
            permissions=permissions.WRITE,
            save=True)
        # User goes to the project page
        res = self.app.get(project.url, auth=self.auth).maybe_follow()
        assert_not_in('Make Public', res)

    def test_can_see_make_private_button_if_admin(self):
        # User is a contributor on a project
        project = ProjectFactory(is_public=True)
        project.add_contributor(
            self.user,
            permissions=permissions.ADMIN,
            save=True)
        # User goes to the project page
        res = self.app.get(project.url, auth=self.auth).maybe_follow()
        assert_in('Make Private', res)

    def test_cant_see_make_private_button_if_not_admin(self):
        # User is a contributor on a project
        project = ProjectFactory(is_public=True)
        project.add_contributor(
            self.user,
            permissions=permissions.WRITE,
            save=True)
        # User goes to the project page
        res = self.app.get(project.url, auth=self.auth).maybe_follow()
        assert_not_in('Make Private', res)

    def test_sees_logs_on_a_project(self):
        project = ProjectFactory(is_public=True)
        # User goes to the project's page
        res = self.app.get(project.url, auth=self.auth).maybe_follow()
        # Can see log event
        assert_in('created', res)

    def test_no_wiki_content_message(self):
        project = ProjectFactory(creator=self.user)
        # Goes to project's wiki, where there is no content
        res = self.app.get('/{0}/wiki/home/'.format(project._primary_key), auth=self.auth)
        # Sees a message indicating no content
        assert_in('Add important information, links, or images here to describe your project.', res)
        # Sees that edit panel is open by default when home wiki has no content
        assert_in('panelsUsed: ["view", "menu", "edit"]', res)

    def test_wiki_content(self):
        project = ProjectFactory(creator=self.user)
        wiki_page_name = 'home'
        wiki_content = 'Kittens'
        wiki_page = WikiFactory(
            user=self.user,
            node=project,
        )
        wiki = WikiVersionFactory(
            wiki_page=wiki_page,
            content=wiki_content
        )
        res = self.app.get('/{0}/wiki/{1}/'.format(
            project._primary_key,
            wiki_page_name,
        ), auth=self.auth)
        assert_not_in('Add important information, links, or images here to describe your project.', res)
        assert_in(wiki_content, res)
        # Edit panel stays closed when the wiki already has content.
        assert_in('panelsUsed: ["view", "menu"]', res)

    def test_wiki_page_name_non_ascii(self):
        project = ProjectFactory(creator=self.user)
        non_ascii = to_mongo_key('WöRlÐé')
        WikiPage.objects.create_for_node(project, 'WöRlÐé', 'new content', Auth(self.user))
        wv = WikiVersion.objects.get_for_node(project, non_ascii)
        assert wv.wiki_page.page_name.upper() == non_ascii.upper()

    def test_noncontributor_cannot_see_wiki_if_no_content(self):
        user2 = UserFactory()
        # user2 creates a public project and adds no wiki content
        project = ProjectFactory(creator=user2, is_public=True)
        # self navigates to project
        res = self.app.get(project.url).maybe_follow()
        # Should not see wiki widget (since non-contributor and no content)
        assert_not_in('Add important information, links, or images here to describe your project.', res)

    def test_wiki_does_not_exist(self):
        project = ProjectFactory(creator=self.user)
        res = self.app.get('/{0}/wiki/{1}/'.format(
            project._primary_key,
            'not a real page yet',
        ), auth=self.auth, expect_errors=True)
        assert_in('Add important information, links, or images here to describe your project.', res)

    def test_sees_own_profile(self):
        res = self.app.get('/profile/', auth=self.auth)
        td1 = res.html.find('td', text=re.compile(r'Public(.*?)Profile'))
        td2 = td1.find_next_sibling('td')
        assert_equal(td2.text, self.user.display_absolute_url)

    def test_sees_another_profile(self):
        user2 = UserFactory()
        res = self.app.get(user2.url, auth=self.auth)
        td1 = res.html.find('td', text=re.compile(r'Public(.*?)Profile'))
        td2 = td1.find_next_sibling('td')
        assert_equal(td2.text, user2.display_absolute_url)
@pytest.mark.enable_bookmark_creation
class TestComponents(OsfTestCase):
    """Component pages: parent linkage, settings visibility by permission."""

    def setUp(self):
        super(TestComponents, self).setUp()
        self.user = AuthUserFactory()
        self.consolidate_auth = Auth(user=self.user)
        self.project = ProjectFactory(creator=self.user)
        self.project.add_contributor(contributor=self.user, auth=self.consolidate_auth)
        # A non-project component
        self.component = NodeFactory(
            category='hypothesis',
            creator=self.user,
            parent=self.project,
        )
        self.component.save()
        # Toggle privacy so the component accrues log entries.
        self.component.set_privacy('public', self.consolidate_auth)
        self.component.set_privacy('private', self.consolidate_auth)
        self.project.save()
        self.project_url = self.project.web_url_for('view_project')

    def test_sees_parent(self):
        res = self.app.get(self.component.url, auth=self.user.auth).maybe_follow()
        parent_title = res.html.find_all('h2', class_='node-parent-title')
        assert_equal(len(parent_title), 1)
        assert_in(self.project.title, parent_title[0].text)  # Bs4 will handle unescaping HTML here

    def test_delete_project(self):
        res = self.app.get(
            self.component.url + 'settings/',
            auth=self.user.auth
        ).maybe_follow()
        assert_in(
            'Delete {0}'.format(self.component.project_or_component),
            res
        )

    def test_cant_delete_project_if_not_admin(self):
        non_admin = AuthUserFactory()
        self.component.add_contributor(
            non_admin,
            permissions=permissions.WRITE,
            auth=self.consolidate_auth,
            save=True,
        )
        res = self.app.get(
            self.component.url + 'settings/',
            auth=non_admin.auth
        ).maybe_follow()
        assert_not_in(
            'Delete {0}'.format(self.component.project_or_component),
            res
        )

    def test_can_configure_comments_if_admin(self):
        res = self.app.get(
            self.component.url + 'settings/',
            auth=self.user.auth,
        ).maybe_follow()
        assert_in('Commenting', res)

    def test_cant_configure_comments_if_not_admin(self):
        non_admin = AuthUserFactory()
        self.component.add_contributor(
            non_admin,
            permissions=permissions.WRITE,
            auth=self.consolidate_auth,
            save=True,
        )
        res = self.app.get(
            self.component.url + 'settings/',
            auth=non_admin.auth
        ).maybe_follow()
        assert_not_in('Commenting', res)

    def test_components_should_have_component_list(self):
        res = self.app.get(self.component.url, auth=self.user.auth)
        assert_in('Components', res)
@pytest.mark.enable_bookmark_creation
class TestPrivateLinkView(OsfTestCase):
    """Anonymous view-only links: contributor anonymization and warnings."""

    def setUp(self):
        super(TestPrivateLinkView, self).setUp()
        self.user = AuthUserFactory()  # Is NOT a contributor
        self.project = ProjectFactory(is_public=False)
        self.link = PrivateLinkFactory(anonymous=True)
        self.link.nodes.add(self.project)
        self.link.save()
        self.project_url = self.project.web_url_for('view_project')

    def test_anonymous_link_hide_contributor(self):
        res = self.app.get(self.project_url, {'view_only': self.link.key})
        assert_in('Anonymous Contributors', res.body.decode())
        assert_not_in(self.user.fullname, res)

    def test_anonymous_link_hides_citations(self):
        res = self.app.get(self.project_url, {'view_only': self.link.key})
        assert_not_in('Citation:', res)

    def test_no_warning_for_read_only_user_with_valid_link(self):
        # A read contributor using a non-anonymous link sees no banner.
        link2 = PrivateLinkFactory(anonymous=False)
        link2.nodes.add(self.project)
        link2.save()
        self.project.add_contributor(
            self.user,
            permissions=permissions.READ,
            save=True,
        )
        res = self.app.get(self.project_url, {'view_only': link2.key},
                           auth=self.user.auth)
        assert_not_in(
            'is being viewed through a private, view-only link. '
            'Anyone with the link can view this project. Keep '
            'the link safe.',
            res.body.decode()
        )

    def test_no_warning_for_read_only_user_with_invalid_link(self):
        self.project.add_contributor(
            self.user,
            permissions=permissions.READ,
            save=True,
        )
        res = self.app.get(self.project_url, {'view_only': 'not_valid'},
                           auth=self.user.auth)
        assert_not_in(
            'is being viewed through a private, view-only link. '
            'Anyone with the link can view this project. Keep '
            'the link safe.',
            res.body.decode()
        )
@pytest.mark.enable_bookmark_creation
class TestMergingAccounts(OsfTestCase):
    """Merging a duplicate account into a master: contributor display and
    the merged-account banner on the dupe's profile."""

    def setUp(self):
        super(TestMergingAccounts, self).setUp()
        self.user = UserFactory.build()
        self.user.fullname = "tess' test string"
        self.user.set_password('science')
        self.user.save()
        self.dupe = UserFactory.build()
        self.dupe.set_password('example')
        self.dupe.save()

    def test_merged_user_is_not_shown_as_a_contributor(self):
        project = ProjectFactory(is_public=True)
        # Both the master and dupe are contributors
        project.add_contributor(self.dupe, log=False)
        project.add_contributor(self.user, log=False)
        project.save()
        # At the project page, both are listed as contributors
        res = self.app.get(project.url).maybe_follow()
        assert_in_html(self.user.fullname, res)
        assert_in_html(self.dupe.fullname, res)
        # The accounts are merged
        self.user.merge_user(self.dupe)
        self.user.save()
        # Now only the master user is shown at the project page
        res = self.app.get(project.url).maybe_follow()
        assert_in_html(self.user.fullname, res)
        assert_true(self.dupe.is_merged)
        assert_not_in(self.dupe.fullname, res)

    def test_merged_user_has_alert_message_on_profile(self):
        # Master merges dupe
        self.user.merge_user(self.dupe)
        self.user.save()
        # At the dupe user's profile there is an alert message at the top
        # indicating that the user is merged
        res = self.app.get('/profile/{0}/'.format(self.dupe._primary_key)).maybe_follow()
        assert_in('This account has been merged', res)
@pytest.mark.enable_bookmark_creation
class TestShortUrls(OsfTestCase):
    """Short ("deep") URLs must render the same page body as the canonical URLs."""

    def setUp(self):
        super(TestShortUrls, self).setUp()
        self.user = AuthUserFactory()
        self.auth = self.user.auth
        self.consolidate_auth = Auth(user=self.user)
        self.project = ProjectFactory(creator=self.user)
        # A non-project component
        self.component = NodeFactory(parent=self.project, category='hypothesis', creator=self.user)
        # Hack: Add some logs to component; should be unnecessary pending
        # improvements to factories from @rliebz
        self.component.set_privacy('public', auth=self.consolidate_auth)
        self.component.set_privacy('private', auth=self.consolidate_auth)
        self.wiki = WikiFactory(
            user=self.user,
            node=self.component,
        )

    def _url_to_body(self, url):
        # Fetch `url`, follow any redirect, and return the normalized page body
        # so deep and canonical URLs can be compared directly.
        return self.app.get(
            url,
            auth=self.auth
        ).maybe_follow(
            auth=self.auth,
        ).normal_body

    # In the following tests, we need to patch `framework.csrf.handlers.get_current_user_id`
    # because in `framework.csrf.handlers.after_request`, the call to `get_current_user_id`
    # will always return None when we make requests with basic auth. That means csrf_token
    # for every basic auth request will be different, which should be the correct behavior.
    # But it breaks the assertions because the server-side rendered forms in the body carries different
    # csrf tokens.
    # The original tests are written without the patch, and they pass because
    # `get_current_user_id` returned a truthy value even for basic auth requests
    # because of some hack that we did, resulting in same csrf token across different basic auth requests.
    def test_project_url(self):
        with mock.patch('framework.csrf.handlers.get_current_user_id', return_value=self.user._id):
            assert_equal(
                self._url_to_body(self.project.deep_url),
                self._url_to_body(self.project.url),
            )

    def test_component_url(self):
        with mock.patch('framework.csrf.handlers.get_current_user_id', return_value=self.user._id):
            assert_equal(
                self._url_to_body(self.component.deep_url),
                self._url_to_body(self.component.url),
            )

    def test_wiki_url(self):
        with mock.patch('framework.csrf.handlers.get_current_user_id', return_value=self.user._id):
            assert_equal(
                self._url_to_body(self.wiki.deep_url),
                self._url_to_body(self.wiki.url),
            )
@pytest.mark.enable_bookmark_creation
@pytest.mark.enable_implicit_clean
class TestClaiming(OsfTestCase):
    """Claim-account flow for unregistered contributors invited to a project."""

    def setUp(self):
        super(TestClaiming, self).setUp()
        # The registered user who invites unregistered contributors.
        self.referrer = AuthUserFactory()
        self.project = ProjectFactory(creator=self.referrer, is_public=True)

    def test_correct_name_shows_in_contributor_list(self):
        name1, email = fake.name(), fake_email()
        UnregUserFactory(fullname=name1, email=email)
        name2, email = fake.name(), fake_email()
        # Added with different name
        # NOTE(review): `email` is re-generated above, so the contributor added
        # here is distinct from the UnregUserFactory user -- confirm this is the
        # intended setup rather than reusing the first email.
        self.project.add_unregistered_contributor(fullname=name2,
            email=email, auth=Auth(self.referrer))
        self.project.save()
        res = self.app.get(self.project.url, auth=self.referrer.auth)
        # Correct name is shown
        assert_in_html(name2, res)
        assert_not_in(name1, res)

    def test_user_can_set_password_on_claim_page(self):
        name, email = fake.name(), fake_email()
        new_user = self.project.add_unregistered_contributor(
            email=email,
            fullname=name,
            auth=Auth(self.referrer)
        )
        self.project.save()
        claim_url = new_user.get_claim_url(self.project._primary_key)
        res = self.app.get(claim_url)
        self.project.reload()
        assert_in('Set Password', res)
        form = res.forms['setPasswordForm']
        #form['username'] = new_user.username #Removed as long as E-mail can't be updated.
        form['password'] = 'killerqueen'
        form['password2'] = 'killerqueen'
        res = form.submit().follow()
        new_user.reload()
        # Submitting the claim form sets the chosen password on the account.
        assert_true(new_user.check_password('killerqueen'))

    def test_sees_is_redirected_if_user_already_logged_in(self):
        name, email = fake.name(), fake_email()
        new_user = self.project.add_unregistered_contributor(
            email=email,
            fullname=name,
            auth=Auth(self.referrer)
        )
        self.project.save()
        existing = AuthUserFactory()
        claim_url = new_user.get_claim_url(self.project._primary_key)
        # a user is already logged in, so the claim page redirects instead of rendering
        res = self.app.get(claim_url, auth=existing.auth, expect_errors=True)
        assert_equal(res.status_code, 302)

    def test_unregistered_users_names_are_project_specific(self):
        name1, name2, email = fake.name(), fake.name(), fake_email()
        project2 = ProjectFactory(creator=self.referrer)
        # different projects use different names for the same unreg contributor
        self.project.add_unregistered_contributor(
            email=email,
            fullname=name1,
            auth=Auth(self.referrer)
        )
        self.project.save()
        project2.add_unregistered_contributor(
            email=email,
            fullname=name2,
            auth=Auth(self.referrer)
        )
        project2.save()
        self.app.authenticate(*self.referrer.auth)
        # Each project displays a different name in the contributor list
        res = self.app.get(self.project.url)
        assert_in_html(name1, res)
        res2 = self.app.get(project2.url)
        assert_in_html(name2, res2)

    @unittest.skip('as long as E-mails cannot be changed')
    def test_cannot_set_email_to_a_user_that_already_exists(self):
        reg_user = UserFactory()
        name, email = fake.name(), fake_email()
        new_user = self.project.add_unregistered_contributor(
            email=email,
            fullname=name,
            auth=Auth(self.referrer)
        )
        self.project.save()
        # Goes to claim url and successfully claims account
        claim_url = new_user.get_claim_url(self.project._primary_key)
        res = self.app.get(claim_url)
        self.project.reload()
        assert_in('Set Password', res)
        form = res.forms['setPasswordForm']
        # Fills out an email that is the username of another user
        form['username'] = reg_user.username
        form['password'] = 'killerqueen'
        form['password2'] = 'killerqueen'
        res = form.submit().maybe_follow(expect_errors=True)
        assert_in(
            language.ALREADY_REGISTERED.format(email=reg_user.username),
            res
        )

    def test_correct_display_name_is_shown_at_claim_page(self):
        original_name = fake.name()
        unreg = UnregUserFactory(fullname=original_name)
        different_name = fake.name()
        new_user = self.project.add_unregistered_contributor(
            email=unreg.username,
            fullname=different_name,
            auth=Auth(self.referrer),
        )
        self.project.save()
        claim_url = new_user.get_claim_url(self.project._primary_key)
        res = self.app.get(claim_url)
        # Correct name (different_name) should be on page
        assert_in_html(different_name, res)
class TestConfirmingEmail(OsfTestCase):
    """Email confirmation links and protection of other users' email settings."""

    def setUp(self):
        super(TestConfirmingEmail, self).setUp()
        self.user = UnconfirmedUserFactory()
        self.confirmation_url = self.user.get_confirmation_url(
            self.user.username,
            external=False,
        )
        self.confirmation_token = self.user.get_confirmation_token(
            self.user.username
        )

    def _update_user_as(self, payload, actor):
        # Hit the update_user API endpoint authenticated as `actor`.
        url = api_url_for('update_user')
        return self.app.put_json(url, payload, auth=actor.auth, expect_errors=True)

    def test_cannot_remove_another_user_email(self):
        victim = AuthUserFactory()
        attacker = AuthUserFactory()
        payload = {'id': victim.username, 'emails': [{'address': victim.username}]}
        res = self._update_user_as(payload, attacker)
        assert_equal(res.status_code, 403)

    def test_cannnot_make_primary_email_for_another_user(self):
        victim = AuthUserFactory()
        attacker = AuthUserFactory()
        email = 'test@cos.io'
        victim.emails.create(address=email)
        victim.save()
        payload = {
            'id': victim.username,
            'emails': [
                {'address': victim.username, 'primary': False, 'confirmed': True},
                {'address': email, 'primary': True, 'confirmed': True},
            ],
        }
        res = self._update_user_as(payload, attacker)
        assert_equal(res.status_code, 403)

    def test_cannnot_add_email_for_another_user(self):
        victim = AuthUserFactory()
        attacker = AuthUserFactory()
        email = 'test@cos.io'
        payload = {
            'id': victim.username,
            'emails': [
                {'address': victim.username, 'primary': True, 'confirmed': True},
                {'address': email, 'primary': False, 'confirmed': False},
            ],
        }
        res = self._update_user_as(payload, attacker)
        assert_equal(res.status_code, 403)

    def test_error_page_if_confirm_link_is_used(self):
        # Confirming first invalidates the token, so revisiting the link errors.
        self.user.confirm_email(self.confirmation_token)
        self.user.save()
        res = self.app.get(self.confirmation_url, expect_errors=True)
        assert_in(auth_exc.InvalidTokenError.message_short, res)
        assert_equal(res.status_code, http_status.HTTP_400_BAD_REQUEST)
@pytest.mark.enable_implicit_clean
@pytest.mark.enable_bookmark_creation
class TestClaimingAsARegisteredUser(OsfTestCase):
    """An already-registered user claiming an unregistered contributor record."""

    def setUp(self):
        super(TestClaimingAsARegisteredUser, self).setUp()
        self.referrer = AuthUserFactory()
        self.project = ProjectFactory(creator=self.referrer, is_public=True)
        name, email = fake.name(), fake_email()
        # The unregistered contributor whose record will be claimed.
        self.user = self.project.add_unregistered_contributor(
            fullname=name,
            email=email,
            auth=Auth(user=self.referrer)
        )
        self.project.save()

    def test_claim_user_registered_with_correct_password(self):
        reg_user = AuthUserFactory()  # NOTE: AuthUserFactory sets password as 'queenfan86'
        url = self.user.get_claim_url(self.project._primary_key)
        # Follow to password re-enter page
        res = self.app.get(url, auth=reg_user.auth).follow(auth=reg_user.auth)
        # verify that the "Claim Account" form is returned
        assert_in('Claim Contributor', res.body.decode())
        form = res.forms['claimContributorForm']
        form['password'] = 'queenfan86'
        res = form.submit(auth=reg_user.auth)
        res = res.follow(auth=reg_user.auth)
        self.project.reload()
        self.user.reload()
        # user is now a contributor to the project
        assert_in(reg_user, self.project.contributors)
        # the unregistered user (self.user) is removed as a contributor, and their
        assert_not_in(self.user, self.project.contributors)
        # unclaimed record for the project has been deleted
        assert_not_in(self.project, self.user.unclaimed_records)

    def test_claim_user_registered_preprint_with_correct_password(self):
        preprint = PreprintFactory(creator=self.referrer)
        name, email = fake.name(), fake_email()
        unreg_user = preprint.add_unregistered_contributor(
            fullname=name,
            email=email,
            auth=Auth(user=self.referrer)
        )
        reg_user = AuthUserFactory()  # NOTE: AuthUserFactory sets password as 'queenfan86'
        url = unreg_user.get_claim_url(preprint._id)
        # Follow to password re-enter page
        res = self.app.get(url, auth=reg_user.auth).follow(auth=reg_user.auth)
        # verify that the "Claim Account" form is returned
        assert_in('Claim Contributor', res.body.decode())
        form = res.forms['claimContributorForm']
        form['password'] = 'queenfan86'
        res = form.submit(auth=reg_user.auth)
        preprint.reload()
        unreg_user.reload()
        # user is now a contributor to the preprint
        assert_in(reg_user, preprint.contributors)
        # the unregistered user (unreg_user) is removed as a contributor, and their
        assert_not_in(unreg_user, preprint.contributors)
        # unclaimed record for the preprint has been deleted
        assert_not_in(preprint, unreg_user.unclaimed_records)
class TestResendConfirmation(OsfTestCase):
    """The resend-confirmation page: rendering, sending rules, and throttling."""

    def setUp(self):
        super(TestResendConfirmation, self).setUp()
        self.unconfirmed_user = UnconfirmedUserFactory()
        self.confirmed_user = UserFactory()
        self.get_url = web_url_for('resend_confirmation_get')
        self.post_url = web_url_for('resend_confirmation_post')

    def _submit_email(self, address):
        # Load the resend-confirmation page and submit `address` through its form.
        form = self.app.get(self.get_url).forms['resendForm']
        form['email'] = address
        return form.submit()

    # test that resend confirmation page is loaded correctly
    def test_resend_confirmation_get(self):
        res = self.app.get(self.get_url)
        assert_equal(res.status_code, 200)
        assert_in('Resend Confirmation', res.body.decode())
        assert_in('resendForm', res.forms)

    # test that unconfirmed user can receive resend confirmation email
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_can_receive_resend_confirmation_email(self, mock_send_mail):
        res = self._submit_email(self.unconfirmed_user.unconfirmed_emails[0])
        # A mail goes out and the user lands on the POST endpoint.
        assert_true(mock_send_mail.called)
        assert_equal(res.status_code, 200)
        assert_equal(res.request.path, self.post_url)
        assert_in_html('If there is an OSF account', res)

    # test that confirmed user cannot receive resend confirmation email
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_cannot_receive_resend_confirmation_email_1(self, mock_send_mail):
        res = self._submit_email(self.confirmed_user.emails.first().address)
        # No mail; the page explains the address is already confirmed.
        assert_false(mock_send_mail.called)
        assert_equal(res.status_code, 200)
        assert_equal(res.request.path, self.post_url)
        assert_in_html('has already been confirmed', res)

    # test that non-existing user cannot receive resend confirmation email
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_cannot_receive_resend_confirmation_email_2(self, mock_send_mail):
        res = self._submit_email('random@random.com')
        # No mail, but the response is deliberately non-revealing.
        assert_false(mock_send_mail.called)
        assert_equal(res.status_code, 200)
        assert_equal(res.request.path, self.post_url)
        assert_in_html('If there is an OSF account', res)

    # test that user cannot submit resend confirmation request too quickly
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_cannot_resend_confirmation_twice_quickly(self, mock_send_mail):
        # NOTE(review): sibling tests use unconfirmed_emails[0]; `.email` here
        # presumably resolves to the same address -- confirm against the user model.
        form = self.app.get(self.get_url).forms['resendForm']
        form['email'] = self.unconfirmed_user.email
        form.submit()
        res = form.submit()
        # The second rapid submission is throttled.
        assert_equal(res.status_code, 200)
        assert_in_html('Please wait', res)
class TestForgotPassword(OsfTestCase):
    """The forgot-password page: logout behavior, sending rules, and throttling."""

    def setUp(self):
        super(TestForgotPassword, self).setUp()
        self.user = UserFactory()
        self.auth_user = AuthUserFactory()
        self.get_url = web_url_for('forgot_password_get')
        self.post_url = web_url_for('forgot_password_post')
        # Clear any reset token so assertions on verification_key_v2 start clean.
        self.user.verification_key_v2 = {}
        self.user.save()

    # log users out before they land on forgot password page
    def test_forgot_password_logs_out_user(self):
        # visit forgot password link while another user is logged in
        res = self.app.get(self.get_url, auth=self.auth_user.auth)
        # check redirection to CAS logout
        assert_equal(res.status_code, 302)
        location = res.headers.get('Location')
        assert_not_in('reauth', location)
        assert_in('logout?service=', location)
        assert_in('forgotpassword', location)

    # test that forgot password page is loaded correctly
    def test_get_forgot_password(self):
        res = self.app.get(self.get_url)
        assert_equal(res.status_code, 200)
        assert_in('Forgot Password', res.body.decode())
        assert_in('forgotPasswordForm', res.forms)

    # test that existing user can receive reset password email
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_can_receive_reset_password_email(self, mock_send_mail):
        # load forgot password page and submit email
        res = self.app.get(self.get_url)
        form = res.forms['forgotPasswordForm']
        form['forgot_password-email'] = self.user.username
        res = form.submit()
        # check mail was sent
        assert_true(mock_send_mail.called)
        # check http 200 response
        assert_equal(res.status_code, 200)
        # check request URL is /forgotpassword
        assert_equal(res.request.path, self.post_url)
        # check push notification
        assert_in_html('If there is an OSF account', res)
        assert_not_in_html('Please wait', res)
        # check verification_key_v2 is set
        self.user.reload()
        assert_not_equal(self.user.verification_key_v2, {})

    # test that non-existing user cannot receive reset password email
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_cannot_receive_reset_password_email(self, mock_send_mail):
        # load forgot password page and submit email
        res = self.app.get(self.get_url)
        form = res.forms['forgotPasswordForm']
        form['forgot_password-email'] = 'fake' + self.user.username
        res = form.submit()
        # check mail was not sent
        assert_false(mock_send_mail.called)
        # check http 200 response
        assert_equal(res.status_code, 200)
        # check request URL is /forgotpassword
        assert_equal(res.request.path, self.post_url)
        # check push notification (same message as success, to avoid leaking
        # whether the account exists)
        assert_in_html('If there is an OSF account', res)
        assert_not_in_html('Please wait', res)
        # check verification_key_v2 is not set
        self.user.reload()
        assert_equal(self.user.verification_key_v2, {})

    # test that a deactivated user cannot receive reset password email
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_not_active_user_no_reset_password_email(self, mock_send_mail):
        self.user.deactivate_account()
        self.user.save()
        # load forgot password page and submit email
        res = self.app.get(self.get_url)
        form = res.forms['forgotPasswordForm']
        form['forgot_password-email'] = self.user.username
        res = form.submit()
        # check mail was not sent
        assert_false(mock_send_mail.called)
        # check http 200 response
        assert_equal(res.status_code, 200)
        # check request URL is /forgotpassword
        assert_equal(res.request.path, self.post_url)
        # check push notification
        assert_in_html('If there is an OSF account', res)
        assert_not_in_html('Please wait', res)
        # check verification_key_v2 is not set
        self.user.reload()
        assert_equal(self.user.verification_key_v2, {})

    # test that user cannot submit forgot password request too quickly
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_cannot_reset_password_twice_quickly(self, mock_send_mail):
        # load forgot password page and submit email twice in rapid succession
        res = self.app.get(self.get_url)
        form = res.forms['forgotPasswordForm']
        form['forgot_password-email'] = self.user.username
        res = form.submit()
        res = form.submit()
        # check http 200 response
        assert_equal(res.status_code, 200)
        # check push notification: the second submission is throttled
        assert_in_html('Please wait', res)
        assert_not_in_html('If there is an OSF account', res)
class TestForgotPasswordInstitution(OsfTestCase):
    """The institutional forgot-password flow (unsupported-institution path)."""

    def setUp(self):
        super(TestForgotPasswordInstitution, self).setUp()
        self.user = UserFactory()
        self.auth_user = AuthUserFactory()
        self.get_url = web_url_for('redirect_unsupported_institution')
        self.post_url = web_url_for('forgot_password_institution_post')
        # Clear any reset token so assertions on verification_key_v2 start clean.
        self.user.verification_key_v2 = {}
        self.user.save()

    # log users out before they land on institutional forgot password page
    def test_forgot_password_logs_out_user(self):
        # visit forgot password link while another user is logged in
        res = self.app.get(self.get_url, auth=self.auth_user.auth)
        # check redirection to CAS logout
        assert_equal(res.status_code, 302)
        location = res.headers.get('Location')
        assert_in('campaign=unsupportedinstitution', location)
        assert_in('logout?service=', location)

    # test that institutional forgot password page redirects to CAS unsupported
    # institution page
    def test_get_forgot_password(self):
        res = self.app.get(self.get_url)
        assert_equal(res.status_code, 302)
        location = res.headers.get('Location')
        assert_in('campaign=unsupportedinstitution', location)

    # test that user from disabled institution can receive reset password email
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_can_receive_reset_password_email(self, mock_send_mail):
        # submit email to institutional forgot-password page
        res = self.app.post(self.post_url, {'forgot_password-email': self.user.username})
        # check mail was sent
        assert_true(mock_send_mail.called)
        # check http 200 response
        assert_equal(res.status_code, 200)
        # check request URL is /forgotpassword-institution
        assert_equal(res.request.path, self.post_url)
        # check push notification
        assert_in_html('If there is an OSF account', res)
        assert_not_in_html('Please wait', res)
        # check verification_key_v2 is set
        self.user.reload()
        assert_not_equal(self.user.verification_key_v2, {})

    # test that non-existing user cannot receive reset password email
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_cannot_receive_reset_password_email(self, mock_send_mail):
        # load forgot password page and submit email
        res = self.app.post(self.post_url, {'forgot_password-email': 'fake' + self.user.username})
        # check mail was not sent
        assert_false(mock_send_mail.called)
        # check http 200 response
        assert_equal(res.status_code, 200)
        # check request URL is /forgotpassword-institution
        assert_equal(res.request.path, self.post_url)
        # check push notification (same message as success, to avoid leaking
        # whether the account exists)
        assert_in_html('If there is an OSF account', res)
        assert_not_in_html('Please wait', res)
        # check verification_key_v2 is not set
        self.user.reload()
        assert_equal(self.user.verification_key_v2, {})

    # test that a deactivated user cannot receive institutional reset password email
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_not_active_user_no_reset_password_email(self, mock_send_mail):
        self.user.deactivate_account()
        self.user.save()
        res = self.app.post(self.post_url, {'forgot_password-email': self.user.username})
        # check mail was not sent
        assert_false(mock_send_mail.called)
        # check http 200 response
        assert_equal(res.status_code, 200)
        # check request URL is /forgotpassword-institution
        assert_equal(res.request.path, self.post_url)
        # check push notification
        assert_in_html('If there is an OSF account', res)
        assert_not_in_html('Please wait', res)
        # check verification_key_v2 is not set
        self.user.reload()
        assert_equal(self.user.verification_key_v2, {})

    # test that user cannot submit forgot password request too quickly
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_cannot_reset_password_twice_quickly(self, mock_send_mail):
        # submit institutional forgot-password request in rapid succession
        res = self.app.post(self.post_url, {'forgot_password-email': self.user.username})
        res = self.app.post(self.post_url, {'forgot_password-email': self.user.username})
        # check http 200 response
        assert_equal(res.status_code, 200)
        # check push notification: the second submission is throttled
        assert_in_html('Please wait', res)
        assert_not_in_html('If there is an OSF account', res)
@unittest.skip('Public projects/components are dynamically loaded now.')
class TestAUserProfile(OsfTestCase):
    """Profile-page rendering of a user's public projects and components.

    Skipped: the profile page now loads public projects/components dynamically,
    so these server-rendered assertions no longer apply.
    """

    def setUp(self):
        OsfTestCase.setUp(self)
        self.user = AuthUserFactory()
        self.me = AuthUserFactory()
        self.project = ProjectFactory(creator=self.me, is_public=True, title=fake.bs())
        self.component = NodeFactory(creator=self.me, parent=self.project, is_public=True, title=fake.bs())

    # regression test for https://github.com/CenterForOpenScience/osf.io/issues/2623
    def test_has_public_projects_and_components(self):
        # I go to my own profile
        url = web_url_for('profile_view_id', uid=self.me._primary_key)
        # I see the title of both my project and component
        res = self.app.get(url, auth=self.me.auth)
        assert_in_html(self.component.title, res)
        assert_in_html(self.project.title, res)

        # Another user can also see my public project and component
        url = web_url_for('profile_view_id', uid=self.me._primary_key)
        # I see the title of both my project and component
        res = self.app.get(url, auth=self.user.auth)
        assert_in_html(self.component.title, res)
        assert_in_html(self.project.title, res)

    def test_shows_projects_with_many_contributors(self):
        # My project has many contributors
        for _ in range(5):
            user = UserFactory()
            self.project.add_contributor(user, auth=Auth(self.project.creator), save=True)

        # I go to my own profile
        url = web_url_for('profile_view_id', uid=self.me._primary_key)
        res = self.app.get(url, auth=self.me.auth)
        # I see '3 more' as a link
        assert_in('3 more', res)

        res = res.click('3 more')
        assert_equal(res.request.path, self.project.url)

    def test_has_no_public_projects_or_components_on_own_profile(self):
        # User goes to their profile
        url = web_url_for('profile_view_id', uid=self.user._id)
        res = self.app.get(url, auth=self.user.auth)

        # user has no public components/projects
        assert_in('You have no public projects', res)
        assert_in('You have no public components', res)

    def test_user_no_public_projects_or_components(self):
        # I go to other user's profile
        url = web_url_for('profile_view_id', uid=self.user._id)
        # User has no public components/projects
        res = self.app.get(url, auth=self.me.auth)
        assert_in('This user has no public projects', res)
        assert_in('This user has no public components', res)

    # regression test
    def test_does_not_show_registrations(self):
        project = ProjectFactory(creator=self.user)
        component = NodeFactory(parent=project, creator=self.user, is_public=False)
        # User has a registration with public components
        reg = RegistrationFactory(project=component.parent_node, creator=self.user, is_public=True)
        for each in reg.nodes:
            each.is_public = True
            each.save()
        # I go to other user's profile
        url = web_url_for('profile_view_id', uid=self.user._id)
        # Registration does not appear on profile
        res = self.app.get(url, auth=self.me.auth)
        assert_in('This user has no public components', res)
        assert_not_in(reg.title, res)
        assert_not_in(reg.nodes[0].title, res)
@pytest.mark.enable_bookmark_creation
class TestPreprintBannerView(OsfTestCase):
def setUp(self):
    """Create a public project supplementing a published preprint, with
    contributors at admin/write/read levels plus a non-contributor."""
    super(TestPreprintBannerView, self).setUp()
    self.admin = AuthUserFactory()
    self.write_contrib = AuthUserFactory()
    self.read_contrib = AuthUserFactory()
    self.non_contrib = AuthUserFactory()

    self.provider_one = PreprintProviderFactory()
    self.project_one = ProjectFactory(creator=self.admin, is_public=True)
    self.project_one.add_contributor(self.write_contrib, permissions.WRITE)
    self.project_one.add_contributor(self.read_contrib, permissions.READ)
    self.subject_one = SubjectFactory()
    # Published preprint whose supplemental project is project_one; its
    # contributor list mirrors the project's.
    self.preprint = PreprintFactory(creator=self.admin, filename='mgla.pdf', provider=self.provider_one, subjects=[[self.subject_one._id]], project=self.project_one, is_published=True)
    self.preprint.add_contributor(self.write_contrib, permissions.WRITE)
    self.preprint.add_contributor(self.read_contrib, permissions.READ)
def test_public_project_published_preprint(self):
    # Every role -- and even logged-out visitors -- sees the supplemental banner
    # on the project page of a published preprint.
    url = self.project_one.web_url_for('view_project')

    for viewer in (self.admin, self.write_contrib, self.read_contrib, self.non_contrib):
        res = self.app.get(url, auth=viewer.auth)
        assert_in('Has supplemental materials for', res.body.decode())

    # Unauthenticated request.
    res = self.app.get(url)
    assert_in('Has supplemental materials for', res.body.decode())
def test_public_project_abandoned_preprint(self):
    # An 'initial'-state (abandoned) preprint shows no supplemental banner to anyone.
    self.preprint.machine_state = 'initial'
    self.preprint.save()
    url = self.project_one.web_url_for('view_project')

    for viewer in (self.admin, self.write_contrib, self.read_contrib, self.non_contrib):
        res = self.app.get(url, auth=viewer.auth)
        assert_not_in('Has supplemental materials for', res.body.decode())

    # Unauthenticated request.
    res = self.app.get(url)
    assert_not_in('Has supplemental materials for', res.body.decode())
def test_public_project_deleted_preprint(self):
    # A soft-deleted preprint shows no supplemental banner to anyone.
    self.preprint.deleted = timezone.now()
    self.preprint.save()
    url = self.project_one.web_url_for('view_project')

    for viewer in (self.admin, self.write_contrib, self.read_contrib, self.non_contrib):
        res = self.app.get(url, auth=viewer.auth)
        assert_not_in('Has supplemental materials for', res.body.decode())

    # Unauthenticated request.
    res = self.app.get(url)
    assert_not_in('Has supplemental materials for', res.body.decode())
def test_public_project_private_preprint(self):
    # Only contributors see the supplemental banner when the preprint is private.
    self.preprint.is_public = False
    self.preprint.save()
    url = self.project_one.web_url_for('view_project')

    for contributor in (self.admin, self.write_contrib, self.read_contrib):
        res = self.app.get(url, auth=contributor.auth)
        assert_in('Has supplemental materials for', res.body.decode())

    # Non-contributors do not see the banner.
    res = self.app.get(url, auth=self.non_contrib.auth)
    assert_not_in('Has supplemental materials for', res.body.decode())

    # Neither do logged-out visitors.
    res = self.app.get(url)
    assert_not_in('Has supplemental materials for', res.body.decode())
def test_public_project_unpublished_preprint(self):
    # Only contributors see the supplemental banner when the preprint is unpublished.
    self.preprint.is_published = False
    self.preprint.save()
    url = self.project_one.web_url_for('view_project')

    for contributor in (self.admin, self.write_contrib, self.read_contrib):
        res = self.app.get(url, auth=contributor.auth)
        assert_in('Has supplemental materials for', res.body.decode())

    # Non-contributors do not see the banner.
    res = self.app.get(url, auth=self.non_contrib.auth)
    assert_not_in('Has supplemental materials for', res.body.decode())

    # Neither do logged-out visitors.
    res = self.app.get(url)
    assert_not_in('Has supplemental materials for', res.body.decode())
def test_public_project_pending_preprint_post_moderation(self):
    """Under post-moderation, a pending preprint stays public: contributors see
    the 'Pending' status and moderation notice; others see only the provider line."""
    self.preprint.machine_state = 'pending'
    provider = PreprintProviderFactory(reviews_workflow='post-moderation')
    self.preprint.provider = provider
    self.preprint.save()
    url = self.project_one.web_url_for('view_project')

    # Admin - preprint
    res = self.app.get(url, auth=self.admin.auth)
    assert_in('{}'.format(self.preprint.provider.name), res.body.decode())
    assert_in('Pending\n', res.body.decode())
    assert_in('This preprint is publicly available and searchable but is subject to removal by a moderator.', res.body.decode())

    # Write - preprint
    res = self.app.get(url, auth=self.write_contrib.auth)
    assert_in('{}'.format(self.preprint.provider.name), res.body.decode())
    assert_in('Pending\n', res.body.decode())
    assert_in('This preprint is publicly available and searchable but is subject to removal by a moderator.', res.body.decode())

    # Read - preprint
    res = self.app.get(url, auth=self.read_contrib.auth)
    assert_in('{}'.format(self.preprint.provider.name), res.body.decode())
    assert_in('Pending\n', res.body.decode())
    assert_in('This preprint is publicly available and searchable but is subject to removal by a moderator.', res.body.decode())

    # Noncontrib - preprint: sees the provider mention, but no moderation status
    res = self.app.get(url, auth=self.non_contrib.auth)
    assert_in('on {}'.format(self.preprint.provider.name), res.body.decode())
    assert_not_in('Pending\n', res.body.decode())
    assert_not_in('This preprint is publicly available and searchable but is subject to removal by a moderator.', res.body.decode())

    # Unauthenticated - preprint: same as non-contributor
    res = self.app.get(url)
    assert_in('on {}'.format(self.preprint.provider.name), res.body.decode())
    assert_not_in('Pending\n', res.body.decode())
    assert_not_in('This preprint is publicly available and searchable but is subject to removal by a moderator.', res.body.decode())
    def test_implicit_admins_can_see_project_status(self):
        """A user with ADMIN on the parent project — but no explicit
        contributor role on the component — still sees the component
        preprint's pending-moderation banner (implicit admin rights
        cascade to components).
        """
        project = ProjectFactory(creator=self.admin)
        component = NodeFactory(creator=self.admin, parent=project)
        # write_contrib becomes an *implicit* admin on the component via
        # the parent project's ADMIN permission.
        project.add_contributor(self.write_contrib, permissions.ADMIN)
        project.save()
        preprint = PreprintFactory(creator=self.admin, filename='mgla.pdf', provider=self.provider_one, subjects=[[self.subject_one._id]], project=component, is_published=True)
        preprint.machine_state = 'pending'
        provider = PreprintProviderFactory(reviews_workflow='post-moderation')
        preprint.provider = provider
        preprint.save()
        url = component.web_url_for('view_project')
        res = self.app.get(url, auth=self.write_contrib.auth)
        assert_in('{}'.format(preprint.provider.name), res.body.decode())
        assert_in('Pending\n', res.body.decode())
        assert_in('This preprint is publicly available and searchable but is subject to removal by a moderator.', res.body.decode())
    def test_public_project_pending_preprint_pre_moderation(self):
        """Pending pre-moderation preprint: contributors see the 'Pending'
        status plus the not-yet-public notice; non-contributors and
        anonymous visitors see the provider name but no moderation status.
        """
        self.preprint.machine_state = 'pending'
        provider = PreprintProviderFactory(reviews_workflow='pre-moderation')
        self.preprint.provider = provider
        self.preprint.save()
        url = self.project_one.web_url_for('view_project')
        # Admin - preprint
        res = self.app.get(url, auth=self.admin.auth)
        assert_in('{}'.format(self.preprint.provider.name), res.body.decode())
        assert_in('Pending\n', res.body.decode())
        assert_in('This preprint is not publicly available or searchable until approved by a moderator.', res.body.decode())
        # Write - preprint
        res = self.app.get(url, auth=self.write_contrib.auth)
        assert_in('{}'.format(self.preprint.provider.name), res.body.decode())
        assert_in('Pending\n', res.body.decode())
        assert_in('This preprint is not publicly available or searchable until approved by a moderator.', res.body.decode())
        # Read - preprint
        res = self.app.get(url, auth=self.read_contrib.auth)
        assert_in('{}'.format(self.preprint.provider.name), res.body.decode())
        assert_in('Pending\n', res.body.decode())
        assert_in('This preprint is not publicly available or searchable until approved by a moderator.', res.body.decode())
        # Noncontrib - preprint: provider shown, status hidden.
        res = self.app.get(url, auth=self.non_contrib.auth)
        assert_in('{}'.format(self.preprint.provider.name), res.body.decode())
        assert_not_in('Pending\n', res.body.decode())
        assert_not_in('This preprint is not publicly available or searchable until approved by a moderator.', res.body.decode())
        # Unauthenticated - preprint
        res = self.app.get(url)
        assert_in('{}'.format(self.preprint.provider.name), res.body.decode())
        assert_not_in('Pending\n', res.body.decode())
        assert_not_in('This preprint is not publicly available or searchable until approved by a moderator.', res.body.decode())
# Allow running this test module directly, outside the project test runner.
if __name__ == '__main__':
    unittest.main()
|
988,760 | f30d2d1c1cb18368a54ef6aa6920f0f6004c48ea | import os
from setuptools import setup, find_packages
NAME = 'mito_sims_py3'
ROOT = os.path.abspath(os.path.dirname(__file__))
DESCRIPTION = "A tool for simulating apoptosis dynamics."

# Long description comes from the README; fall back to empty so a bare
# checkout (no README) can still build.
try:
    with open(os.path.join(ROOT, 'README.md'), encoding='utf-8') as fid:
        LONG_DESCRIPT = fid.read()
except FileNotFoundError:
    LONG_DESCRIPT = ''

# Version lives in a data file inside the package; a recognisable
# placeholder is used when it is missing so the problem is visible.
try:
    with open(os.path.join(ROOT, NAME, '__version__'), 'r') as fid:
        VERSION = fid.read().strip()
except FileNotFoundError:
    VERSION = '0.0.0error'

setup(
    name=NAME,
    version=VERSION,
    python_requires='>=3.6.0',
    long_description=LONG_DESCRIPT,
    long_description_content_type='text/markdown',
    description=DESCRIPTION,
    packages=find_packages(exclude=('pars',)),
    package_data={
        '': ['__version__'],
    },
    setup_requires=[
        'numpy',
        'scipy',
        'matplotlib',
    ],
    install_requires=[
        'matplotlib',
        'scipy',
        'numpy',
    ],
    # Fixed: the original entries ('Intended Audience :: Developer',
    # 'Intended Audience :: End Users', 'Topic :: biological data
    # analysis') are not valid trove classifiers and are rejected by
    # PyPI on upload.
    classifiers=[
        'Programming Language :: Python :: 3',
        'Operating System :: OS Independent',
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
    ],
)
# 'Development Status :: Alpha',
|
988,761 | c7941d1cc5228302661356d9c7bc30754e60df3e | from pymysql import *
# Open the database connection.
conn = Connect(host='localhost', user='root', port=3306, password='itcast', db='mytestdb')
print(conn)
# Open a cursor.
cue = conn.cursor()
# Read credentials from the user and look them up.
name = input('输入用户名:')
passwd = input('输入密码:')
# Fixed: the original interpolated user input straight into the SQL text
# ("... name='%s' ..." % (name, passwd)), an SQL-injection hole
# (e.g. passwd = "' or '1'='1").  Let the driver bind the parameters.
sql = "select * from student_ where name=%s and passwd=%s"
print(sql)
rowcount = cue.execute(sql, (name, passwd))
if rowcount != 0:
    print('登录成功')
else:
    print('登陆失败')
# Close the cursor.
cue.close()
# Close the connection.
conn.close()
|
988,762 | d9789bb2590d6938e0f6925c910be78d529dea3c | from prime import get_larger_prime
from polynomials import get_random_coefs, produce_shares, interpolate
"""
Produces the shares in a t-out-of-n shamir sharing scheme of a message
Parameters:
message: The message to be encoded and shared. Must be an integer.
t: The threshold of reconstruction
n: The number of shares to produce
Returns:
tuple (shares, p) where
shares is a list of (x, y) coordinates
p is the prime associated with the field those shares were produced in
"""
def share(message, t, n):
    """Split *message* into a t-out-of-n Shamir sharing.

    Parameters:
        message: the secret; must be an integer.
        t: reconstruction threshold.
        n: number of shares to produce.
    Returns:
        (shares, p) where shares is a list of (x, y) points and p is the
        prime defining the field, or None when n < t (no such scheme).
    """
    if n < t:
        # Fewer shares than the threshold can never be reconstructed.
        # Parenthesised so the message also prints correctly on Python 3.
        print("n < t")
        return None
    p = get_larger_prime(message)
    # Secret goes in the constant term; remaining coefficients are random
    # (degree convention defined by polynomials.get_random_coefs —
    # TODO confirm it returns t-1 coefficients).
    coefs = [message] + get_random_coefs(t, p)
    shares = produce_shares(coefs, n, p)
    return shares, p
"""
Reconstructs the message from a list of shares
Parameters:
shares: list of (x, y) coordinates corresponding to shares
p: the prime associated with the sharing scheme
Returns:
message: the message
"""
def reconstruct(shares, p):
    """Recover the secret from a list of shares.

    Parameters:
        shares: list of (x, y) points produced by share().
        p: the prime modulus of the sharing field.
    Returns:
        the reconstructed message.
    """
    return interpolate(shares, p)
|
988,763 | d3c4b6f3ccaf526884b4aa86cc8d3f798f89a90f | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# author: Maxime Raynal <maxime.raynal@protonmail.com>
# LICENSE: MIT
import sys
from sudoku_solver import (
solution_from_dimacs_string, dimacs_string_from_constraints,
constraints_from_grid, grid_from_file
)
def main(args):
    """Solve the sudoku grid named on the command line and print it.

    Prints nothing beyond the usage message when no path is given, and
    nothing at all when the solver answers "UNSAT".
    """
    if len(args) < 2:
        print(f"Usage: {args[0]} path_to_grid\nExiting")
        return
    # Pipeline: file -> grid -> constraints -> DIMACS -> SAT solution.
    grid = grid_from_file(args[1])
    dimacs = dimacs_string_from_constraints(constraints_from_grid(grid))
    solution = solution_from_dimacs_string(dimacs)
    if solution == "UNSAT":
        return
    rows = []
    for row in range(9):
        rows.append(' '.join(str(solution[row][col]) for col in range(9)))
    print('\n'.join(rows))
if __name__ == '__main__':
main(sys.argv)
|
988,764 | c036dc3e4fd6e4cd0e420c70fbb49c10adabf479 | # -*- coding: cp1251 -*-
import os

# Collect every line of foto-site.txt that references an image ('src'
# attribute) and append those lines to rezult-foto.txt.
# Fixed: the original tested `line.count('src') == True`, which is only
# true when 'src' occurs exactly once (True == 1), silently dropping
# lines containing several images.  A plain substring test is what was
# meant.  Files are now closed via context managers.
with open('foto-site.txt') as src_file:
    linkimg = [line for line in src_file if 'src' in line]

with open('rezult-foto.txt', 'a') as logfile:
    for line in linkimg:
        # Lines read from a file keep their trailing '\n'; strip it so
        # we do not write blank lines between entries.
        logfile.write(line.rstrip('\n') + '\n')
988,765 | 4bdddd94c8fb7b2d6fd2481b829a455f5e5e4ed6 | """Lambda entrypoint to handle API requests"""
import json
import os
from generate_presigned_url import create_presigned_url_put, create_presigned_url_get, get_s3_image_list, create_multiple_presigned_urls
from errors import BucketObjectError, ResourceNotFoundError
ALLOWED_HEADERS = 'Content-Type'
ALLOWED_ORIGINS = os.getenv("CLIENT_URI")
ALLOWED_METHODS = 'GET'
def lambda_handler(event, context):
    """API Gateway entrypoint dispatching presigned-URL operations by route.

    Routes:
        /upload/public      -> presigned PUT URL into the public image bucket
        /upload/private     -> presigned PUT URL scoped to the Cognito user
        /view-image/public  -> presigned GET URLs for all processed images
        /view-image/private -> presigned GET URLs for the caller's images
    Returns an API Gateway proxy response: 200 with CORS headers on
    success, 400 when the bucket is empty, 404 for an unknown route,
    500 for anything unexpected.
    """
    try:
        if event['resource'] == '/upload/public':
            response = create_presigned_url_put(os.getenv("S3_IMAGE_BUCKET_NAME"))
        elif event['resource'] == '/upload/private':
            # The Cognito authorizer injects the caller's id as the 'sub'
            # claim; it namespaces the private object.
            user_id = event['requestContext']['authorizer']['claims']['sub']
            response = create_presigned_url_put(
                os.getenv("S3_IMAGE_BUCKET_NAME"),
                is_public=False,
                user=user_id
            )
        elif event['resource'] == '/view-image/public':
            image_list = get_s3_image_list(os.getenv("S3_PROCESSED_IMAGE_BUCKET_NAME"))
            if image_list:
                response = create_multiple_presigned_urls(image_list)
            else:
                raise BucketObjectError('No data in bucket')
        elif event['resource'] == '/view-image/private':
            user_id = event['requestContext']['authorizer']['claims']['sub']
            image_list = get_s3_image_list(
                os.getenv("S3_PROCESSED_IMAGE_BUCKET_NAME"),
                is_public=False,
                user=user_id
            )
            if image_list:
                response = create_multiple_presigned_urls(image_list)
            else:
                raise BucketObjectError('No data in bucket')
        else:
            raise ResourceNotFoundError("Invalid Route")
        return {
            'statusCode': 200,
            'headers': {
                'Access-Control-Allow-Headers': ALLOWED_HEADERS,
                'Access-Control-Allow-Origin': ALLOWED_ORIGINS,
                'Access-Control-Allow-Methods': ALLOWED_METHODS
            },
            'body': json.dumps({'response': response})
        }
    except BucketObjectError as e:
        # NOTE(review): the error responses below carry no CORS headers,
        # so browsers may mask these bodies on cross-origin calls —
        # confirm that is intended.
        return {
            'statusCode': 400,
            'body': json.dumps('Error in processing request: {}'.format(e))
        }
    except ResourceNotFoundError as e:
        return {
            'statusCode': 404,
            'body': json.dumps('Error in processing request: {}'.format(e))
        }
    except Exception as e:
        # Catch-all boundary: any unexpected failure maps to 500.
        return {
            'statusCode': 500,
            'body': json.dumps('Error in processing request: {}'.format(e))
        }
|
988,766 | 9bdd673c28aee00f0c8755e684356c944cbfd24b | #!/usr/local/python3
import smtplib
from smtplib import SMTP
HOST="smtp.163.com" # SMTP server host
SUBJECT="test email form python" # mail subject
TO = "younglovesara@gmail.com" # recipient address
FROM="eryoung2@163.com" # sender address
text="python is test smtp" # body; kept to ASCII for this raw framing
# Assemble the raw message for sendmail(): header lines and the body
# joined with CRLF, with a blank line separating headers from the body.
BODY = '\r\n'.join((
    "From: %s" %"admin",
    "TO: %s" %TO,
    "subject: %s" %SUBJECT,
    "",
    text
))
server = SMTP() # create the SMTP client
# NOTE(review): the port is passed as the string '25' — confirm smtplib
# accepts a string port on the targeted Python version.
server.connect(HOST,'25') # connect to the SMTP host
# SECURITY NOTE(review): a real-looking password is hard-coded and
# committed here — rotate it and load credentials from the environment
# or a secrets store instead.
server.login(FROM,"950102WTHDK") # authenticate
server.sendmail(FROM,TO,BODY) # send the message
server.quit() # close the SMTP session
988,767 | 710af51315eb73a976d1b1489d59c41dc7123691 | import numpy as np
from usefulFuncs import predict, accuracy
class Optimizer(object):
    """First-order mini-batch optimiser supporting SGD, momentum and RMSProp.

    Per-parameter optimiser state (momentum velocity, RMSProp running
    squared-gradient average) is kept in ``self.step_cache`` keyed by
    parameter name.
    """

    def __init__(self, update_method='sgd'):
        # NOTE(review): ``update_method`` is currently unused — the update
        # rule is chosen per call via train(..., update=...).  Kept for
        # interface stability.
        # Per-parameter state for momentum / RMSProp updates.
        self.step_cache = {}

    def l_bfgs(self, X, y):
        """Placeholder — L-BFGS is not implemented."""
        pass

    def train(self, X, y, X_val, y_val,
              model, costFunction,
              learningRate=1e-2, momentum=0, learningRateDecay=.95,
              update='sgd', sampleBatches=True,
              numEpochs=30, batchSize=1000, accFreq=None,
              verbose=True):
        """Train *model* with mini-batch first-order updates.

        Parameters:
            X, y: training data and labels.
            X_val, y_val: validation data used for model selection.
            model: dict of parameter name -> numpy array, updated in place.
            costFunction: callable(model, X, y) -> (cost, grads dict).
            update: one of 'sgd', 'momentum', 'rmsprop'.
            sampleBatches: when True, sample random mini-batches of
                batchSize per iteration; otherwise use the full set.
            accFreq: if set, also evaluate accuracy every accFreq iters.
        Returns:
            (best_model, loss_history, train_acc_history, val_acc_history)
        Raises:
            ValueError: for an unrecognized ``update`` rule.
        """
        m = X.shape[0]
        # With mini-batching, one "epoch" is m // batchSize iterations.
        iterationsPerIter = 1
        if sampleBatches:
            iterationsPerIter = int(m / batchSize)
        numIters = numEpochs * iterationsPerIter
        epoch = 0
        best_val_acc = 0.0
        best_model = {}
        loss_history = []
        train_acc_history = []
        val_acc_history = []
        for it in range(numIters):
            if it % 10 == 0:
                print("starting iteration {}".format(str(it)))
            if sampleBatches:
                # Random mini-batch (sampled with replacement).
                batch_mask = np.random.choice(m, batchSize)
                X_batch = X[batch_mask]
                y_batch = y[batch_mask]
            else:
                X_batch = X
                y_batch = y
            # Forward/backward pass.
            cost, grads = costFunction(model, X_batch, y_batch)
            loss_history.append(cost)
            # Parameter update.
            for p in model:
                if update == 'sgd':
                    dx = -learningRate * grads[p]
                elif update == 'momentum':
                    # Fixed: the original tested ``self.set_cache`` — a
                    # non-existent attribute — so the momentum rule always
                    # raised AttributeError on first use.
                    if p not in self.step_cache:
                        self.step_cache[p] = np.zeros(grads[p].shape)
                    # Momentum update: decayed velocity minus the gradient.
                    dx = momentum * self.step_cache[p] - learningRate * grads[p]
                    self.step_cache[p] = dx
                elif update == 'rmsprop':
                    decay_rate = .99
                    if p not in self.step_cache:
                        self.step_cache[p] = np.zeros(grads[p].shape)
                    # RMSProp: running average of squared gradients.
                    self.step_cache[p] = decay_rate * self.step_cache[p] + (1 - decay_rate) * grads[p] ** 2
                    dx = -learningRate * grads[p] / np.sqrt(self.step_cache[p] + 1e-8)
                else:
                    raise ValueError('Unrecognized update type "%s"' % update)
                model[p] += dx  # apply the update in place
            # Evaluate on the first iteration, at each epoch end, and every
            # accFreq iterations when requested.
            first_it = (it == 0)
            epoch_end = (it + 1) % iterationsPerIter == 0
            acc_check = (accFreq is not None and it % accFreq == 0)
            if first_it or epoch_end or acc_check:
                if it > 0 and epoch_end:
                    # Anneal the learning rate once per epoch.
                    learningRate *= learningRateDecay
                    epoch += 1
                # Train accuracy on a 1000-sample subset to keep it cheap.
                train_mask = np.random.choice(m, 1000)
                X_train_subset = X[train_mask] if m > 1000 else X
                y_train_subset = y[train_mask] if m > 1000 else y
                train_acc = accuracy(costFunction, model, X_train_subset, y_train_subset)
                train_acc_history.append(train_acc)
                # Validation accuracy drives model selection.
                val_acc = accuracy(costFunction, model, X_val, y_val)
                val_acc_history.append(val_acc)
                # Keep a copy of the best model seen so far.
                if val_acc > best_val_acc:
                    best_val_acc = val_acc
                    best_model = {}
                    for p in model:
                        best_model[p] = model[p].copy()
                if verbose:
                    print('Finished epoch %d / %d: cost %f, train: %f, val %f, lr %e'
                          % (epoch, numEpochs, cost, train_acc, val_acc, learningRate))
        if verbose:
            print('finished optimization. best validation accuracy: %f' % (best_val_acc, ))
        # Return the best model and the training history statistics.
        return best_model, loss_history, train_acc_history, val_acc_history
|
988,768 | 0e53cfc9db3eeb16dd8b39b69982035cc271eaa1 | from random import randint
success=0
attempts=10000
for ir in range (attempts):
if randint(0,1)+randint(0,1)+randint(0,1)+randint(0,1)==3:
success+=1
print("Number of attempts", attempts)
print("Number of success",success)
|
988,769 | 88a809388fb1ef5e54b7b13074e16370eb7224d1 | import doctest
import importlib
import unittest
from shared import utils
YEARS = [
2020,
]
def day_tests(tests):
    """Add the doctest suite of every ``aocYYYY.dayN`` module to *tests*.

    For each year in YEARS, days are probed starting from 1 until the
    first missing module.  (Fixed: the day counter was initialised once
    outside the year loop, so every year after the first started probing
    from wherever the previous year stopped and immediately broke.)
    """
    for year in YEARS:
        day = 0
        while True:
            day += 1
            try:
                mod = importlib.import_module('aoc%s.day%s' % (year, day))
            except ModuleNotFoundError:
                break
            # Kept outside the try so an error while building the suite
            # is not silently treated as "module missing".
            tests.addTests(doctest.DocTestSuite(mod))
    return tests
def load_tests(loader, tests, ignore):
    # unittest's ``load_tests`` protocol hook: collect doctests from the
    # shared utils module plus every aocYYYY.dayN solution module.
    tests.addTests(doctest.DocTestSuite(utils))
    day_tests(tests)
    return tests
if __name__ == "__main__":
unittest.main() |
988,770 | 0052ccd6e0505a9568f4ea92947c21635d9c367b | from selenium import |
988,771 | 037d7f1bad5896ae1fa1d2495dfc336650a25846 | # Created by Qingzhi Ma at 2019-07-24
# All right reserved
# Department of Computer Science
# the University of Warwick
# Q.Ma.2@warwick.ac.uk
import pandas as pd
def convert_df_to_yx(df, x, y):
    """Return ``(y_values, x_values)`` extracted from *df*.

    ``y`` comes back as a flat array and ``x`` as an (n, 1) column
    matrix — the shape sklearn-style estimators expect.
    """
    targets = df[y].values
    features = df[x].values.reshape(-1, 1)
    return targets, features
def get_group_count_from_df(df, group_attr, convert_to_str=True):
    """Return ``{group value: row count}`` for *df* grouped by *group_attr*.

    Parameters:
        df: input DataFrame.  Fixed: the original wrote the str-converted
            column back into the caller's frame (``df[group_attr] = ...``),
            mutating shared data; the column is now cast on a copy.
        group_attr: column name to group on.
        convert_to_str: when True, group on the column cast to str so
            keys line up with string-keyed summary files.
    """
    key = df[group_attr]
    if convert_to_str:
        # astype returns a new Series; the caller's frame is untouched.
        key = key.astype(str)
    # size() counts rows per group in one vectorised pass instead of the
    # original Python-level loop over groups.
    return df.groupby(key).size().to_dict()
def get_group_count_from_table(file, group_attr, sep=',', headers=None):
    """Load a delimited file and count rows per value of *group_attr*.

    The grouping column is read as object dtype with NA-detection off so
    raw values are preserved; counting is delegated to
    get_group_count_from_df without a second str conversion.
    """
    frame = pd.read_csv(
        file,
        sep=sep,
        dtype={group_attr: object},
        keep_default_na=False,
        header=None,
        names=headers,
    )
    return get_group_count_from_df(frame, group_attr, convert_to_str=False)
def get_group_count_from_summary_file(file, sep=','):
    """Parse a pre-aggregated summary file into ``{group key: count}``.

    Each line is ``<key fields><sep>...<sep><count>``.  Every field
    except the trailing count is folded back into the key.  Note the key
    is always re-joined with ',' even when *sep* differs — preserved for
    behavioural compatibility.
    """
    counts = {}
    with open(file, 'r') as handle:
        for raw_line in handle:
            parts = raw_line.split(sep)
            group_key = ",".join(parts[:-1])
            counts[group_key] = int(parts[-1])
    return counts
|
988,772 | d313737e373e0f832bee1a3080ad5d93e0360f4c | # -*- coding:utf-8 -*-
import numpy as np
#State(Memory、Utilization、Reference Time)
#Action (mp、dp、batch_size)
# 为了简化运算,定义以下规则,此规则映射了每个状态可采取的动作:
# Memory(M)-> mp
# Utilization(U)-> dp
# Reference_time(T)-> batch_size
# 状态转移流程为 M->U->R
# reward,col represent action,row represent state
"""
Action:
mps = [1, 2, 4, 8, 16, 32]
batchs = [1, 2, 4]
dataparallels = [1, 2, 4, 8, 16, 32]
================
State:
可用的内存 Memory: [1,2,4,8,16, 32](与数据并行度一一对应,num*单个模型需要的内存)
可用的core:[1, 2, ...., 32]
推理的时间:[1, 2, 3] , 1代表good, 2代表normal,3代表bad
"""
# reward matrix
# 内存使用量越多分数越低、core使用越多分数越低、推理时间越短分数越高
# 当前reward值与前一个状态相关
# (M, mp) = 1/mem; (U, dp) = 1/(dp*mp); (T, batch_size) = 1/reference_time
# Actions:
mps = [1, 2, 4, 8, 16]
batchs = [1, 2, 4]
dps = [1, 2, 4, 8, 16]
"""
action set 定义为:
NO mp dp score(reward, throughput/(mp*dp*time))
0 1 1 1.256
1 1 2 2.091
2 1 4 2.980
3 1 8 3.539
4 1 16 5.318
5 2 1 1.695
6 2 2 2.890
7 2 4 3.894
8 2 8 2.737
9 2 16 1.154
10 4 1 1.861
11 4 2 2.212
12 4 4 2.395
13 4 8 1.221
14 8 1 1.526
15 8 2 1.632
16 8 4 1.438
17 16 1 0.877
18 16 2 0.776
"""
action_set = [i for i in range(19)]
"""
"""
action_reward_map = {
0 : 4.954809418,
1 : 8.51707657,
2 : 12.32754352,
3 : 12.85317121,
4 : 9.161624003,
5 : 6.36743321,
6 : 10.53608601,
7 : 12.6275721,
8 : 7.348988945,
9 : 2.797860738,
10 : 7.060278158,
11 : 7.522317377,
12 : 8.550589954,
13 : 4.776430015,
14 : 5.443691438,
15 : 5.317585384,
16 : 4.502125077,
17 : 3.393294033,
18 : 2.514767144
}
action_mpdp_map = [1, 1, #action_set 0
1, 2, #action_set 1 ...
1, 4, #action_set 2
1, 8, #action_set 3
1, 16, #action_set 4
2, 1, #action_set 5
2, 2, #action_set 6
2, 4, #action_set 7
2, 8, #action_set 8
2, 16, #action_set 9
4, 1, #action_set 10
4, 2, #action_set 11
4, 4, #action_set 12
4, 8, #action_set 13
8, 1, #action_set 14
8, 2, #action_set 15
8, 4, #action_set 16
16, 1, #action_set 17
16, 2 #action_set 18
]
# States:
mem_s = [1, 2, 4, 8, 16, 32]
core_s = [i for i in range(1, 33)]
# reward 矩阵初始化
row = len(core_s)
col = len(action_set)
reward = np.zeros([row, col])
#init reward matrix
for cur_state in core_s:
for action in action_set:
if cur_state >= action_mpdp_map[2*action] * action_mpdp_map[2*action+1]:
reward[cur_state-1][action] = action_reward_map[action]
else:
reward[cur_state-1][action] = 0
"""
保存reward矩阵
wf = open("reward_matrix_1.csv", 'w+')
for cur_state in core_s:
for action in action_set:
wf.write(str(reward[cur_state-1][action]))
wf.write("\t")
wf.write("\n")
wf.close()
"""
# hyperparameter
gamma = 0.8
epsilon = 0.4
#Q(state, action) = Reward(state, action) + Gamma*Max(Q(state+1, all actions))
Q = np.zeros([row, col])
MAX_UNUSED_CORE = 32
for episode in range(201):
# 随机选择一个状态
state = np.random.randint(1, 32)
unused_core = MAX_UNUSED_CORE - state
# 当没有可用core时为终止条件
while(unused_core > 0):
possible_actions = []
possible_q = []
for action in action_set:
if(reward[state][action] > 0):
possible_actions.append(action)
possible_q.append(Q[state, action])
# Step next state, here we use epsilon-greedy algorithm.
action = -1
if np.random.random() < epsilon:
# choose random action
action = possible_actions[np.random.randint(0, len(possible_actions))]
else:
# greedy
action = possible_actions[np.argmax(possible_q)]
# Update Q value
Q[state, action] = reward[state, action] + gamma * Q[action].max()
# Go to the next state
state = action_mpdp_map[action*2] * action_mpdp_map[action*2+1]
unused_core = unused_core - state
# Display training progress
if episode % 10 == 0:
print("------------------------------------------------")
print("Training episode: %d" % episode)
print(Q)
# save Q matrix
qf = open("Q-matrix-resnet50 .csv", "w+")
[rows, cols] = Q.shape
for i in range(rows):
for j in range(cols):
qf.write(str(Q[i][j]))
qf.write("\t")
qf.write("\n")
|
988,773 | 153b3572fa2abed147495b94459434445fee3da8 | from . import biote_allow_wiz
from . import biote_unqualified_done_wiz
|
988,774 | 31027d43c726ba6caf538557e8291759a1c435f5 | palavra = input('Digite uma palavra:')
arvalap = ''
counter = 0
counter1 = 1
funny_or_not = 0
limite = len(palavra)
for i in range(limite-1,-1, -1):
arvalap += palavra[i]
while counter1 < limite:
if ord(palavra[counter]) - ord(palavra[counter1]) == ord(arvalap[counter1]) - ord(arvalap[counter]):
funny_or_not += 1
if ord(palavra[counter]) - ord(palavra[counter1]) != ord(arvalap[counter1]) - ord(arvalap[counter]):
funny_or_not -= 1000
counter1 += 1
counter += 1
if funny_or_not < 0:
print('Not Funny :(')
elif funny_or_not > 0:
print('Funny :)')
|
988,775 | 400aa8eda9b39636eddaf5a15f76c52c1eb8c93c | from operator import mul
from functools import reduce
def cmb(n, r):
    """Binomial coefficient C(n, r) via explicit falling-factorial products."""
    r = min(n - r, r)  # exploit symmetry to shorten both products
    if r == 0:
        return 1
    numerator = 1
    for k in range(n, n - r, -1):
        numerator *= k
    denominator = 1
    for k in range(1, r + 1):
        denominator *= k
    return numerator // denominator
# Read N A B followed by N values from stdin; report the maximum average
# of A..B chosen values and how many selections achieve it.
N, A, B, *v = map(int, open(0).read().split())
v.sort(reverse=True)
# Best average always uses the A largest values.
answer = sum(v[:A]) / A
n = 0
if v[0] == v[A - 1]:
    # The top-A values are all equal: any selection of A..min(B, count)
    # copies of that value achieves the same average.  The loop extends
    # cmb(N, k) downwards via the Pascal-style ratio update
    # C(N, k-1) = C(N, k) * k / (N - k + 1).
    N = v.count(v[0])
    n = cmb(N, min(N, B))
    tmp_cmb = n
    for i in range(min(B, N) - 1, A - 1, -1):
        tmp = N - i
        tmp_cmb = tmp_cmb * (i + 1) // tmp
        n += tmp_cmb
else:
    # Otherwise only the ties at the A-th position can be swapped:
    # choose which copies of v[A-1] make up the selection.
    n = cmb(v.count(v[A - 1]), A - v.index(v[A - 1]))
print(answer)
print(n)
988,776 | 5ce603de5c2de1b83d415bdea948d18967f01a28 | import os
from flask import Flask
VERSION = '1.3'
app = Flask(__name__)
@app.get('/')
def hello():
    """Root route: a health-check message reporting the deployed version."""
    return 'hi! version: ' + VERSION
if __name__ == '__main__':
    # Fixed: int(os.environ.get("PORT")) raised TypeError when PORT was
    # unset; default to 5000 so local runs work out of the box while
    # deployments still override via the environment.
    port = int(os.environ.get("PORT", 5000))
    app.run(host='0.0.0.0', port=port)
|
988,777 | 2443b0e15ffbbd7ea09e828bd9b40bde0bbcf16a | from tflearn.data_utils import build_hdf5_image_dataset
import h5py
path = '/home/suger/workspace/pig-face-recognition/raw_data/txt/'
# filenum = 1
# filename = 'train_data'
# files = []
# result = []
# for i in range(0, filenum):
# files.append(path + filename + str(i) + '.txt')
# result.append(filename + str(i) + '.h5')
# build_hdf5_image_dataset(files[i], image_shape=(448, 448), mode='file', output_path=result[i], categorical_labels=True, normalize=False)
# print('Finish dataset ' + result[i])
# Build the validation-set HDF5 file(s) from 448x448 image list files.
# Mirrors the commented-out training-set block above.
filenum = 1
filename = 'validation_data'
files = []
result = []
for i in range(0, filenum):
    files.append(path + filename + str(i) + '.txt')
    result.append(filename + str(i) + '.h5')
    # 'file' mode reads (image path, label) pairs from the list file;
    # labels are one-hot encoded and pixel values left unnormalised.
    build_hdf5_image_dataset(files[i], image_shape=(448, 448), mode='file', output_path=result[i], categorical_labels=True, normalize=False)
    print('Finish dataset ' + result[i])
|
988,778 | 0d5d93680b431551f375288e827f15a9d9bcbb53 | import knn
import svm
import mlp
import dt
import boosting
import learning_curve
|
988,779 | 78fc1ed8b03169cc41d8cb21f31094c01776c07d | from setuptools import setup
setup(
    name="app",
    # Fixed: setuptools has no ``script`` keyword — the original passed
    # an unknown option that is ignored (or warned about) and never
    # installed anything.  ``scripts`` is the correct keyword for
    # installing executable scripts.
    scripts=['manage'],
)
988,780 | 23036b849e21c3eebe5764f33e31ab870054fa71 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 15 20:35:15 2020
@author: shubham
"""
import math
C=50
H=30
result=[]
list1=[]
D=input('Enter the value of d : ')
list=[D for D in D.split(",")]
list1 = [int(i) for i in list]
i=0
for i in list1:
S= ((2 * C * i)/H)
Q= round(math.sqrt(S))
result.append(Q)
print('output',result)
|
988,781 | 1a6d1eab9eeb856b8e7729b6f0c9c1fce4e2b0e1 | from __future__ import annotations
from dataclasses import dataclass, field
from travelport.models.account_code_1 import AccountCode1
__NAMESPACE__ = "http://www.travelport.com/schema/util_v52_0"
@dataclass
class AirUpsellOfferSearchCriteria:
    """
    Search criteria for AirUpsellOffers.

    Generated Travelport schema binding (util_v52_0); the field metadata
    drives xsdata XML (de)serialisation and must match the XSD.
    """
    class Meta:
        namespace = "http://www.travelport.com/schema/util_v52_0"

    # Optional <AccountCode> child element, declared in the shared
    # common_v52_0 namespace.
    account_code: None | AccountCode1 = field(
        default=None,
        metadata={
            "name": "AccountCode",
            "type": "Element",
            "namespace": "http://www.travelport.com/schema/common_v52_0",
        }
    )
    # Required ClassOfService XML attribute: a 1-2 character booking class.
    class_of_service: None | str = field(
        default=None,
        metadata={
            "name": "ClassOfService",
            "type": "Attribute",
            "required": True,
            "min_length": 1,
            "max_length": 2,
        }
    )
988,782 | ca92a6f3818cabd023d481e4f10fbcfed8d5fb2b |
# coding: utf-8
# In[1]:
import numpy as np
import cv2
import random
from matplotlib import pyplot as plt
UBIT ='manishre'
np.random.seed(sum([ord(c) for c in UBIT]))
# In[2]:
def epipolarLines(left_image,right_image,lines,src_pts,des_pts):
r,c = left_image.shape[:2]
for r,pt1,pt2 in zip(lines,src_pts,des_pts):
color = tuple(np.random.randint(0,255,3).tolist()) # picking random color for each line
x0,y0 = map(int, [0, -r[2]/r[1] ]) #
x1,y1 = map(int, [c, -(r[2]+r[0]*c)/r[1] ])
left_image = cv2.line(left_image, (x0,y0), (x1,y1), color,1)
left_image = cv2.circle(left_image,tuple(pt1),5,color,-1)
right_image = cv2.circle(right_image,tuple(pt2),5,color,-1)
return left_image,right_image
# In[3]:
image1 = cv2.imread('tsucuba_left.png')
image2 = cv2.imread('tsucuba_right.png')
image3 = cv2.imread('tsucuba_left.png',0)
image4 =cv2. imread('tsucuba_right.png',0)
# In[4]:
sift = cv2.xfeatures2d.SIFT_create()
# find the keypoints and descriptors with SIFT
detected_points1, descriptors1 = sift.detectAndCompute(image1.copy(),None)
detected_points2, descriptors2 = sift.detectAndCompute(image2.copy(),None)
detect_image1 =cv2.drawKeypoints(image1,detected_points1,None,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
detect_image2 = cv2.drawKeypoints(image2,detected_points2,None,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# FLANN parameters
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params,search_params)
matches = flann.knnMatch(descriptors1,descriptors2,k=2)
good = []
good_pts =[]
pts1 = []
pts2 = []
# ratio test as per Lowe's paper
for i,(m,n) in enumerate(matches):
if m.distance < 0.75*n.distance:
good.append(m)
good_pts.append([m])
pts2.append(detected_points2[m.trainIdx].pt)
pts1.append(detected_points1[m.queryIdx].pt)
featureMacthing = cv2.drawMatchesKnn(image1.copy(),detected_points1,image2.copy(),detected_points2,good_pts,None,flags=2)
#======================================PART2=====================================================================
pts1 = np.int32(pts1)
pts2 = np.int32(pts2)
fundamentalMatrix, mask = cv2.findFundamentalMat(pts1,pts2,cv2.RANSAC)
print('===FUNDAMENTAL MATRIX===')
print(fundamentalMatrix)
# We select only inlier points
pts1 = pts1[mask.ravel()==1]
pts2 = pts2[mask.ravel()==1]
print(len(pts1))
key=[]
for i in range(10):
key.append(random.randint(1,100))
print('key',key)
pts3 =[]
pts4 =[]
for i in key:
pts3.append(pts1[i])
pts4.append(pts2[i])
pts3 =np.asarray(pts3)
pts4 =np.asarray(pts4)
#============================================Part3=========================================================================
inliners_left = cv2.computeCorrespondEpilines(pts3.reshape(-1,1,2), 2,fundamentalMatrix)
inliners_left = inliners_left.reshape(-1,3)
image_l2r,image_l2rp = epipolarLines(image1.copy(),image2.copy(),inliners_left,pts3,pts4)
inliners_right =cv2.computeCorrespondEpilines(pts4.reshape(-1,1,2),2,fundamentalMatrix)
inliners_right = inliners_right.reshape(-1,3)
image_r2l,image_r2lp = epipolarLines(image2.copy(),image1.copy(),inliners_right,pts3,pts4)
#==============================================part4========================================================================
depthMap = cv2.StereoBM_create(numDisparities=64, blockSize=21)
#depthMap = cv2.createStereoBM(numDisparities=64, blockSize=21)
__disparityMap = depthMap.compute(image3,image4)
__disparityMap = (__disparityMap,0)
# In[ ]:
cv2.imshow('keypoint1',detect_image1)
cv2.waitKey(0)
cv2.imshow('keypoints2',detect_image2)
cv2.waitKey(0)
cv2.imshow('Feature Matching',featureMacthing)
cv2.waitKey(0)
cv2.imshow('epipolarLines from Left to right',image_l2r)
cv2.waitKey(0)
cv2.imshow('epipolar points from left to right',image_l2rp)
cv2.waitKey(0)
cv2.imshow('epipolarlines from right to left',image_r2l)
cv2.waitKey(0)
cv2.imshow('epipolar points from right to left',image_r2lp)
cv2.waitKey(0)
plt.imshow(__disparityMap,'gray')
cv2.destroyAllWindows()
#print(fundamental)
# In[ ]:
cv2.imwrite('task2_sift1.jpg',detect_image1)
cv2.imwrite('task2_sift2.jpg',detect_image2)
cv2.imwrite('task2_matches_knn.jpg',featureMacthing)
cv2.imwrite('task2_epi_right.jpg',image_l2r)
cv2.imwrite('task2_epi_left.jpg',image_r2l)
cv2.imwrite('task2_disparity.jpg',__disparityMap)
|
988,783 | e8875b894b694ac5a8a255d0e43368c6640230c7 | # 系统
## ====================
import os
import subprocess
import sys
## ====================
# 定时
## ====================
import schedule
## ====================
# 网络请求
## ====================
import requests
## ====================
# 解析
## ====================
import json
import yaml
try:
from yaml import CLoader as Loader, CDumper as Dumper
except:
from yaml import Loader, Dumper
## ====================
# Redis
## ====================
import ownlib.redis_manager as rdm
## ====================
# Redis
## ====================
import ownlib.repo_manager as rm
## ====================
# Config
## ====================
ALREADY_EXIST_REPO = "config/already_clone.yaml"
REPO_INFO = "warehouse/zCore_realm/config"
PWD = "/home/own/MengXia"
## ====================
## redis实例
redisManager = rdm.RedisManager()
## repos实例
repoManager = rm.RepoManager()
PWD = sys.path[0]
def clone_fn(repo):
# 1. 设置 工作 目录
print("======================================")
print("repo clone ", repo.user, " - ", repo.name)
print("======================================")
# print("切换 工作 目录 ----->")
# os.chdir(PWD)
# print("切换前:" + str(os.system("pwd")))
# os.chdir("./warehouse/" + repo.name + "_realm/" + repo.user)
# print("切换后:" + str(os.system("pwd")))
# switch_dir("./warehouse/" + repo.name + "_realm/" + repo.user)
# 2. clone 仓库
try:
print("开始 clone")
# os.system("git clone "+repo.url+" --recursive --depth 1")
# os.system("git clone " + repo.url + " --recursive")
subprocess.run("git clone " + repo.url + " --recursive",shell=True,check=True,cwd="warehouse/" + repo.name + "_realm/" + repo.user)
except:
subprocess.run("rm -rf warehouse/" + repo.name + "_realm/" + repo.user + "/" + repo.name,shell=True)
print("clone 失败 ,重置工作目录 退出 clone")
return
# clone 完成 标记
res = redisManager.add_exist_repo(repo.url)
if res == 1:
print("添加成功")
else:
print("添加失败,已存在")
# 3. 探测分支
# print("分支获取")
# try:
# header = {'Accept': 'application/vnd.github.v3+json'}
# url = 'https://api.github.com/repos/' + repo.user + '/' + repo.name + '/branches'
# res = requests.get(url=url, headers=header)
# json_branches = res.text
# except:
# print("请求失败")
# os.chdir(PWD)
# return
# chech_branch_json_validity(json_branches)
# print("请求成功")
# # 转化 json
# branches = json.loads(json_branches)
# print("准备 branch dir")
# for branche in branches:
# prepare_branch_dir(branche["name"])
# print("准备 完毕")
branches = request_repo_branches(repo)
# 记录分支
redisManager.save_branch_info(repo, branches)
# os.chdir(PWD)
def update_fn(repo):
if not check_for_update_available(repo):
return
print("======================================")
print("repo update ", repo.user, " - ", repo.name)
print("======================================")
# 请求 仓库 分支 信息
branches = request_repo_branches(repo)
# 对比 与 本地 的 情况 返回 变更的 分支
need_branches = compare_branch_info(repo, branches)
if len(need_branches) != 0:
update_branch(repo, need_branches)
mark_to_test(repo, need_branches)
update_branch_info(repo, branches)
# os.chdir(PWD)
# try:
# header = {'Accept': 'application/vnd.github.v3+json'}
# url = 'https://api.github.com/repos/' + repo.user + '/' + repo.name + '/branches'
# res = requests.get(url=url, headers=header)
# json_branches = res.text
# except:
# print("请求失败")
# os.chdir(PWD)
# return
# # 检测 合法性
# if not chech_branch_json_validity(json_branches):
# return
# print("请求成功")
# 1. 获取 branch 信息
# 2. 解析 branch 信息
# 3. 依据 新 branch 信息 和 本地 branch 信息 做对比
# 4. 对 变化的 branch 进入 相对应的 branch 进行 更新
# 5. 标记 此 branch 需要 测试
def register_watch(repo, clone_fn, update_fn):
if repo.url in repoManager.already_exist_repo:
print(repo.user + ":" + repo.name + " repo存在 开始 监测更新")
schedule.every(10).seconds.do(update_fn, repo)
else:
print(repo.user + ":" + repo.name + " repo不存在 clone并 开始 监测更新")
# if os.path.exists("warehouse/" + repo.name + "_realm/" + repo.user +
# "/" + repo.name):
# os.system("rm -rf warehouse/" + repo.name + "_realm/" + repo.user +
# "/" + repo.name)
# os.chdir(PWD)
clone_fn(repo)
schedule.every(1).minutes.do(update_fn, repo)
def main():
os.chdir(PWD) # 设置工作目录
for repo in repoManager.repos:
prepare_dir(repo)
register_watch(repo, clone_fn, update_fn)
## 开启监听
while True:
schedule.run_pending()
# ================== 仓库 区
def prepare_dir(repo):
# 幂等性
print("准备 文件目录")
subprocess.run("mkdir -p warehouse/" + repo.name + "_realm/config",shell=True)
subprocess.run("mkdir -p warehouse/" + repo.name + "_realm/scripts",shell=True)
subprocess.run("mkdir -p warehouse/" + repo.name + "_realm/" + repo.user + "/config",shell=True)
subprocess.run("mkdir -p warehouse/" + repo.name + "_realm/" + repo.user + "/diff",shell=True)
subprocess.run("mkdir -p warehouse/" + repo.name + "_realm/" + repo.user + "/result",shell=True)
subprocess.run("mkdir -p warehouse/" + repo.name + "_realm/" + repo.user + "/logfile",shell=True)
subprocess.run("mkdir -p warehouse/" + repo.name + "_realm/" + repo.user + "/help_info",shell=True)
# os.system("mkdir -p warehouse/" + repo.name + "_realm/config")
# os.system("mkdir -p warehouse/" + repo.name + "_realm/" + repo.user +
# "/config")
# # os.system("cp config/all-test-cases.txt " + repo.user +
# # "/config/all-test-cases.txt")
# os.system("mkdir -p warehouse/" + repo.name + "_realm/" + repo.user +
# "/diff")
# os.system("mkdir -p warehouse/" + repo.name + "_realm/" + repo.user +
# "/result")
def prepare_branch_dir(branch, repo):
    """Create the per-branch working directories under the repo's realm."""
    realm = "warehouse/" + repo.name + "_realm/" + repo.user
    # mkdir -p is idempotent, so repeated calls for the same branch are safe.
    for subdir in ("config", "diff", "result", "logfile", "help_info"):
        subprocess.run("mkdir -p " + subdir + "/" + branch, shell=True, cwd=realm)
def request_repo_branches(repo):
    """Fetch the branch list of *repo* from the GitHub API.

    Returns a list of "name:sha" strings, or [] on any request or format
    error.  Also ensures the per-branch directories exist on disk.
    """
    try:
        header = {
            'Accept': 'application/vnd.github.v3+json',
            # NOTE(review): hard-coded API token committed to source — it
            # should be revoked and read from an env var / secret store.
            'Authorization': 'token fa5b00ae8df79e2f97b0017d4b22cd49245fa8ad '
        }
        url = 'https://api.github.com/repos/' + repo.user + '/' + repo.name + '/branches'
        res = requests.get(url=url, headers=header)
        json_branches = res.text
    except Exception:  # narrowed from bare `except:` (kept KeyboardInterrupt alive)
        print("请求失败")
        return []
    # Bug fix: the validity check's result used to be discarded, so a GitHub
    # error object ("{...}") would fall through into json handling below.
    if not chech_branch_json_validity(json_branches):
        return []
    print("请求成功")
    branches = json.loads(json_branches)
    print("准备 branch dir")
    for branche in branches:
        prepare_branch_dir(branche["name"], repo)
    print("准备 完毕")
    return convert_string_list(branches)
def chech_branch_json_validity(json_branches):
    """Return True when the raw API response looks like a JSON branch array.

    An empty body means the request failed; a body starting with '{' is a
    GitHub error object rather than the expected '[' list.
    """
    if not json_branches:
        print("请求失败")
        return False
    if json_branches[0] == '{':
        print("值不对 {")
        return False
    return True
def compare_branch_info(repo, curr_branches):
    """Return the names of branches whose head changed since the last poll.

    curr_branches: list of "name:sha" strings from request_repo_branches().
    Compares against the snapshot stored in redis; on the very first run
    (no snapshot) every branch counts as changed.  "gh-pages" is skipped.
    """
    res = []
    if len(curr_branches) == 0:
        return res
    print("比较 分支 信息")
    # Normalise quoting so entries compare equal to what redis returns.
    curr = set()
    for b in curr_branches:
        curr.add(str(b).replace("'", "\""))
    last = redisManager.read_branch_info(repo)
    if len(last) == 0:
        # First run: persist the snapshot.  `last` stays empty here, so the
        # set difference below reports every current branch as changed.
        redisManager.save_branch_info(repo, curr_branches)
    change = curr - last
    if len(change) != 0:
        print("有变化 : ", change)
        for o in change:
            # Entries are "name:sha" — keep only the branch name.
            branch = o.split(":")[0].strip()
            print("branch : ", branch)
            if branch == "gh-pages":
                # GitHub Pages branch never needs a test run.
                print("pages 无需 测试")
                continue
            res.append(branch)
    else:
        print("无变化、无需更新")
    print(res)
    return res
def update_branch(repo, modified_branches):
    """Sync each branch in *modified_branches* inside the local clone.

    An existing local branch is checked out (or kept if already current);
    a missing one is created with `git checkout -b`.  The branch is then
    fetched from origin and the work tree hard-reset to FETCH_HEAD.
    """
    # Output of `git branch`, one stripped line per local branch; the
    # current branch keeps its leading "*".
    tmp = subprocess.run("git branch",shell=True,stdout=subprocess.PIPE,encoding="utf-8",cwd="./warehouse/" + repo.name + "_realm/" + repo.user + "/" + repo.name)
    branch_res = list(map(lambda x: x.strip(),tmp.stdout.strip().split("\n")))
    for branch in modified_branches:
        for br in branch_res:
            print("匹配 ", br)
            if br.startswith("*") and branch == br.replace("*", "").strip():
                # Already on this branch — nothing to check out.
                print("匹配 成功 且 当前分支 就是", branch)
                break
            if br == branch:
                print("匹配 成功 ")
                print("checkout :", branch)
                subprocess.run("git checkout " + branch,shell=True,cwd="warehouse/" + repo.name + "_realm/" + repo.user + "/" + repo.name)
                break
            # NOTE(review): printed once per non-matching line, not once per
            # branch — probably meant to live after the for/else instead.
            print("branch 匹配 失败")
        else:
            # for/else: the loop finished without `break`, i.e. no local
            # branch matched — create it.
            print("创建 分支 并 切换")
            subprocess.run("git checkout -b " + branch,shell=True,cwd="warehouse/" + repo.name + "_realm/" + repo.user + "/" + repo.name)
        try:
            print("pulling....")
            subprocess.run("git fetch origin " + branch,shell=True,cwd="warehouse/" + repo.name + "_realm/" + repo.user + "/" + repo.name)
            subprocess.run("git reset --hard FETCH_HEAD",shell=True,cwd="warehouse/" + repo.name + "_realm/" + repo.user + "/" + repo.name)
        except:
            # subprocess.run without check=True rarely raises; kept as-is.
            print("pull 不符合预期")
    return
def mark_to_test(repo, modified_branches):
    """Flag *modified_branches* of *repo* in redis as pending a test run."""
    print("标记 需要 测试的 仓库 分支")
    redisManager.mark_to_test(repo, modified_branches)
def update_branch_info(repo, curr_branches):
    """Persist the latest branch snapshot of *repo* to redis."""
    print("更新 仓库 info 信息")
    redisManager.update_branch_info(repo, curr_branches)
def check_for_update_available(repo):
    """Return False while a test run for *repo* is in progress, else True."""
    running = redisManager.test_is_running(repo)
    if running:
        print("测试 运行 中")
        print("稍后更新")
    return not running
def convert_string_list(ls):
    """Flatten GitHub branch dicts into "name:sha" strings."""
    return ["{0}:{1}".format(entry['name'], entry['commit']['sha']) for entry in ls]
# def switch_dir(path):
# print("切换 工作 目录 ----->")
# os.chdir(PWD)
# print("切换前:")
# subprocess.run("pwd")
# os.chdir(path)
# print("切换后:")
# subprocess.run("pwd")
# Script entry point: only start the watcher when executed directly.
if __name__ == '__main__':
    main()
|
988,784 | 56a0f2e186004edd0cda23b7fb563c2e2a905c10 | import numpy as np
import sys
sys.path.insert(0, './breast_segment/breast_segment')
from breast_segment import breast_segment
from matplotlib import pyplot as plt
import PIL
import cv2
import warnings
from medpy.filter.smoothing import anisotropic_diffusion
import math
import random
import statistics
import os
# Global switch: when True, intermediate images are displayed via cv2.imshow.
DEBUG = True
def find_start_and_end_points(im):
    """Locate the breast-boundary endpoints on the image border.

    start: right-most non-zero pixel on the bottom row, falling back to the
    lowest non-zero pixel in the left column; end: right-most pixel > 0 on
    the top row.  Either may be None.  Index 0 of the scanned row/column is
    never inspected (matching the original `while i > 0` bounds).
    """
    height, width = im.shape[0], im.shape[1]
    start = None
    end = None
    for col in range(width - 1, 0, -1):
        if im[height - 1, col] != 0:
            start = (height - 1, col)
            break
    if start is None:
        for row in range(height - 1, 0, -1):
            if im[row, 0] != 0:
                start = (row, 0)
                break
    for col in range(width - 1, 0, -1):
        if im[0, col] > 0:
            end = (0, col)
            break
    return start, end
def canny(im):
    """Otsu-thresholded Canny edge detection on the left half of *im*.

    Returns a 2-D float array of im's full size; edge pixels found in the
    left half are copied in, the right half stays zero.  (Only the left
    half is processed — presumably because load_im mirrors images so the
    pectoral muscle always sits on that side; confirm.)
    """
    # Wrap the grayscale image as H x W x 1 uint8 for OpenCV.
    img = np.zeros([im.shape[0],im.shape[1],1])
    img[:,:,0] = im
    img = img.astype(np.uint8)
    if DEBUG:
        cv2.imshow("first_image",img)
    # Otsu picks the high threshold automatically; the low one is half of it.
    high_thresh, thresh_im = cv2.threshold(img[:int(img.shape[0]), :int(img.shape[1]/2)], 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    lowThresh = 0.5*high_thresh
    edges = cv2.Canny(thresh_im, lowThresh, high_thresh)
    if DEBUG:
        cv2.imshow("threshimage",thresh_im)
    # Copy the half-width edge map into a full-size single-channel array.
    img2 = np.zeros([im.shape[0],im.shape[1],1])
    for i in range(edges.shape[0]):
        for j in range(edges.shape[1]):
            img2[i, j,0]=edges[i, j]
    if DEBUG:
        cv2.imshow("edges",img2)
    return img2[:,:,0]
def count_top_left_neighbors(im, pos):
    """Collect *pos* plus every non-zero pixel reachable by repeatedly
    stepping up, left, or up-left.

    Returns a list of unique (row, col) tuples.  The original only checked
    the visited list, so a pixel discoverable from two neighbours could be
    queued — and returned — more than once; tracking a `seen` set fixes the
    duplicates and removes the O(n^2) list-membership scans.
    """
    seen = {pos}
    queue = [pos]
    spots = []
    while queue:
        r, c = queue.pop(0)
        spots.append((r, c))
        for nr, nc in ((r - 1, c), (r, c - 1), (r - 1, c - 1)):
            if nr >= 0 and nc >= 0 and im[nr, nc] != 0 and (nr, nc) not in seen:
                seen.add((nr, nc))
                queue.append((nr, nc))
    return spots
def count_bot_right_neighbors(im, pos):
    """Collect *pos* plus every non-zero pixel reachable by repeatedly
    stepping down, right, or down-right (mirror of count_top_left_neighbors).

    Returns unique (row, col) tuples; a `seen` set prevents the duplicate
    visits (and O(n^2) membership scans) of the original list-based version.
    """
    height, width = im.shape[0], im.shape[1]
    seen = {pos}
    queue = [pos]
    spots = []
    while queue:
        r, c = queue.pop(0)
        spots.append((r, c))
        for nr, nc in ((r + 1, c), (r, c + 1), (r + 1, c + 1)):
            if nr < height and nc < width and im[nr, nc] != 0 and (nr, nc) not in seen:
                seen.add((nr, nc))
                queue.append((nr, nc))
    return spots
def return_connected_components(im):
    """Return the 8-connected components of non-zero pixels in *im*.

    Each component is a list of (row, col) tuples; components appear in
    raster-scan discovery order.  A `set` replaces the original list for
    visited-membership tests, turning the O(n^2) scan into O(n).
    """
    components = []
    visited = set()
    for y in range(im.shape[0]):
        for x in range(im.shape[1]):
            if im[y, x] != 0 and (y, x) not in visited:
                component, was_checked = get_component(im, (y, x))
                visited.update(was_checked)
                components.append(component)
    return components
def get_component(im, spot):
    """Breadth-first flood fill of the 8-connected component containing *spot*.

    Returns (component, checked): two content-identical lists of (row, col)
    tuples in BFS order (two values kept for caller compatibility).  A
    `seen` set replaces the original linear scans over both lists, which
    made the fill quadratic.
    """
    seen = {spot}
    queue = [spot]
    component = []
    while queue:
        current = queue.pop(0)
        component.append(current)
        for n in get_nonzero_neighbors(im, current):
            if n not in seen:
                seen.add(n)
                queue.append(n)
    return component, list(component)
def find_greatest_dist(points):
greatestDist = ((points[0][0] - points[1][0]) ** 2 + (points[0][1] - points[1][1]) ** 2) ** 0.5
ends = [points[0], points[1]]
for i in range(len(points)-1):
for j in range(i+1, len(points)):
d = ((points[i][0] - points[j][0]) ** 2 + (points[i][1] - points[j][1]) ** 2) ** 0.5
if greatestDist < d:
greatestDist = d
ends = [points[i], points[j]]
return ends
def get_slope(im, component):
    """Estimate the slope of a thin edge *component*.

    Endpoint pixels are those with <= 2 non-zero neighbours; with more than
    two candidates the farthest-apart pair is used.  Returns (slope, end):
    slope is d(row)/d(col) between the endpoints, and `end` is the first
    endpoint — note it stays None unless the general else-branch runs.

    NOTE(review): the special cases look swapped relative to the formula —
    equal columns (a vertical segment, infinite d(row)/d(col)) yields 0,
    while equal rows (a horizontal segment, true slope 0) yields None.
    Callers (return_bad_components) rely on this convention; kept as-is.
    """
    ends = []
    for item in component:
        if len(get_nonzero_neighbors(im, item)) <= 2:
            ends.append(item)
    if len(ends) > 2:
        ends = find_greatest_dist(ends)
    slope = None
    theEnd = None
    if len(ends) < 2:
        slope = None
    elif ends[0][1] - ends[1][1] == 0:
        # Equal columns: avoids the zero division below.
        slope = 0
    elif ends[0][0] - ends[1][0] == 0:
        slope = None
    else:
        slope = (ends[0][0] - ends[1][0]) / (ends[0][1] - ends[1][1])
        theEnd = ends[0]
    return slope, theEnd
def color_image_components(im, components):
    """Build an RGB copy of *im* with every component painted a random colour.

    Debug visualisation only: the display call is commented out, nothing is
    returned, and the coloured array is discarded.
    """
    im_color = np.zeros([im.shape[0], im.shape[1], 3])
    for channel in range(3):
        im_color[:, :, channel] = im
    for component in components:
        colour = [random.random(), random.random(), random.random()]
        for pixel in component:
            im_color[pixel] = colour
    #cv2.imshow("image",im_color)
    #cv2.waitKey(0)
def get_nonzero_neighbors(im, spot):
    """Return the in-bounds 8-neighbourhood positions of *spot* whose pixel
    is non-zero, in the original fixed probe order:
    up, left, down, right, up-left, up-right, down-left, down-right."""
    y, x = spot
    height, width = im.shape[0], im.shape[1]
    probes = ((-1, 0), (0, -1), (1, 0), (0, 1),
              (-1, -1), (-1, 1), (1, -1), (1, 1))
    neighbors = []
    for dy, dx in probes:
        ny, nx = y + dy, x + dx
        if 0 <= ny < height and 0 <= nx < width and im[ny, nx] != 0:
            neighbors.append((ny, nx))
    return neighbors
def return_bad_components(im, components, x_int, y_int):
    """Filter edge components that cannot be the pectoral boundary.

    Rejected: components with fewer than 2 pixels, undefined slope,
    slope 0 (get_slope's "vertical/degenerate" convention), and angles
    where atan(-slope) < 10 degrees (the "> 90" arm can never fire, since
    math.atan returns less than 90 degrees).  x_int / y_int feed only the
    commented-out intercept check.
    """
    bad_components = []
    for component in components:
        if len(component) < 2:
            bad_components.append(component)
        else:
            m, point = get_slope(im, component)
            if m is None:
                bad_components.append(component)
            elif m == 0:
                bad_components.append(component)
            elif math.degrees(math.atan(-1 * m)) > 90 or math.degrees(math.atan(-1 * m)) < 10:
                bad_components.append(component)
            """else:
                component_x_int = point[1] - (point[0] / m)
                component_y_int = -1 * m * point[1] + point[0]
                if component_x_int > x_int or component_y_int > y_int:
                    bad_components.append(component)"""
    return bad_components
def clean_canny(im, start, end):
    """Strip edge pixels that cannot belong to the pectoral boundary.

    Zeroes pixels beyond the chord joining *start* and *end*, thins
    horizontal 3-pixel runs, then erases components rejected by
    return_bad_components.  Mutates and returns *im*; returns None when
    either endpoint is missing.
    """
    if start == None or end == None:
        return
    im_og = np.copy(im)
    # Nudge start to avoid a zero denominator in the slope below.
    if end[1] == start[1]:
        start = (start[0], start[1]+1)
    # NOTE(review): compares end's ROW to start's COLUMN — probably meant
    # end[0] == start[0]; kept as written.
    if end[0] == start[1]:
        start = (start[0]+1, start[1])
    m = (start[0] - end[0]) / (end[1] - start[1])
    if start[1] > end[1]:
        m = -m
    y_int = m * end[1]
    # Remove lines beyond the breast boundary
    for y in range(im.shape[0]):
        for x in range(im.shape[1]):
            if im[y, x] != 0 and y > x * -m + y_int:
                im[y, x] = 0
    # Remove horizontal lines
    for y in range(im.shape[0]):
        for x in range(1, im.shape[1]-1):
            if im[y, x] != 0 and im[y, x-1] != 0 and im[y, x+1] != 0:
                im[y, x-1] = 0
                im[y, x+1] = 0
    """
    # Remove diagonal lines
    for y in range(1, im.shape[0]-1):
        for x in range(1, im.shape[1]-1):
            if im[y, x] != 0 and (im[y-1, x-1] != 0 and im[y+1, x+1] != 0 and im[y-1, x+1] != 0) or (im[y-1, x-1] != 0 and im[y+1, x+1] != 0 and im[y+1, x-1] != 0) or (im[y-1, x-1] != 0 and im[y+1, x-1] != 0 and im[y-1, x+1] != 0) or (im[y-1, x+1] != 0 and im[y+1, x+1] != 0 and im[y+1, x-1] != 0):
                top_left = count_top_left_neighbors(im, (y, x))
                bot_right = count_bot_right_neighbors(im, (y, x))
                if len(top_left) >= 3:
                    im[y-1, x-1] = 0
                if len(bot_right) >= 3:
                    im[y+1, x+1] = 0
    # Remove half-bullnose + bullnose
    for y in range(1, im.shape[0]-1):
        for x in range(1, im.shape[1]-1):
            if im[y, x] != 0 and im[y+1, x+1] != 0:
                bot_right = count_bot_right_neighbors(im, (y, x))
                if len(bot_right) >= 3:
                    im[y+1, x+1] = 0
    """
    components = return_connected_components(im)
    x_int = end[1]
    im_before_component_removal = im.copy()
    # Debug-only visualisation; its result is discarded.
    color_image_components(im, components)
    bad_components = return_bad_components(im, components, x_int, y_int)
    for component in bad_components:
        for item in component:
            im[item[0], item[1]] = 0
    if DEBUG:
        cv2.imshow("im_og",im_og)
    if DEBUG:
        cv2.imshow("im_before_component_removal",im_before_component_removal)
    if DEBUG:
        cv2.imshow("im",im)
    return im
def get_bounding_box(component):
    """Return (left, right, top, bottom) pixel extremes of *component*,
    a non-empty iterable of (row, col) tuples."""
    rows = [p[0] for p in component]
    cols = [p[1] for p in component]
    return (min(cols), max(cols), min(rows), max(rows))
def load_im(path):
    """Load *path* as 8-bit grayscale; mirror left-side images.

    The side letter is parsed from a "<name>_<L|R>.<ext>" filename; "L"
    images are flipped horizontally so every breast faces the same way.
    Returns (pixel_array, side).
    NOTE(review): path.split("_")[1] breaks if the directory part of the
    path itself contains an underscore.
    """
    im = PIL.Image.open(path).convert('L')
    side = path.split("_")[1].split(".")[0]
    if side == "L":
        im = im.transpose(PIL.Image.FLIP_LEFT_RIGHT)
    return np.array(im), side
def pick_best_component(edges):
    """Choose the edge component most likely to be the pectoral boundary.

    Scores each component by length, linearity E_c (endpoint distance over
    pixel count, capped at 1) and an extent ratio E_x; prefers unusually
    long components (length > mean + stdev) with agreement between the
    metrics.  Returns the winning component (list of pixels) or None.
    NOTE(review): assumes every component yields >= 2 endpoint pixels —
    ends[1] raises IndexError otherwise.
    """
    pectoral_boundary = None
    components = return_connected_components(edges)
    if len(components) == 0:
        return None
    lengths = []
    E_c = []
    E_x = []
    for i in range(len(components)):
        lengths.append(len(components[i]))
        ends = []
        for item in components[i]:
            if len(get_nonzero_neighbors(edges, item)) <= 2:
                ends.append(item)
        if len(ends) > 2:
            ends = find_greatest_dist(ends)
        dist = ((ends[0][0] - ends[1][0]) ** 2 + (ends[0][1] - ends[1][1]) ** 2) ** 0.5
        if dist > lengths[i]:
            dist = lengths[i]
        E_c.append(dist / lengths[i])
        (l,r,t,b) = get_bounding_box(components[i])
        # NOTE(review): precedence makes this (length / width) * height, not
        # length / (width * height); also divides by zero for components one
        # pixel wide.  Kept as written.
        E_x.append(lengths[i] / abs(r-l)*abs(t-b))
    mean = statistics.mean(lengths)
    std = 0
    if(len(lengths) > 1):
        std = statistics.stdev(lengths)
    T_hat = mean + std
    # Indices of "unusually long" components.
    long_bois = []
    for i in range(len(lengths)):
        if lengths[i] > T_hat:
            long_bois.append(i)
    if len(long_bois) == 1:
        pectoral_boundary = components[long_bois[0]]
    elif len(long_bois) > 1:
        # Several long candidates: pick by agreement between the metrics.
        best_length = long_bois[0]
        best_E_c = long_bois[0]
        best_E_x = long_bois[0]
        for i in long_bois:
            if lengths[i] > lengths[best_length]:
                best_length = i
            if E_c[i] > E_c[best_E_c]:
                best_E_c = i
            if E_x[i] > E_x[best_E_x]:
                best_E_x = i
        if best_length == best_E_c or best_length == best_E_x:
            pectoral_boundary = components[best_length]
        elif best_E_c == best_E_x:
            pectoral_boundary = components[best_E_c]
        else:
            pectoral_boundary = components[best_length]
    else:
        # No outlier-long component: search all, but demand metric agreement
        # — otherwise give up (return None).
        best_length = 0
        best_E_c = 0
        best_E_x = 0
        for i in range(len(components)):
            if lengths[i] > lengths[best_length]:
                best_length = i
            if E_c[i] > E_c[best_E_c]:
                best_E_c = i
            if E_x[i] > E_x[best_E_x]:
                best_E_x = i
        if best_length == best_E_c or best_length == best_E_x:
            pectoral_boundary = components[best_length]
        elif best_E_c == best_E_x:
            pectoral_boundary = components[best_E_c]
        else:
            pectoral_boundary = None
    return pectoral_boundary
def grow_boundaryv2(im, original, component, start, end):
    """Extend the boundary *component* to the top and bottom of the image.

    From the component's two extreme endpoints, steps 4 rows at a time up
    and down; at each step the column (within +-4 of the previous one and
    left of the skin-line x extent) whose 9x9 mean intensity in *original*
    best matches the boundary's starting intensity is chosen.  The chosen
    waypoints are then joined with interpolated segments.  Returns a new
    image; *im* itself is returned unchanged when start/end is missing.
    """
    new_im = im.copy()
    if start == None or end == None:
        return im
    ends = []
    for item in component:
        if len(get_nonzero_neighbors(im, item)) <= 2:
            ends.append(item)
    if len(ends) > 2:
        ends = find_greatest_dist(ends)
    segment_start = None
    segment_end = None
    # segment_start = lower endpoint (greater row); segment_end = upper one.
    if ends[0][0] > ends[1][0]:
        segment_start = ends[0]
        segment_end = ends[1]
    else:
        segment_start = ends[1]
        segment_end = ends[0]
    new_points = []
    grow_up_points = [segment_end]
    grow_down_points = [segment_start]
    spot_to_grow = segment_end
    # grow up
    # maxX bounds the column search to the skin-line extent.
    maxX = 0
    if start is not None:
        maxX = max([maxX, start[1]])
    if end is not None:
        maxX = max([maxX, end[1]])
    starting_intensity = (get_avg_intensity(original, segment_start, 9) + get_avg_intensity(original, segment_end, 9)) / 2
    while spot_to_grow[0] > 4:
        best_spot = spot_to_grow[1]
        best_diff = abs(starting_intensity - get_avg_intensity(original, (spot_to_grow[0]-4, spot_to_grow[1]), 9))
        for i in range(spot_to_grow[1]-4, spot_to_grow[1]+4):
            if i >= 0 and i < im.shape[1] and i < maxX:
                spot_intensity = get_avg_intensity(original, (spot_to_grow[0]-4, i), 9)
                diff = abs(starting_intensity - spot_intensity)
                if diff < best_diff:
                    best_diff = diff
                    best_spot = i
        new_points.append((max([spot_to_grow[0]-4, 0]), best_spot))
        spot_to_grow = (max([spot_to_grow[0]-4, 0]), best_spot)
        grow_up_points.append((max([spot_to_grow[0]-4, 0]), best_spot))
    # Snap the last waypoint onto the top border if we stopped just short.
    if(spot_to_grow[0] <= 4 and spot_to_grow[0] > 0):
        grow_up_points.append((0, grow_up_points[len(grow_up_points)-1][1]))
        new_points.append((0, grow_up_points[len(grow_up_points)-1][1]))
    spot_to_grow = segment_start
    # grow down
    starting_intensity = (get_avg_intensity(original, segment_start, 9) + get_avg_intensity(original, segment_end, 9)) / 2
    while spot_to_grow[1] > 4 and spot_to_grow[0] < im.shape[0] - 1 - 4:
        best_spot = spot_to_grow[1]
        best_diff = abs(starting_intensity - get_avg_intensity(original, (spot_to_grow[0]+4, spot_to_grow[1]), 9))
        for i in range(spot_to_grow[1]-4, spot_to_grow[1]+4):
            if i >= 0 and i < im.shape[1] and i < maxX:
                spot_intensity = get_avg_intensity(original, (spot_to_grow[0]+4, i), 9)
                diff = abs(starting_intensity - spot_intensity)
                if diff < best_diff:
                    best_diff = diff
                    best_spot = i
        new_points.append((min([spot_to_grow[0]+4, im.shape[0]-1]), best_spot))
        spot_to_grow = (min([spot_to_grow[0]+4, im.shape[0]-1]), best_spot)
        grow_down_points.append((min([spot_to_grow[0]+4, im.shape[0]-1]), best_spot))
    # Snap onto the bottom border if we stopped just short of it.
    if(spot_to_grow[0] > im.shape[0] - 1 - 4 and spot_to_grow[0] < im.shape[0] - 1):
        grow_down_points.append((im.shape[0]-1, grow_down_points[len(grow_down_points)-1][1]))
        new_points.append((im.shape[0]-1, grow_down_points[len(grow_down_points)-1][1]))
    #for item in new_points:
    #    new_im[item[0], item[1]] = 1
    if DEBUG:
        cv2.imshow("pre-joining", new_im)
    # Join consecutive waypoints so the boundary is a connected curve.
    for i in range(len(grow_up_points)-1):
        new_im = connect_two_points(new_im, [grow_up_points[i], grow_up_points[i+1]])
    for i in range(len(grow_down_points)-1):
        new_im = connect_two_points(new_im, [grow_down_points[i], grow_down_points[i+1]])
    return new_im
def connect_two_points(im, points):
    """Rasterise a coarse line between points[0] and points[1] into *im*.

    Marks 10 evenly spaced interpolated pixels (t = 0 .. 0.9 — the second
    endpoint itself is never reached).  Returns the mutated image.
    """
    """
    m_inv = None
    if points[1][0] != points[0][0]:
        m_inv = (points[1][1] - points[0][1]) / (points[1][0] - points[0][0])
    for i in range(1, abs(points[1][0] - points[0][0])):
        x_spot = points[1][1]
        if m_inv is not None:
            x_spot = int(m_inv * i) + points[1][1]
        im[i+points[1][0], x_spot] = 1
    """
    iters = 10
    for i in range(0, iters):
        # NOTE(review): np.uint8() builds a zero-dim scalar used as the dtype
        # argument; .astype accepts it, but plain np.uint8 was likely meant.
        # The cast also wraps coordinates above 255.
        spot = lerp(points[0], points[1], i / iters).astype(np.uint8())
        im[spot[0], spot[1]] = 1
    return im
def lerp(a, b, p):
    """Linearly interpolate between *a* and *b* at fraction *p*.

    Sequences longer than one element are interpolated component-wise
    (indexed by *a*'s length); anything else is interpolated directly.
    The result is returned as a numpy array.
    """
    if len(a) > 1:
        mixed = [a[i] + (b[i] - a[i]) * p for i in range(len(a))]
    else:
        mixed = a + (b - a) * p
    return np.array(mixed)
def grow_boundary(im, original, component, start, end):
    """Single-pixel-step variant of grow_boundaryv2.

    Apparently superseded — finalize_boundary calls v2 — but kept in the
    file.  Grows the boundary one row at a time by 9x9 intensity matching
    (search window +-2 columns) and marks the chosen points directly,
    without joining segments.  Returns a new image; *im* is returned
    unchanged when start/end is missing.
    """
    new_im = im.copy()
    if start == None or end == None:
        return im
    ends = []
    for item in component:
        if len(get_nonzero_neighbors(im, item)) <= 2:
            ends.append(item)
    if len(ends) > 2:
        ends = find_greatest_dist(ends)
    segment_start = None
    segment_end = None
    # segment_start = lower endpoint (greater row); segment_end = upper one.
    if ends[0][0] > ends[1][0]:
        segment_start = ends[0]
        segment_end = ends[1]
    else:
        segment_start = ends[1]
        segment_end = ends[0]
    new_points = []
    spot_to_grow = segment_end
    # grow up
    # maxX bounds the column search to the skin-line extent.
    maxX = 0
    if start is not None:
        maxX = max([maxX, start[1]])
    if end is not None:
        maxX = max([maxX, end[1]])
    starting_intensity = get_avg_intensity(original, spot_to_grow, 9)
    while spot_to_grow[0] != 0:
        best_spot = spot_to_grow[1]
        best_diff = abs(starting_intensity - get_avg_intensity(original, (spot_to_grow[0]-1, spot_to_grow[1]), 9))
        for i in range(spot_to_grow[1]-2, spot_to_grow[1]+2):
            if i >= 0 and i < im.shape[1] and i < maxX:
                spot_intensity = get_avg_intensity(original, (spot_to_grow[0]-1, i), 9)
                diff = abs(starting_intensity - spot_intensity)
                if diff < best_diff:
                    best_diff = diff
                    best_spot = i
        new_points.append((spot_to_grow[0]-1, best_spot))
        spot_to_grow = (spot_to_grow[0]-1, best_spot)
    spot_to_grow = segment_start
    # grow down
    starting_intensity = get_avg_intensity(original, spot_to_grow, 9)
    while spot_to_grow[1] != 0 and spot_to_grow[0] != im.shape[0] - 1:
        best_spot = spot_to_grow[1]
        best_diff = abs(starting_intensity - get_avg_intensity(original, (spot_to_grow[0]+1, spot_to_grow[1]), 9))
        for i in range(spot_to_grow[1]-2, spot_to_grow[1]+2):
            if i >= 0 and i < im.shape[1] and i < maxX:
                spot_intensity = get_avg_intensity(original, (spot_to_grow[0]+1, i), 9)
                diff = abs(starting_intensity - spot_intensity)
                if diff < best_diff:
                    best_diff = diff
                    best_spot = i
        new_points.append((spot_to_grow[0]+1, best_spot))
        spot_to_grow = (spot_to_grow[0]+1, best_spot)
    for item in new_points:
        new_im[item[0], item[1]] = 1
    return new_im
def get_avg_intensity(im, spot, boxsize):
    """Mean intensity of *im* in a boxsize x boxsize window around *spot*,
    clipped to the image bounds; 0 when the window is fully outside."""
    total = 0
    count = 0
    row_lo = int(spot[0] - boxsize / 2)
    row_hi = int(spot[0] + boxsize / 2)
    col_lo = int(spot[1] - boxsize / 2)
    col_hi = int(spot[1] + boxsize / 2)
    for r in range(row_lo, row_hi):
        if 0 <= r < im.shape[0]:
            for c in range(col_lo, col_hi):
                if 0 <= c < im.shape[1]:
                    total = total + im[r, c]
                    count = count + 1
    if count != 0:
        return total / count
    return total
def fill_image(im):
    """Scanline-fill a binary boundary image.

    Per row, positions where the pixel value changes are collected (with
    index 0 prepended); spans between consecutive change points are then
    painted with alternating values starting at 1 — so the area left of
    the first edge becomes 1 and alternates from there, while pixels after
    the last change point keep their value.  finalize_boundary inverts the
    result afterwards.  Returns a filled copy; *im* is untouched.
    """
    img = im.copy()
    for i in range(img.shape[0]):
        switch_spots = [0]
        j = 0
        last_color = 0
        while j < img.shape[1]:
            if img[i,j] != last_color:
                switch_spots.append(j)
                last_color = img[i,j]
            j = j + 1
        # Rows with no transitions (len == 1) are left untouched.
        if len(switch_spots) > 1:
            color = 1
            for index in range(len(switch_spots)-1):
                for k in range(switch_spots[index], switch_spots[index+1]):
                    img[i,k] = color
                color = 1 - color
    return img
def finalize_boundary(edges, original, start, end):
    """Build the final pectoral-muscle mask from the cleaned edge image.

    Picks the best component, grows it across the image, scanline-fills,
    then inverts so the pectoral side becomes 0 and the rest 1.  Falls
    back to an all-ones mask (keep everything) when no boundary is found.
    """
    if start == None or end == None:
        return np.ones_like(original)
    component = pick_best_component(edges)
    if component is None:
        return np.ones_like(original)
    # Paint just the chosen component onto a clean canvas.
    im = np.zeros_like(edges)
    for item in component:
        im[item[0], item[1]] = 1
    if DEBUG:
        cv2.imshow("finalcomponent",im)
    im = grow_boundaryv2(im, original, component, start, end)
    if DEBUG:
        cv2.imshow("finalgrowth",im)
    im_filled = fill_image(im)
    if DEBUG:
        cv2.imshow("final_filled",im_filled)
    # Invert: filled (pectoral) region -> 0, remainder -> 1.
    for i in range(im_filled.shape[0]):
        for j in range(im_filled.shape[1]):
            im_filled[i,j] = 1 - im_filled[i,j]
    return im_filled
def segment_pectoral_from_breast(path):
    """Full pipeline: load a mammogram, mask the breast, remove the
    pectoral muscle, and return the segmented image at full resolution."""
    im_og, side = load_im(path)
    mask, bbox = breast_segment(im_og, scale_factor=1)
    if DEBUG:
        cv2.imshow("FirstMask", mask.astype(np.uint8))
    im_og = np.multiply(im_og, mask)
    # Work at quarter resolution; the final mask is scaled back up below.
    im = np.array(PIL.Image.fromarray(im_og).resize((int(im_og.shape[1]*0.25), int(im_og.shape[0]*0.25))))
    start, end = find_start_and_end_points(im)
    print("start: " + str(start))
    print("end: " + str(end))
    # Median filter + anisotropic diffusion: denoise while keeping edges.
    im = np.array(PIL.Image.fromarray(im).filter(PIL.ImageFilter.MedianFilter(size=9)))
    im = anisotropic_diffusion(im, niter=1)
    im = im.astype(np.uint8)
    edges = canny(im)
    edges = clean_canny(edges, start, end)
    final_mask = finalize_boundary(edges, im, start, end)
    if DEBUG:
        cv2.imshow("final_mask",final_mask)
    # Upscale the quarter-resolution mask and apply it to the original.
    final_image = np.multiply(im_og.astype(np.uint8), np.array(PIL.Image.fromarray(final_mask).resize((im_og.shape[1], im_og.shape[0]))).astype(np.uint8))
    if DEBUG:
        cv2.imshow("final_image",final_image)
        cv2.waitKey(0)
    return final_image
def segment_pectoral_from_breast_mask(path):
    """Same pipeline as segment_pectoral_from_breast, but return the 0/255
    pectoral mask upscaled to the original resolution instead of applying
    it to the image."""
    im_og, side = load_im(path)
    mask, bbox = breast_segment(im_og, scale_factor=1)
    if DEBUG:
        cv2.imshow("FirstMask", mask.astype(np.uint8))
    im_og = np.multiply(im_og, mask)
    # Quarter resolution for speed, as in segment_pectoral_from_breast.
    im = np.array(PIL.Image.fromarray(im_og).resize((int(im_og.shape[1]*0.25), int(im_og.shape[0]*0.25))))
    start, end = find_start_and_end_points(im)
    print("start: " + str(start))
    print("end: " + str(end))
    im = np.array(PIL.Image.fromarray(im).filter(PIL.ImageFilter.MedianFilter(size=9)))
    im = anisotropic_diffusion(im, niter=1)
    im = im.astype(np.uint8)
    edges = canny(im)
    edges = clean_canny(edges, start, end)
    final_mask = finalize_boundary(edges, im, start, end)
    # Scale the 0/1 mask to 0/255 and back to the original size.
    final_image = np.array(PIL.Image.fromarray(final_mask*255).resize((im_og.shape[1], im_og.shape[0]))).astype(np.uint8)
    return final_image
def save_all_crops(dir, saveDir):
    """Segment every image in *dir* and write the result to *saveDir* as PNG."""
    for im_name in os.listdir(dir):
        segmented = segment_pectoral_from_breast(os.path.join(dir, im_name))
        stem = im_name.split(".")[0]
        PIL.Image.fromarray(segmented).save(os.path.join(saveDir, stem + ".png"))
def save_all_crops_mask(dir, saveDir):
    """Write the pectoral mask of every image in *dir* to *saveDir* as PNG."""
    for im_name in os.listdir(dir):
        mask_img = segment_pectoral_from_breast_mask(os.path.join(dir, im_name))
        stem = im_name.split(".")[0]
        PIL.Image.fromarray(mask_img).save(os.path.join(saveDir, stem + ".png"))
def rectangle_cut_crop_for_analysis(dir, saveDir):
    """Black out the top-left rectangle (half height x quarter width) of
    each breast-masked image and save it as PNG — an analysis baseline."""
    for im_name in os.listdir(dir):
        im_og, side = load_im(os.path.join(dir, im_name))
        mask, bbox = breast_segment(im_og, scale_factor=1)
        if DEBUG:
            cv2.imshow("FirstMask", mask.astype(np.uint8))
        im_og = np.multiply(im_og, mask)
        out = np.array(PIL.Image.fromarray(im_og))
        out[0:int(out.shape[0] / 2), 0:int(out.shape[1] / 4)] = 0
        stem = im_name.split(".")[0]
        PIL.Image.fromarray(out).save(os.path.join(saveDir, stem + ".png"))
def mid_cut_crop_for_analysis(dir, saveDir):
    """Black out the central 2/3 x 2/3 region of each breast-masked image
    and save it as PNG — an analysis baseline."""
    for im_name in os.listdir(dir):
        im_og, side = load_im(os.path.join(dir, im_name))
        mask, bbox = breast_segment(im_og, scale_factor=1)
        if DEBUG:
            cv2.imshow("FirstMask", mask.astype(np.uint8))
        im_og = np.multiply(im_og, mask)
        out = np.array(PIL.Image.fromarray(im_og))
        out[int(out.shape[0] / 6):int(5 * out.shape[0] / 6), int(out.shape[1] / 6):int(5 * out.shape[1] / 6)] = 0
        stem = im_name.split(".")[0]
        PIL.Image.fromarray(out).save(os.path.join(saveDir, stem + ".png"))
def random_rect_crop_for_analysis(dir, saveDir):
    """Black out a random rectangle of each breast-masked image and save it
    as PNG — an analysis baseline.  The four randrange calls keep the
    original order (x0, x1, y0, y1) so RNG sequences match."""
    for im_name in os.listdir(dir):
        im_og, side = load_im(os.path.join(dir, im_name))
        mask, bbox = breast_segment(im_og, scale_factor=1)
        if DEBUG:
            cv2.imshow("FirstMask", mask.astype(np.uint8))
        im_og = np.multiply(im_og, mask)
        out = np.array(PIL.Image.fromarray(im_og))
        x0 = random.randrange(0, out.shape[1] - 1)
        x1 = random.randrange(x0, out.shape[1] - 1)
        y0 = random.randrange(0, out.shape[0] - 1)
        y1 = random.randrange(y0, out.shape[0] - 1)
        out[y0:y1, x0:x1] = 0
        stem = im_name.split(".")[0]
        PIL.Image.fromarray(out).save(os.path.join(saveDir, stem + ".png"))
def all_black_for_analysis(dir, saveDir):
    """Write an all-zero (black) PNG per input image — an analysis baseline.
    The breast mask is still computed, matching the original control flow."""
    for im_name in os.listdir(dir):
        im_og, side = load_im(os.path.join(dir, im_name))
        mask, bbox = breast_segment(im_og, scale_factor=1)
        if DEBUG:
            cv2.imshow("FirstMask", mask.astype(np.uint8))
        im_og = np.multiply(im_og, mask)
        out = np.array(PIL.Image.fromarray(im_og))
        out[:, :] = 0
        stem = im_name.split(".")[0]
        PIL.Image.fromarray(out).save(os.path.join(saveDir, stem + ".png"))
def all_white_for_analysis(dir, saveDir):
    """Write an all-255 (white) PNG per input image — an analysis baseline.
    The breast mask is still computed, matching the original control flow."""
    for im_name in os.listdir(dir):
        im_og, side = load_im(os.path.join(dir, im_name))
        mask, bbox = breast_segment(im_og, scale_factor=1)
        if DEBUG:
            cv2.imshow("FirstMask", mask.astype(np.uint8))
        im_og = np.multiply(im_og, mask)
        out = np.array(PIL.Image.fromarray(im_og))
        out[:, :] = 255
        stem = im_name.split(".")[0]
        PIL.Image.fromarray(out).save(os.path.join(saveDir, stem + ".png"))
def no_crop_for_analysis(dir, saveDir):
    """Re-save every image unmodified as PNG — the control case (no breast
    masking is applied here, unlike the other *_for_analysis helpers)."""
    for im_name in os.listdir(dir):
        pixels, side = load_im(os.path.join(dir, im_name))
        out = np.array(PIL.Image.fromarray(pixels))
        stem = im_name.split(".")[0]
        PIL.Image.fromarray(out).save(os.path.join(saveDir, stem + ".png"))
# Disable the cv2.imshow debug windows for the batch runs listed below.
DEBUG = False
#segment_pectoral_from_breast("../Images/NORMAL/N40_R.bmp")
#save_all_crops("../Images/CONTRALATERAL BREAST TO CANCEROUS/", "../Images/NewCroppingMethodv5/Contralateral/")
#save_all_crops("../Images/NORMAL/", "../Images/NewCroppingMethodv5/Normal/")
#save_all_crops("../Images/CANCER/", "../Images/NewCroppingMethodv5/Cancer/")
#save_all_crops_mask("../Images/CONTRALATERAL BREAST TO CANCEROUS/", "../Images/AutoCropMasks/Contralateral/")
#save_all_crops_mask("../Images/NORMAL/", "../Images/AutoCropMasks/Normal/")
#save_all_crops_mask("../Images/CANCER/", "../Images/AutoCropMasks/Cancer/")
#all_black_for_analysis("../Images/CONTRALATERAL BREAST TO CANCEROUS/", "../Images/AllBlackForAnalysis/Contralateral/")
#all_black_for_analysis("../Images/NORMAL/", "../Images/AllBlackForAnalysis/Normal/")
#all_black_for_analysis("../Images/CANCER/", "../Images/AllBlackForAnalysis/Cancer/")
#all_white_for_analysis("../Images/CONTRALATERAL BREAST TO CANCEROUS/", "../Images/AllWhiteForAnalysis/Contralateral/")
#all_white_for_analysis("../Images/NORMAL/", "../Images/AllWhiteForAnalysis/Normal/")
#all_white_for_analysis("../Images/CANCER/", "../Images/AllWhiteForAnalysis/Cancer/")
#no_crop_for_analysis("../Images/CONTRALATERAL BREAST TO CANCEROUS/", "../Images/NoCropForAnalysis/Contralateral/")
#no_crop_for_analysis("../Images/NORMAL/", "../Images/NoCropForAnalysis/Normal/")
#no_crop_for_analysis("../Images/CANCER/", "../Images/NoCropForAnalysis/Cancer/")
#mid_cut_crop_for_analysis("../Images/CONTRALATERAL BREAST TO CANCEROUS/", "../Images/MidCropForAnalysis/Contralateral/")
#mid_cut_crop_for_analysis("../Images/NORMAL/", "../Images/MidCropForAnalysis/Normal/")
#mid_cut_crop_for_analysis("../Images/CANCER/", "../Images/MidCropForAnalysis/Cancer/")
#rectangle_cut_crop_for_analysis("../Images/CONTRALATERAL BREAST TO CANCEROUS/", "../Images/RectangleCutCropForAnalysis/Contralateral/")
#rectangle_cut_crop_for_analysis("../Images/NORMAL/", "../Images/RectangleCutCropForAnalysis/Normal/")
#rectangle_cut_crop_for_analysis("../Images/CANCER/", "../Images/RectangleCutCropForAnalysis/Cancer/")
#random_rect_crop_for_analysis("../Images/CONTRALATERAL BREAST TO CANCEROUS/", "../Images/RandomRectCropForAnalysis/Contralateral/")
#random_rect_crop_for_analysis("../Images/NORMAL/", "../Images/RandomRectCropForAnalysis/Normal/")
#random_rect_crop_for_analysis("../Images/CANCER/", "../Images/RandomRectCropForAnalysis/Cancer/") |
988,785 | 74a02a6b872066f70a887f886f4f3379d74e9181 | """
Probabilty module
"""
def generate_cdf(token_dictionary, n_gram):
    """Turn n-gram counts into per-prefix cumulative distribution rows.

    Each row maps a final word to the cumulative probability of that word
    given its (n-1)-word prefix, accumulated in the row's insertion order.
    The table from generate_count_tables is modified in place and returned.
    """
    prefix_counts, table = generate_count_tables(token_dictionary, n_gram)
    for prefix, row in table.items():
        previous = ""
        for word in row:
            row[word] /= prefix_counts[prefix]
            if previous != "":
                # Add the running total so far to make the row cumulative.
                row[word] += row[previous]
            previous = word
    return table
def generate_probability_table(token_dictionary, n_gram):
    """Turn n-gram counts into per-prefix conditional probabilities.

    Like generate_cdf but without the cumulative sum: each row maps a final
    word to P(word | prefix).  The count table is modified in place and
    returned.
    """
    prefix_counts, table = generate_count_tables(token_dictionary, n_gram)
    for prefix, row in table.items():
        for word in row:
            row[word] /= prefix_counts[prefix]
    return table
def generate_count_tables(token_dictionary, n_gram):
    """Aggregate n-gram token counts.

    token_dictionary maps n-gram sequences (e.g. word tuples) to counts.
    Returns (prefix_totals, table): prefix_totals maps each (n-1)-gram
    prefix to its total count; table maps each prefix to a dict of
    {final_word: count}.
    """
    prefix_totals = dict()
    table = dict()
    for token, count in token_dictionary.items():
        prefix = token[0:n_gram - 1]
        final_word = token[n_gram - 1]
        prefix_totals[prefix] = prefix_totals.get(prefix, 0) + count
        table.setdefault(prefix, dict())[final_word] = count
    return (prefix_totals, table)
|
988,786 | acc0cb85d882f229dd0c0f31e1967c907fb2a4ff | import requests
from bs4 import BeautifulSoup
from re import sub
from time import sleep
from decimal import Decimal
from Results import Results
class USCEfficiency:
    """Scrape urbansportsclub.com for a member's total check-ins and total
    payments so the cost per check-in can be reported (via Results)."""
    base_url = 'https://www.urbansportsclub.com/en'
    login_url = 'https://urbansportsclub.com/en/login'
    # Browser-like User-Agent sent with every GET request.
    get_headers = {'User-Agent': 'Mozilla/5.0'}
    def __init__(self, email, password):
        """Store credentials and open a requests session that keeps cookies."""
        self.email = email
        self.password = password
        self.session = requests.Session()
    def get(self):
        """Run the whole flow: log in, scrape both figures, print the report."""
        self.login()
        self.get_number_of_check_ins()
        self.get_total_amount_paid()
        self.print_results()
    def parse_amount(self, amount):
        """Convert a price string like '60,00€' to a Decimal in euros:
        strip everything but digits/dots, then divide by 100 to restore
        the cents separated by the comma."""
        return Decimal(sub(r'[^\d.]', '', amount)) / Decimal(100)
    def parse_row(self, row):
        """Return the text of the last <div> in a payment-table row (the price)."""
        columns = row.find_all('div')
        price = columns[-1]
        return price.text.strip()
    def login(self):
        """Fetch the login page, extract its hidden anti-forgery input, and
        POST the credentials on the shared session."""
        self.get_headers = {'User-Agent': 'Mozilla/5.0'}
        login_response = self.session.get(self.base_url, headers=self.get_headers)
        login_soup = BeautifulSoup(login_response.text, 'html.parser')
        # Find DOM element: hidden <input> inside the login form.
        login_form = login_soup.find(id='login-form')
        hidden_input = login_form.find(type='hidden')
        # Extract id/value from the hidden input.
        hidden_key = hidden_input['id']
        hidden_value = hidden_input['value']
        # Configure headers and form body from the hidden field.
        # NOTE(review): hidden_key is also sent as x-newrelic-id — presumably
        # what the site expects; confirm against the current page markup.
        data = f'{hidden_key}={hidden_value}&check=&email={self.email}&password={self.password}&remember-me=1'
        post_headers = {
            'content-type': 'application/x-www-form-urlencoded',
            'User-Agent': 'Mozilla/5.0',
            'x-newrelic-id': hidden_key
        }
        self.session.post(self.login_url, data=data, headers=post_headers)
        # Brief pause between requests.
        sleep(0.5)
    def get_number_of_check_ins(self):
        """Scrape the membership page for the lifetime check-in total and
        store it on self.number_of_checkins as a Decimal."""
        membership_response = self.session.get(self.base_url + '/profile/membership', headers=self.get_headers)
        membership_soup = BeautifulSoup(membership_response.text, 'html.parser')
        # Find DOM element with value: <span>142</span> Check-ins
        check_ins = membership_soup.find('span', class_='smm-checkin-stats__total')
        self.number_of_checkins = Decimal(check_ins.text.strip())
        sleep(0.5)
    def get_total_amount_paid(self):
        """Scrape the payment-history table, parse each row's price and
        store the sum on self.total_cost."""
        payment_history_response = self.session.get(self.base_url + '/profile/payment-history', headers=self.get_headers)
        payment_history_soup = BeautifulSoup(payment_history_response.text, 'html.parser')
        # Find DOM elements: table and rows
        # Find DOM element with value: <span>60,00€</span>
        table = payment_history_soup.find('div', class_='smm-payment-history__table')
        rows = table.select('div .smm-payment-history__table-row')
        prices_column = map(self.parse_row, rows)
        list_of_prices = map(self.parse_amount, prices_column)
        self.total_cost = sum(list_of_prices)
    def print_results(self):
        """Hand both scraped figures to Results for display."""
        results = Results(self.number_of_checkins, self.total_cost)
        results.display()
988,787 | e1aa9fcd162ffe00b1067c6abdd5172f51903fdc | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class bbs(models.Model):
    """A forum (BBS) post with denormalized view/comment counters."""
    title = models.CharField(max_length=150,unique=True)
    # Choices: stored value first, human-readable label second.
    category_option = (
        ('linux','Linux BBS'),
        ('python','PY BBS'))
    category = models.CharField(max_length=50,choices=category_option)
    content = models.TextField()
    view_count = models.IntegerField(default=0)
    comment_count = models.IntegerField(default=0)
    # NOTE(review): default 1001 presumably marks an "unranked" post — confirm.
    ranking = models.IntegerField(default=1001)
    # Django 1.x-style ForeignKey (no on_delete); requires Django < 2.0.
    author = models.ForeignKey(User)
    publish_date = models.DateField()
    modify_date = models.DateField()
    def __unicode__(self):
        # Python 2 display name (Django 1.x admin/shell).
        return self.title
class Publisher(models.Model):
    """A book publisher with its mailing address and website."""
    name = models.CharField(max_length=30)
    address = models.CharField(max_length=50)
    city = models.CharField(max_length=60)
    state_province = models.CharField(max_length=30)
    country = models.CharField(max_length=50)
    website = models.URLField()
    def __unicode__(self):
        # Python 2 display name (Django 1.x admin/shell).
        return self.name
class Author(models.Model):
    """An author identified by name and e-mail address."""
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=50)
    email = models.EmailField()
    def __unicode__(self):
        # Python 2 display name (Django 1.x admin/shell).
        return self.last_name
class Book(models.Model):
    """A published book: many authors, exactly one publisher."""
    title = models.CharField(max_length=100)
    authors = models.ManyToManyField(Author)
    # Django 1.x-style ForeignKey (no on_delete); requires Django < 2.0.
    publisher = models.ForeignKey(Publisher)
    publication_date = models.DateField()
    def __unicode__(self):
        # Python 2 display name (Django 1.x admin/shell).
        return self.title
|
988,788 | bad6d5f365c1cd1bc1d9d2692e6407ea918f5923 |
def trans(S, L):
    """Toggle the '0'/'1' entries of L at every index in S, in place.

    Returns the same (mutated) list for chaining.
    """
    for idx in S:
        L[idx] = '1' if L[idx] == '0' else '0'
    return L
def helper(L,p,times):
    # Recursively "press" lamp p (toggling it and its neighbours via trans)
    # and append to the global list Re how many presses were made once every
    # lamp is off ('1' not in L), giving up after more than 10 presses.
    # NOTE(review): L is mutated in place by trans and shared across calls,
    # and the `for j` loop returns on its very first iteration, so only
    # j == 0 is ever explored — this looks like an incomplete backtracking
    # search; confirm intended behaviour before relying on the result.
    if '1' not in L or times>10:
        # Solved (or gave up): record the press count.
        Re.append(times)
        return
    else:
        for j in range(8):
            if p==7:
                # Pressing the last lamp wraps around: toggles 0, 6 and 7.
                return helper(trans([0, 6, 7], L),j,times+1)
            else:
                # For p == 0 this is [-1, 0, 1]; index -1 wraps to the last
                # lamp via Python's negative indexing.
                return helper(trans([p - 1, p, p + 1], L),j,times+1)
# Run the search starting from each of the 8 lamp positions and report the
# smallest number of presses recorded in Re.
L=['0', '0', '0', '0', '0', '0', '0', '0']
Re=[]
for i in range(8):
    helper(L,i,0)
# BUG FIX: list.sort() returns None, so the original `Re.sort()[0]`
# raised TypeError. min(Re) yields the smallest press count directly.
print(min(Re))
|
988,789 | c5be537e0182985ed9bf428aed2f9a1d409ae1eb | odd = ['a','c','e','g']
# Read a square like "a1"; a1 is a dark square, and colour alternates with
# each step along a file or a rank.
column, row = input('please enter a position on the chess board ')
row = int(row)
# A square is black exactly when "file is odd (a/c/e/g)" matches
# "rank is odd"; otherwise it is white.
if (column in odd) == (row % 2 == 1):
    print('black')
else:
    print('white')
988,790 | 3fff8b617802a0a6b44f0155cd5a6d96cd8a2e63 | from django.urls import path
from . import views
from django.contrib.auth import views as auth_views
# URL namespace: reverse these routes as 'app:<name>', e.g. {% url 'app:index' %}.
app_name = 'app'
urlpatterns = [
    path('', views.index, name="index"),
    # Authentication is delegated to Django's built-in class-based views.
    path('login/', auth_views.LoginView.as_view(template_name='app/login.html'), name='login'),
    path('logout/', auth_views.LogoutView.as_view(), name='logout'),
    path('signup/', views.signup, name='signup'),
    path('sample/', views.sample, name='sample'),
    # NOTE(review): no trailing slash here, unlike every other route — confirm.
    path('sample_new', views.sample_new, name='sample_new'),
    path('book/<int:book_id>/', views.consideration, name='consideration'),  # book detail page
    path('book/<str:category>/', views.books_category, name='books_category'),  # books by category
    path('new_book_consider/', views.new_book_consider, name='new_book_consider'),  # create
    path('delete/<int:book_id>/', views.delete, name='delete'),  # delete
    path('edit/<int:book_id>/', views.edit, name='edit'),  # edit
]
988,791 | f62e5eaf7c7449473933bd0f2abd05f319e035e3 | from bs4 import BeautifulSoup
import requests
import spotipy
from spotipy.oauth2 import SpotifyOAuth
import os
# SPOTIFY AUTHENTICATION USING SPOTIPY
# Credentials come from the environment; SpotifyOAuth caches the token in
# token.txt so re-runs do not repeat the browser authorization flow.
CLIENT_ID = os.environ['SPOTIPY_CLIENT_ID']
CLIENT_SECRET = os.environ['SPOTIPY_CLIENT_SECRETS']
sp = spotipy.Spotify(
    auth_manager=SpotifyOAuth(
        scope="playlist-modify-private",
        redirect_uri="http://example.com",
        client_id=CLIENT_ID,
        client_secret=CLIENT_SECRET,
        show_dialog=True,
        cache_path="token.txt"
    )
)
user_id = sp.current_user()["id"]
print(user_id)
# BEAUTIFULSOUP - WEB SCRAPING TO GET LIST OF TOP SONGS FOR INPUTTED DATE
# BUG FIX: the prompt said "Type the data in this format" — corrected to "date".
date = input('Which year would you like to travel to? Type the date in this format, YYYY-MM-DD:')
URL = 'https://www.billboard.com/charts/hot-100/'
response = requests.get(URL+date)
webpage = response.text
soup = BeautifulSoup(webpage, 'html.parser')
songs = soup.find_all(name='span', class_='chart-element__information__song text--truncate color--primary')
song_names = [song.getText() for song in songs]
# GETTING SONG URIS
# Search each title restricted to the chart year; titles Spotify cannot
# find are skipped rather than aborting the whole run.
song_uris = []
year = date.split("-")[0]
for song in song_names:
    result = sp.search(q=f"track:{song} year:{year}", type="track")
    try:
        uri = result["tracks"]["items"][0]["uri"]
        song_uris.append(uri)
    except IndexError:
        print(f"{song} doesn't exist in Spotify. Skipped.")
# CREATING PLAYLIST AND ADDING SONGS
playlist = sp.user_playlist_create(user=user_id, name=f"{date} Billboard 100", public=False)
sp.playlist_add_items(playlist_id=playlist["id"], items=song_uris)
988,792 | f9f8aced4aa81be7be0839ba7da9a334161e287d | import os
import json
from django.conf import settings
from django.http.response import HttpResponse
from django.shortcuts import render
from rest_framework import viewsets
import plotly
import plotly.graph_objs as go
from stocks.models import Stock
from stocks.serializer import StockSerializer
class StockViewSet(viewsets.ModelViewSet):
    """REST CRUD endpoint for Stock, filterable by ?id=, ?title= and ?stock_count=."""
    queryset = Stock.objects.all()
    serializer_class = StockSerializer
    filter_fields = ("id", "title", 'stock_count')
def index(_):
    """Serve the static Vue grid page; the request argument is unused."""
    path = os.path.join(settings.STATICFILES_DIRS[0], "vue_grid.html")
    # Use a context manager so the file handle is closed promptly
    # (the original left the handle open for the GC to collect).
    with open(path) as f:
        html = f.read()
    return HttpResponse(html)
def plotly_view(request):
    """Render a bar chart of stock counts per title as Plotly JSON."""
    # Single pass over the queryset, then split into the two axis lists.
    rows = [(stock.title, stock.stock_count) for stock in Stock.objects.all()]
    x_list = [title for title, _ in rows]
    y_list = [count for _, count in rows]
    figure = go.Figure(
        data=[go.Bar(x=x_list, y=y_list)],
        layout=go.Layout(title='second graph'))
    graph_json = json.dumps(figure, cls=plotly.utils.PlotlyJSONEncoder)
    return render(request, "stocks/plotly.html",
                  {"graph_json": graph_json})
|
988,793 | 8a2cc3371e67c085fafb7c344549918f67269de4 | from scipy.spatial.distance import euclidean
from sklearn.cluster import KMeans, AgglomerativeClustering
from sklearn.metrics import silhouette_score
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import kneighbors_graph
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import itertools
from scipy.sparse import csr_matrix
from utils import *
'''
NORMALIZE THE CENTROID DIFFERENCES BY SOME STANDARD MEASURE OF BRAIN SIZE
'''
def createGroupingFile(clusters, filepath):
    """Write a ROI grouping file from a {cluster_name: indices} dict.

    Delegates the actual file writing / printing to createROIGrouping and
    printROIGrouping (imported from utils).  Python 2 code (iteritems).
    """
    # convert back to indices
    names = []
    all_groups = []
    for c,v in clusters.iteritems():
        names.append(c)
        all_groups.append(v)
    createROIGrouping(names, all_groups, filepath)
    printROIGrouping(names, all_groups)
def parseResult(data, lut_table):
    '''
    Unpack a MATLAB result structure (as loaded by scipy.io) into
    per-region and per-subject dictionaries.

    region values are ordered by sorted subjects (low->high)

    Returns (by_region, by_subj, subjects) where region codes/indices are
    translated to names through lut_table where possible.
    '''
    region_list = data['region_list']
    regions = data['regions']
    subj_list = data['subj_list']
    subjects = data['subjects']
    # scipy.io wraps everything in singleton arrays; peel layers until the
    # real sequence is reached.
    while len(subjects) <= 1:
        subjects = subjects[0]
    while len(regions) <= 1:
        regions = regions[0]
    while len(region_list) <= 1:
        region_list = region_list[0]
    while len(subj_list) <= 1:
        subj_list = subj_list[0]
    by_region = {}
    for region_code, region in zip(regions, region_list):
        while len(region) <= 1:
            region = region[0]
        # NOTE(review): `key` is computed but unused; the same lookup is
        # repeated on the next line.
        key = lut_table.get(region_code, region_code)
        by_region[lut_table.get(region_code, region_code)] = region
    by_subj = {}
    for subj_id, subj_result in zip(subjects, subj_list):
        while len(subj_result) <= 1:
            subj_result = subj_result[0]
        subj_data = {}
        for i, row in enumerate(subj_result):
            # Rows are 1-indexed in the FreeSurfer lookup table.
            lut_index = i+1
            row = row[0]
            if len(row) == 0:
                continue
            lut_name = lut_table.get(lut_index, lut_index)
            fields = row.dtype.names
            rowdata = row[0]
            extracted = {}
            for k,v in zip(fields, rowdata):
                # Unwrap nested singleton ndarrays/voids to plain scalars.
                while (isinstance(v, np.ndarray) or isinstance(v, np.void)) and len(v) <= 1:
                    v = v[0]
                extracted[k] = v
            subj_data[lut_name] = extracted
        by_subj[subj_id] = subj_data
    return by_region, by_subj, subjects
def createConnectivityGraph(region_list):
    '''
    Build a sparse adjacency matrix over region_list in which ventricles,
    white matter, and all remaining regions each form a fully connected
    group (self-links included) with no edges between groups.

    Every ventricle/white-matter name is assumed to be present in
    region_list (index() would raise ValueError otherwise).
    '''
    ventricles = ['Left-Lateral-Ventricle','4th-Ventricle','CSF','3rd-Ventricle','5th-Ventricle','Left-Inf-Lat-Vent','Right-Inf-Lat-Vent']
    whitematter = ['Optic-Chiasm','CC_Central','Left-Cerebral-White-Matter','Left-Cerebellum-White-Matter','CC_Anterior','Right-Cerebellum-White-Matter','ctx-rh-corpuscallosum','CC_Mid_Posterior','CC_Posterior','WM-hypointensities','CC_Mid_Anterior','Right-Cerebral-White-Matter']
    #ambiguous = ['Optic-Chiasm','ctx-rh-unknown','ctx-lh-unknown']
    ambiguous = []
    others = [_ for _ in region_list if _ not in ventricles+whitematter+ambiguous]
    size = len(region_list)
    graph = np.zeros((size, size))
    # combinations_with_replacement also yields the (i, i) pairs, so the
    # diagonal of each block is set as well.
    for group in (ventricles, whitematter, others):
        indices = [region_list.index(name) for name in group]
        for i, j in itertools.combinations_with_replacement(indices, 2):
            graph[i, j] = 1
            graph[j, i] = 1
    return csr_matrix(graph)
def runClustering(obs, conn, clusters, print_out=False):
    '''
    Cluster the observation matrix into `clusters` groups with Ward
    agglomerative clustering constrained by the connectivity matrix `conn`,
    and return {cluster_label: [lut_indices]}.

    NOTE(review): relies on the module-level globals `segments` and
    `lut_table` (defined in the __main__ block) — confirm call order.
    Python 2 code (print statements, iteritems).

    # KMEANS
    model = KMeans(n_clusters=clusters, max_iter=1000, tol=0.00001,
            n_jobs=-1, copy_x=True, verbose=False)
    labels = model.fit_predict(obs)
    try:
        score = silhouette_score(obs,labels,metric='euclidean')
    except Exception as e:
        print e
        score = np.nan
    '''
    # AGGLOMERATIVE
    model = AgglomerativeClustering(n_clusters=clusters,
                                    affinity='euclidean',
                                    linkage='ward',
                                    connectivity=conn)
    labels = model.fit_predict(obs)
    try:
        score = silhouette_score(obs,labels,metric='euclidean')
    except Exception as e:
        print e
        score = np.nan
    c = (clusters, 'agg', score, model)
    print "Clusters: %s, Neighbors: %s, Score: %s" % (c[0],c[1],c[2])
    # NOTE(review): fit_predict is run a second time on the same model/data.
    labels = c[3].fit_predict(obs)
    by_cluster_label = {_:[] for _ in list(set(labels))}
    for cluster, segment in zip(labels, segments):
        # Reverse-lookup the FreeSurfer LUT index for this segment name.
        segment_index = [k for k,v in lut_table.iteritems() if v == segment][0]
        by_cluster_label[cluster].append(segment_index)
        #by_cluster_label[cluster].append(segment)
    if print_out:
        # Emit copy/paste-able python assignments describing the grouping.
        roinames = []
        varnames = []
        for cluster, indices in by_cluster_label.iteritems():
            cluster_varname = 'Cluster%s' % cluster
            print '%s=%s' % (cluster_varname,indices)
            roinames.append("%s" % cluster_varname)
            varnames.append(cluster_varname)
        print "all_groups=[%s]" % (','.join(varnames),)
        print "names=%s" % roinames
        print "\n\n"
    return by_cluster_label
def pca_transform(obs, components):
    """Project obs onto its principal components and return the reduced matrix.

    `components` may be an int (number of components) or a float in (0, 1)
    meaning "enough components to explain this fraction of the variance",
    as supported by sklearn's PCA.  Python 2 code (print statement).
    """
    # PCA
    pca_model = PCA(n_components=components, copy=True, whiten=False)
    obs_pca = pca_model.fit_transform(obs)
    '''
    for x in pca_model.explained_variance_ratio_:
        print x
    # no pca
    obs_pca = obs
    '''
    print "PCA: %s -> %s" % (obs.shape, obs_pca.shape)
    return obs_pca
if __name__ == '__main__':
output_mat = "../aggOutput.mat"
data = loadMATFile(output_mat)
lut_file = "../FreeSurferColorLUT.txt"
lut_table = importFreesurferLookup(lut_file)
by_region, by_subj, subjects = parseResult(data, lut_table)
'''
# find centroid of centroids + normalize it somehow!!! (divide by original image x,y,z)
centroids = {}
for r in by_region.keys():
subj_centroids = []
for s in subjects:
cur_centroid = by_subj[s].get(r,{}).get('centroid', None)
if cur_centroid is not None:
subj_centroids.append(cur_centroid)
centroids[r] = np.mean(np.array(subj_centroids), axis=0)
'''
binding_distributions = []
centroid_vectors = []
segments = []
variances = {}
means = {}
for k,v in by_region.iteritems():
v = np.nan_to_num(v)
binding_distributions.append(v)
#centroid_vectors.append(centroids[k])
segments.append(k)
variances[k] = np.std(v)
means[k] = np.mean(v)
'''
sorted_means = sorted(means.items(), key=lambda x: x[1], reverse=True)
for k,v in sorted_means:
print "%s: %s +/- %s" % (k,v, variances[k])
'''
obs = np.array(binding_distributions)
# Append centroids
'''
appended = []
for i,x in enumerate(obs):
appended.append(np.append(x, centroid_vectors[i]))
obs = np.array(appended)
'''
# Scale (not really necessary as scans are already prenormalized)
'''
scaler = StandardScaler(copy=True, with_mean=True, with_std=True)
obs = scaler.fit_transform(obs)
'''
# CHOOSE NUMBER OF CLUSTERS
'''
ks=range(20,60)
gaps_raw, valid_ks_raw = gap(obs, nrefs=100, ks=ks)
print "RAW GAPS"
print gaps_raw
print sorted(valid_ks_raw, key=lambda x: x[1], reverse=True)
fig = plt.figure()
plt.plot(ks, gaps_raw)
plt.show()
sys.exit(1)
'''
num_clusters = [23,55]
# RUN HIERARCHICAL CLUSTERING
# RUN K_MEANS
scores = []
# Get connectivity graph
connectivity = createConnectivityGraph(segments)
fractions = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
results = {_:[] for _ in fractions}
for f in fractions:
for i in range(15):
# split obs in half
half = int(obs.shape[1] * f)
random_indices = np.random.choice(range(obs.shape[1]), size=half, replace=False)
obs_1 = obs[:,random_indices]
obs_2 = obs
observations = [obs_1, obs_2]
# Components explain 99% of variance
n_comp = 0.9
by_k = {}
for cur_k in num_clusters:
outputs = []
for obs in observations:
obs_pca = pca_transform(obs, components=n_comp)
#obs_pca = obs
o = runClustering(obs_pca, connectivity, cur_k)
outputs.append(o)
by_k[cur_k] = outputs
# check out grouping differences
for k, outputs in by_k.iteritems():
commons = []
values = [[sorted(_) for _ in o.values()] for o in outputs]
numregions = 0
# find values that appear in all
for o in values[0]:
numregions += len(o)
common = all([(o in _ ) for _ in values])
if common:
commons.append(o)
# remove common values
for v in values:
for c in commons:
v.remove(c)
print "Num Clusters: %s" % k
commons = [i for _ in commons for i in _]
commonPercent = len(commons) / float(numregions)
results[f].append((k, commonPercent))
print "%s/%s in common: %s " % (len(commons),numregions,commons)
'''
for i, uv in enumerate(values):
print "Unique %s" % i
for _ in uv:
print "\t%s" % _
'''
print '\n'
points = defaultdict(list)
for k in sorted(results.keys()):
v = results[k]
by_clusters = defaultdict(list)
for a,b in v:
by_clusters[a].append(b)
for c,vals in dict(by_clusters).iteritems():
points[c].append((k, np.mean(vals)))
print points
|
988,794 | 11a9bc1f37ee7a74620d11069f2c450d1d7ed01b | n = int(input())
# Competitive-programming solution: for each of the n test cases, read the
# two game parameters a and b and print the winner, CHEF or CHEFINA.
# NOTE(review): the game rules are not visible here; the case analysis below
# is taken on faith from the submitted solution.
for _ in range(n):
    a, b = map(int, input().split())
    if abs(a - b) == 1:
        # Values differ by exactly one: parity of the larger value decides.
        if max(a, b) % 2 == 0:
            print("CHEF")
        else:
            print("CHEFINA")
    elif a > b:
        print("CHEF")
    elif b > a:
        print("CHEFINA")
    else:
        # a == b: parity of the shared value decides (inverted vs. above).
        if a % 2 == 0:
            print("CHEFINA")
        else:
            print("CHEF")
988,795 | b21dfd2d72e9ea1f0828543f3cbb8a5bd4711bce | from lab5 import student
class PHD_student(student):
    """A student pursuing a PhD: adds an advisor and a research-paper count."""
    def __init__(self,name,year,gpa,current_classes,advisor,numberOfResearchPapers):
        super(PHD_student,self).__init__(name,year,gpa,current_classes)
        self.advisor = advisor
        self.numberOfResearchPapers = numberOfResearchPapers
    def addYear(self):
        # Advance one academic year, capped at year 6.
        if(self.year < 6):
            self.year += 1
class athlete_student(student):
    """A student athlete: sport, experience, scholarship and starter status."""
    def __init__(self,name,year,gpa,current_classes,sport,yearsOfExercise,onScholarship,starter):
        super(athlete_student,self).__init__(name,year,gpa,current_classes)
        self.sport = sport
        self.yearsOfExercise = yearsOfExercise
        self.onScholarship = onScholarship
        self.starter = starter
    def addYear(self):
        # Advance one academic year, capped at year 4 (undergraduate).
        if(self.year < 4):
            self.year += 1
    def setgpa(self,newgpa):
        # Replace the GPA outright.
        self.gpa = newgpa
    def addClass(self,newClass):
        # Merge into the schedule; assumes newClass is a dict shaped like
        # current_classes ({course_name: credits}) — TODO confirm.
        self.current_classes.update(newClass)
def main():
    # Smoke test: build one instance of each subclass and dump its attributes.
    PHD_student1 = PHD_student(name = "yuchen", year = 5, gpa = 3.8, current_classes = {"csc1": 3},numberOfResearchPapers = 100,advisor = "malir")
    athlete_student1 = athlete_student(name = "yuchen", year = 4, gpa =3.0,current_classes = {"chemstry" : 3}, sport = "swiming " ,yearsOfExercise = 5,onScholarship = "Yes",starter = " Joe ")
    print(PHD_student1.name,PHD_student1.year,PHD_student1. gpa,PHD_student1.current_classes,PHD_student1.numberOfResearchPapers)
    print("name: ",athlete_student1.name,"year: ",athlete_student1.year,"gpa: ",athlete_student1.gpa,"current_classes: ",athlete_student1.current_classes,"sport: ",athlete_student1.sport,"years pf exercise: ",athlete_student1.yearsOfExercise,"on scholarship: ",athlete_student1.onScholarship, "starter: ",athlete_student1.starter)
if __name__ == '__main__':
main()
|
988,796 | 08aaf54e30e556517fa68b5252fd0dabd3da58a7 | # mypy: allow-untyped-defs
import abc
import argparse
import importlib
import json
import logging
import multiprocessing
import os
import platform
import subprocess
import sys
import threading
import time
import traceback
import urllib
import uuid
from collections import defaultdict, OrderedDict
from io import IOBase
from itertools import chain, product
from html5lib import html5parser
from typing import ClassVar, List, Optional, Set, Tuple
from localpaths import repo_root # type: ignore
from manifest.sourcefile import read_script_metadata, js_meta_re, parse_variants # type: ignore
from wptserve import server as wptserve, handlers
from wptserve import stash
from wptserve import config
from wptserve.handlers import filesystem_path, wrap_pipeline
from wptserve.response import ResponseHeaders
from wptserve.utils import get_port, HTTPException, http2_compatible
from mod_pywebsocket import standalone as pywebsocket
EDIT_HOSTS_HELP = ("Please ensure all the necessary WPT subdomains "
"are mapped to a loopback device in /etc/hosts.\n"
"See https://web-platform-tests.org/running-tests/from-local-system.html#system-setup "
"for instructions.")
def replace_end(s, old, new):
    """
    Swap the trailing occurrence of `old` in `s` for `new`.

    `s` is required to end with `old`.
    """
    assert s.endswith(old)
    head = s[:-len(old)]
    return head + new
def domains_are_distinct(a, b):
    """Return True unless one domain equals, or is a subdomain of, the other.

    Compares only the trailing labels up to the length of the shorter name,
    so "a.example.com" and "example.com" are NOT distinct.
    """
    a_labels = a.split(".")
    b_labels = b.split(".")
    overlap = min(len(a_labels), len(b_labels))
    return a_labels[-overlap:] != b_labels[-overlap:]
def inject_script(html, script_tag):
    """Insert script_tag at the first content position of an HTML document.

    Uses html5lib's tokenizer to find the offset just after any doctype and
    the <html>/<head> opening tags but before any other content.  Returns
    the original html unchanged whenever the safe insertion point cannot be
    determined (parse errors, or content beyond the first tokenizer chunk).
    """
    # Tokenize and find the position of the first content (e.g. after the
    # doctype, html, and head opening tags if present but before any other tags).
    token_types = html5parser.tokenTypes
    after_tags = {"html", "head"}
    before_tokens = {token_types["EndTag"], token_types["EmptyTag"],
                     token_types["Characters"]}
    error_tokens = {token_types["ParseError"]}
    tokenizer = html5parser._tokenizer.HTMLTokenizer(html)
    stream = tokenizer.stream
    offset = 0
    error = False
    for item in tokenizer:
        if item["type"] == token_types["StartTag"]:
            # Any start tag other than <html>/<head> marks the content start.
            if not item["name"].lower() in after_tags:
                break
        elif item["type"] in before_tokens:
            break
        elif item["type"] in error_tokens:
            error = True
            break
        offset = stream.chunkOffset
    else:
        # Tokenizer exhausted without finding a content boundary.
        error = True
    # NOTE(review): by operator precedence this reads as
    # (not error and stream.prevNumCols) or stream.prevNumLines — i.e. any
    # previous line count forces the error path regardless of `error`; confirm
    # that is intended.
    if not error and stream.prevNumCols or stream.prevNumLines:
        # We're outside the first chunk, so we don't know what to do
        error = True
    if error:
        return html
    else:
        return html[:offset] + script_tag + html[offset:]
class WrapperHandler:
__meta__ = abc.ABCMeta
headers: ClassVar[List[Tuple[str, str]]] = []
def __init__(self, base_path=None, url_base="/"):
self.base_path = base_path
self.url_base = url_base
self.handler = handlers.handler(self.handle_request)
def __call__(self, request, response):
self.handler(request, response)
def handle_request(self, request, response):
headers = self.headers + handlers.load_headers(
request, self._get_filesystem_path(request))
for header_name, header_value in headers:
response.headers.set(header_name, header_value)
self.check_exposure(request)
path = self._get_path(request.url_parts.path, True)
query = request.url_parts.query
if query:
query = "?" + query
meta = "\n".join(self._get_meta(request))
script = "\n".join(self._get_script(request))
response.content = self.wrapper % {"meta": meta, "script": script, "path": path, "query": query}
wrap_pipeline(path, request, response)
def _get_path(self, path, resource_path):
"""Convert the path from an incoming request into a path corresponding to an "unwrapped"
resource e.g. the file on disk that will be loaded in the wrapper.
:param path: Path from the HTTP request
:param resource_path: Boolean used to control whether to get the path for the resource that
this wrapper will load or the associated file on disk.
Typically these are the same but may differ when there are multiple
layers of wrapping e.g. for a .any.worker.html input the underlying disk file is
.any.js but the top level html file loads a resource with a
.any.worker.js extension, which itself loads the .any.js file.
If True return the path to the resource that the wrapper will load,
otherwise return the path to the underlying file on disk."""
for item in self.path_replace:
if len(item) == 2:
src, dest = item
else:
assert len(item) == 3
src = item[0]
dest = item[2 if resource_path else 1]
if path.endswith(src):
path = replace_end(path, src, dest)
return path
def _get_filesystem_path(self, request):
"""Get the path of the underlying resource file on disk."""
return self._get_path(filesystem_path(self.base_path, request, self.url_base), False)
def _get_metadata(self, request):
"""Get an iterator over script metadata based on // META comments in the
associated js file.
:param request: The Request being processed.
"""
path = self._get_filesystem_path(request)
try:
with open(path, "rb") as f:
yield from read_script_metadata(f, js_meta_re)
except OSError:
raise HTTPException(404)
def _get_meta(self, request):
"""Get an iterator over strings to inject into the wrapper document
based on // META comments in the associated js file.
:param request: The Request being processed.
"""
for key, value in self._get_metadata(request):
replacement = self._meta_replacement(key, value)
if replacement:
yield replacement
def _get_script(self, request):
"""Get an iterator over strings to inject into the wrapper document
based on // META comments in the associated js file.
:param request: The Request being processed.
"""
for key, value in self._get_metadata(request):
replacement = self._script_replacement(key, value)
if replacement:
yield replacement
@abc.abstractproperty
def path_replace(self):
# A list containing a mix of 2 item tuples with (input suffix, output suffix)
# and 3-item tuples with (input suffix, filesystem suffix, resource suffix)
# for the case where we want a different path in the generated resource to
# the actual path on the filesystem (e.g. when there is another handler
# that will wrap the file).
return None
@abc.abstractproperty
def wrapper(self):
# String template with variables path and meta for wrapper document
return None
@abc.abstractmethod
def _meta_replacement(self, key, value):
# Get the string to insert into the wrapper document, given
# a specific metadata key: value pair.
pass
@abc.abstractmethod
def check_exposure(self, request):
# Raise an exception if this handler shouldn't be exposed after all.
pass
class HtmlWrapperHandler(WrapperHandler):
global_type: ClassVar[Optional[str]] = None
headers = [('Content-Type', 'text/html')]
def check_exposure(self, request):
if self.global_type is not None:
global_variants = ""
for (key, value) in self._get_metadata(request):
if key == "global":
global_variants = value
break
if self.global_type not in parse_variants(global_variants):
raise HTTPException(404, "This test cannot be loaded in %s mode" %
self.global_type)
def _meta_replacement(self, key, value):
if key == "timeout":
if value == "long":
return '<meta name="timeout" content="long">'
if key == "title":
value = value.replace("&", "&").replace("<", "<")
return '<title>%s</title>' % value
return None
def _script_replacement(self, key, value):
if key == "script":
attribute = value.replace("&", "&").replace('"', """)
return '<script src="%s"></script>' % attribute
return None
class HtmlScriptInjectorHandlerWrapper:
    """Handler decorator that injects a <script> at the top of HTML responses.

    Wraps another handler (`wrap`); after it runs, the `inject` bytes are
    spliced into the response body as a self-removing script element.
    Non-HTML and custom streaming responses pass through untouched.
    """
    def __init__(self, inject="", wrap=None):
        self.inject = inject
        self.wrap = wrap
    def __call__(self, request, response):
        self.wrap(request, response)
        # If the response content type isn't html, don't modify it.
        if not isinstance(response.headers, ResponseHeaders) or response.headers.get("Content-Type")[0] != b"text/html":
            return response
        # Skip injection on custom streaming responses.
        if not isinstance(response.content, (bytes, str, IOBase)) and not hasattr(response, "read"):
            return response
        # Materialize the body and re-emit it with the script prepended at
        # the first safe content position (see inject_script).
        response.content = inject_script(
            b"".join(response.iter_content(read_file=True)),
            b"<script>\n" +
            self.inject + b"\n" +
            (b"// Remove the injected script tag from the DOM.\n"
             b"document.currentScript.remove();\n"
             b"</script>\n"))
        return response
class WorkersHandler(HtmlWrapperHandler):
global_type = "dedicatedworker"
path_replace = [(".any.worker.html", ".any.js", ".any.worker.js"),
(".worker.html", ".worker.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
fetch_tests_from_worker(new Worker("%(path)s%(query)s"));
</script>
"""
class WorkerModulesHandler(HtmlWrapperHandler):
global_type = "dedicatedworker-module"
path_replace = [(".any.worker-module.html", ".any.js", ".any.worker-module.js"),
(".worker.html", ".worker.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
fetch_tests_from_worker(new Worker("%(path)s%(query)s", { type: "module" }));
</script>
"""
class WindowHandler(HtmlWrapperHandler):
path_replace = [(".window.html", ".window.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
%(script)s
<div id=log></div>
<script src="%(path)s"></script>
"""
class AnyHtmlHandler(HtmlWrapperHandler):
global_type = "window"
path_replace = [(".any.html", ".any.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script>
self.GLOBAL = {
isWindow: function() { return true; },
isWorker: function() { return false; },
isShadowRealm: function() { return false; },
};
</script>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
%(script)s
<div id=log></div>
<script src="%(path)s"></script>
"""
class SharedWorkersHandler(HtmlWrapperHandler):
global_type = "sharedworker"
path_replace = [(".any.sharedworker.html", ".any.js", ".any.worker.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
fetch_tests_from_worker(new SharedWorker("%(path)s%(query)s"));
</script>
"""
class SharedWorkerModulesHandler(HtmlWrapperHandler):
global_type = "sharedworker-module"
path_replace = [(".any.sharedworker-module.html", ".any.js", ".any.worker-module.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
fetch_tests_from_worker(new SharedWorker("%(path)s%(query)s", { type: "module" }));
</script>
"""
class ServiceWorkersHandler(HtmlWrapperHandler):
global_type = "serviceworker"
path_replace = [(".any.serviceworker.html", ".any.js", ".any.worker.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
(async function() {
const scope = 'does/not/exist';
let reg = await navigator.serviceWorker.getRegistration(scope);
if (reg) await reg.unregister();
reg = await navigator.serviceWorker.register("%(path)s%(query)s", {scope});
fetch_tests_from_worker(reg.installing);
})();
</script>
"""
class ServiceWorkerModulesHandler(HtmlWrapperHandler):
global_type = "serviceworker-module"
path_replace = [(".any.serviceworker-module.html",
".any.js", ".any.worker-module.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
(async function() {
const scope = 'does/not/exist';
let reg = await navigator.serviceWorker.getRegistration(scope);
if (reg) await reg.unregister();
reg = await navigator.serviceWorker.register(
"%(path)s%(query)s",
{ scope, type: 'module' },
);
fetch_tests_from_worker(reg.installing);
})();
</script>
"""
class ShadowRealmHandler(HtmlWrapperHandler):
global_type = "shadowrealm"
path_replace = [(".any.shadowrealm.html", ".any.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script>
(async function() {
const r = new ShadowRealm();
await new Promise(r.evaluate(`
(resolve, reject) => {
(async () => {
globalThis.self.GLOBAL = {
isWindow: function() { return false; },
isWorker: function() { return false; },
isShadowRealm: function() { return true; },
};
await import("/resources/testharness.js");
%(script)s
await import("%(path)s");
})().then(resolve, (e) => reject(e.toString()));
}
`));
await fetch_tests_from_shadow_realm(r);
done();
})();
</script>
"""
def _script_replacement(self, key, value):
if key == "script":
return 'await import("%s");' % value
return None
class BaseWorkerHandler(WrapperHandler):
headers = [('Content-Type', 'text/javascript')]
def _meta_replacement(self, key, value):
return None
@abc.abstractmethod
def _create_script_import(self, attribute):
# Take attribute (a string URL to a JS script) and return JS source to import the script
# into the worker.
pass
def _script_replacement(self, key, value):
if key == "script":
attribute = value.replace("\\", "\\\\").replace('"', '\\"')
return self._create_script_import(attribute)
if key == "title":
value = value.replace("\\", "\\\\").replace('"', '\\"')
return 'self.META_TITLE = "%s";' % value
return None
class ClassicWorkerHandler(BaseWorkerHandler):
path_replace = [(".any.worker.js", ".any.js")]
wrapper = """%(meta)s
self.GLOBAL = {
isWindow: function() { return false; },
isWorker: function() { return true; },
isShadowRealm: function() { return false; },
};
importScripts("/resources/testharness.js");
%(script)s
importScripts("%(path)s");
done();
"""
def _create_script_import(self, attribute):
return 'importScripts("%s")' % attribute
class ModuleWorkerHandler(BaseWorkerHandler):
path_replace = [(".any.worker-module.js", ".any.js")]
wrapper = """%(meta)s
self.GLOBAL = {
isWindow: function() { return false; },
isWorker: function() { return true; },
isShadowRealm: function() { return false; },
};
import "/resources/testharness.js";
%(script)s
import "%(path)s";
done();
"""
def _create_script_import(self, attribute):
return 'import "%s";' % attribute
rewrites = [("GET", "/resources/WebIDLParser.js", "/resources/webidl2/lib/webidl2.js")]
class RoutesBuilder:
    """Accumulates the wptserve routing table.

    Route priority (first match wins) is: forbidden_override, forbidden,
    extra handlers, then mount points — with later-added mount points
    taking precedence over earlier ones (see get_routes).
    """

    def __init__(self, inject_script = None):
        # Runner tooling that must keep working despite the /tools/ ban below.
        self.forbidden_override = [("GET", "/tools/runner/*", handlers.file_handler),
                                   ("POST", "/tools/runner/update_manifest.py",
                                    handlers.python_script_handler)]

        # Paths that must never be served (certificates, tools, results).
        self.forbidden = [("*", "/_certs/*", handlers.ErrorHandler(404)),
                          ("*", "/tools/*", handlers.ErrorHandler(404)),
                          ("*", "{spec}/tools/*", handlers.ErrorHandler(404)),
                          ("*", "/results/", handlers.ErrorHandler(404))]

        # Ad-hoc routes registered via add_handler()/add_static().
        self.extra = []

        # Optional script body injected into served HTML (polyfill testing).
        self.inject_script_data = None
        if inject_script is not None:
            with open(inject_script, 'rb') as f:
                self.inject_script_data = f.read()

        # url_base -> [route]; insertion order matters (see get_routes).
        self.mountpoint_routes = OrderedDict()

        self.add_mount_point("/", None)

    def get_routes(self):
        """Return the complete route list in priority order."""
        routes = self.forbidden_override + self.forbidden + self.extra
        # Using reversed here means that mount points that are added later
        # get higher priority. This makes sense since / is typically added
        # first.
        for item in reversed(self.mountpoint_routes.values()):
            routes.extend(item)
        return routes

    def add_handler(self, method, route, handler):
        """Register one explicit (method, route, handler) triple."""
        self.extra.append((str(method), str(route), handler))

    def add_static(self, path, format_args, content_type, route, headers=None):
        """Serve a template-expanded static file at *route* via GET."""
        if headers is None:
            headers = {}
        handler = handlers.StaticHandler(path, format_args, content_type, **headers)
        self.add_handler("GET", str(route), handler)

    def add_mount_point(self, url_base, path):
        """Mount directory *path* at *url_base*, wiring up all the test-type
        handlers (.any.* variants, workers, .asis, .py scripts, plain files)."""
        url_base = "/%s/" % url_base.strip("/") if url_base != "/" else "/"

        self.mountpoint_routes[url_base] = []

        routes = [
            ("GET", "*.worker.html", WorkersHandler),
            ("GET", "*.worker-module.html", WorkerModulesHandler),
            ("GET", "*.window.html", WindowHandler),
            ("GET", "*.any.html", AnyHtmlHandler),
            ("GET", "*.any.sharedworker.html", SharedWorkersHandler),
            ("GET", "*.any.sharedworker-module.html", SharedWorkerModulesHandler),
            ("GET", "*.any.serviceworker.html", ServiceWorkersHandler),
            ("GET", "*.any.serviceworker-module.html", ServiceWorkerModulesHandler),
            ("GET", "*.any.shadowrealm.html", ShadowRealmHandler),
            ("GET", "*.any.worker.js", ClassicWorkerHandler),
            ("GET", "*.any.worker-module.js", ModuleWorkerHandler),
            ("GET", "*.asis", handlers.AsIsHandler),
            ("*", "/.well-known/attribution-reporting/report-event-attribution", handlers.PythonScriptHandler),
            ("*", "/.well-known/attribution-reporting/debug/report-event-attribution", handlers.PythonScriptHandler),
            ("*", "/.well-known/attribution-reporting/report-aggregate-attribution", handlers.PythonScriptHandler),
            ("*", "/.well-known/attribution-reporting/debug/report-aggregate-attribution", handlers.PythonScriptHandler),
            ("*", "/.well-known/attribution-reporting/debug/verbose", handlers.PythonScriptHandler),
            ("*", "/.well-known/private-aggregation/*", handlers.PythonScriptHandler),
            ("*", "/.well-known/web-identity", handlers.PythonScriptHandler),
            ("*", "*.py", handlers.PythonScriptHandler),
            ("GET", "*", handlers.FileHandler)
        ]

        for (method, suffix, handler_cls) in routes:
            handler = handler_cls(base_path=path, url_base=url_base)
            if self.inject_script_data is not None:
                # Wrap the handler so HTML responses get the injected script.
                handler = HtmlScriptInjectorHandlerWrapper(inject=self.inject_script_data, wrap=handler)

            self.mountpoint_routes[url_base].append(
                (method,
                 "%s%s" % (url_base if url_base != "/" else "", suffix),
                 handler))

    def add_file_mount_point(self, file_url, base_path):
        """Serve the single file *base_path* at the exact URL *file_url*."""
        assert file_url.startswith("/")
        url_base = file_url[0:file_url.rfind("/") + 1]
        self.mountpoint_routes[file_url] = [("GET", file_url, handlers.FileHandler(base_path=base_path, url_base=url_base))]
def get_route_builder(logger, aliases, config):
    """Build the RoutesBuilder and register any user-configured aliases.

    Aliases ending in "/" become directory mount points; otherwise they
    serve a single file. Invalid aliases are skipped with an error log.

    :param logger: logger for reporting bad alias entries
    :param aliases: iterable of {"url-path": ..., "local-dir": ...} dicts
    :param config: serve config (provides inject_script)
    """
    builder = RoutesBuilder(config.inject_script)
    for alias in aliases:
        url = alias["url-path"]
        directory = alias["local-dir"]
        if not url.startswith("/") or len(directory) == 0:
            # BUG FIX: the old message only mentioned the leading-slash rule
            # even though an empty "local-dir" also lands here, and it never
            # identified the offending alias.
            logger.error("Skipping invalid alias %r: \"url-path\" must start "
                         "with '/' and \"local-dir\" must be non-empty." % (alias,))
            continue

        # In this script, `url` must be a directory if it ends with "/".
        if url.endswith("/"):
            builder.add_mount_point(url, directory)
        else:
            builder.add_file_mount_point(url, directory)
    return builder
class ServerProc:
    """Run one wptserve/websocket daemon in a child process.

    The child is started through the supplied multiprocessing context and
    shut down cooperatively via a shared Event (stop_flag).
    """

    def __init__(self, mp_context, scheme=None):
        self.proc = None
        self.daemon = None
        self.mp_context = mp_context
        self.stop_flag = mp_context.Event()
        self.scheme = scheme

    def start(self, init_func, host, port, paths, routes, bind_address, config, log_handlers, **kwargs):
        """Spawn the daemon process (daemonic, so it dies with the parent)."""
        self.proc = self.mp_context.Process(target=self.create_daemon,
                                            args=(init_func, host, port, paths, routes, bind_address,
                                                  config, log_handlers),
                                            name='%s on port %s' % (self.scheme, port),
                                            kwargs=kwargs)
        self.proc.daemon = True
        self.proc.start()

    def create_daemon(self, init_func, host, port, paths, routes, bind_address,
                      config, log_handlers, **kwargs):
        """Child-process entry point: build the daemon and serve until
        stop_flag is set (or the process is interrupted)."""
        # Ensure that when we start this in a new process we have the global lock
        # in the logging module unlocked
        importlib.reload(logging)
        logger = get_logger(config.logging["level"], log_handlers)

        if sys.platform == "darwin":
            # on Darwin, NOFILE starts with a very low limit (256), so bump it up a little
            # by way of comparison, Debian starts with a limit of 1024, Windows 512
            import resource  # local, as it only exists on Unix-like systems
            maxfilesperproc = int(subprocess.check_output(
                ["sysctl", "-n", "kern.maxfilesperproc"]
            ).strip())
            soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
            # 2048 is somewhat arbitrary, but gives us some headroom for wptrunner --parallel
            # note that it's expected that 2048 will be the min here
            new_soft = min(2048, maxfilesperproc, hard)
            if soft < new_soft:
                resource.setrlimit(resource.RLIMIT_NOFILE, (new_soft, hard))
        try:
            self.daemon = init_func(logger, host, port, paths, routes, bind_address, config, **kwargs)
        except OSError:
            # BUG FIX: logging methods do not accept a ``file=`` keyword
            # (that is print()'s signature); the old call raised TypeError
            # and masked the real socket error.
            logger.critical("Socket error on port %s" % port)
            raise
        except Exception:
            logger.critical(traceback.format_exc())
            raise

        if self.daemon:
            try:
                self.daemon.start()
                try:
                    self.stop_flag.wait()
                except KeyboardInterrupt:
                    pass
                finally:
                    self.daemon.stop()
            except Exception:
                logger.critical(traceback.format_exc())
                raise

    def request_shutdown(self):
        """Ask a live child process to stop serving."""
        if self.is_alive():
            self.stop_flag.set()

    def wait(self, timeout=None):
        """Join the child process, optionally with a timeout in seconds."""
        self.proc.join(timeout)

    def is_alive(self):
        return self.proc.is_alive()
def check_subdomains(logger, config, routes, mp_context, log_handlers):
    """Sanity-check DNS/hosts setup before starting the real servers.

    Starts a throwaway HTTP server, confirms the primary host answers
    (retrying for ~10s), then probes every configured (sub)domain.
    Exits the process with status 1 (after printing hosts-file help)
    on any failure.
    """
    paths = config.paths
    bind_address = config.bind_address

    host = config.server_host
    port = get_port()
    logger.debug("Going to use port %d to check subdomains" % port)

    wrapper = ServerProc(mp_context)
    wrapper.start(start_http_server, host, port, paths, routes,
                  bind_address, config, log_handlers)

    url = f"http://{host}:{port}/"
    connected = False
    for i in range(10):
        try:
            # BUG FIX: close the probe response so its connection/socket is
            # not leaked; only connectivity matters here.
            urllib.request.urlopen(url).close()
            connected = True
            break
        except urllib.error.URLError:
            time.sleep(1)

    if not connected:
        logger.critical("Failed to connect to test server "
                        "on {}. {}".format(url, EDIT_HOSTS_HELP))
        sys.exit(1)

    for domain in config.domains_set:
        if domain == host:
            continue

        try:
            urllib.request.urlopen("http://%s:%d/" % (domain, port)).close()
        except Exception:
            logger.critical(f"Failed probing domain {domain}. {EDIT_HOSTS_HELP}")
            sys.exit(1)

    wrapper.request_shutdown()
    wrapper.wait()
def make_hosts_file(config, host):
    """Return hosts-file text mapping every test domain to *host*.

    On Windows, additionally maps the deliberately-unresolvable domains
    to 0.0.0.0 so they fail to connect there too.
    """
    entries = ["%s\t%s\n" % (host, domain) for domain in config.domains_set]

    # Windows interpets the IP address 0.0.0.0 as non-existent, making it an
    # appropriate alias for non-existent hosts. However, UNIX-like systems
    # interpret the same address to mean any IP address, which is inappropraite
    # for this context. These systems do not reserve any value for this
    # purpose, so the inavailability of the domains must be taken for granted.
    #
    # https://github.com/web-platform-tests/wpt/issues/10560
    if platform.uname()[0] == "Windows":
        entries.extend("0.0.0.0\t%s\n" % not_domain
                       for not_domain in config.not_domains_set)

    return "".join(entries)
def start_servers(logger, host, ports, paths, routes, bind_address, config,
                  mp_context, log_handlers, **kwargs):
    """Start one ServerProc per configured scheme/port.

    Returns a dict mapping scheme -> list of (port, ServerProc).
    h2 is skipped when the environment lacks a new-enough OpenSSL, and
    webtransport-h3 is skipped unless explicitly enabled via kwargs.
    """
    servers = defaultdict(list)
    # BUG FIX: the loop variable used to be named ``ports``, shadowing the
    # ``ports`` parameter it was iterating; renamed for clarity.
    for scheme, scheme_ports in ports.items():
        # http and https get two ports each; every other scheme gets one.
        assert len(scheme_ports) == {"http": 2, "https": 2}.get(scheme, 1)

        # If trying to start HTTP/2.0 server, check compatibility
        if scheme == 'h2' and not http2_compatible():
            logger.error('Cannot start HTTP/2.0 server as the environment is not compatible. ' +
                         'Requires OpenSSL 1.0.2+')
            continue

        # Skip WebTransport over HTTP/3 server unless it is enabled explicitly.
        if scheme == 'webtransport-h3' and not kwargs.get("webtransport_h3"):
            continue

        for port in scheme_ports:
            if port is None:
                continue

            init_func = {
                "http": start_http_server,
                "http-private": start_http_server,
                "http-public": start_http_server,
                "https": start_https_server,
                "https-private": start_https_server,
                "https-public": start_https_server,
                "h2": start_http2_server,
                "ws": start_ws_server,
                "wss": start_wss_server,
                "webtransport-h3": start_webtransport_h3_server,
            }[scheme]

            server_proc = ServerProc(mp_context, scheme=scheme)
            server_proc.start(init_func, host, port, paths, routes, bind_address,
                              config, log_handlers, **kwargs)
            servers[scheme].append((port, server_proc))

    return servers
def startup_failed(logger):
    # A server failed to start; the usual cause is missing hosts-file
    # entries, so print the hosts-file help text and abort the process.
    logger.critical(EDIT_HOSTS_HELP)
    sys.exit(1)
def start_http_server(logger, host, port, paths, routes, bind_address, config, **kwargs):
    """Start a plain-HTTP wptserve instance; on failure print hosts-file
    help and exit via startup_failed()."""
    try:
        server_options = dict(
            host=host,
            port=port,
            doc_root=paths["doc_root"],
            routes=routes,
            rewrites=rewrites,
            bind_address=bind_address,
            config=config,
            use_ssl=False,
            key_file=None,
            certificate=None,
            latency=kwargs.get("latency"),
        )
        return wptserve.WebTestHttpd(**server_options)
    except Exception:
        startup_failed(logger)
def start_https_server(logger, host, port, paths, routes, bind_address, config, **kwargs):
    """Start an HTTPS wptserve instance using the configured TLS key and
    certificate; on failure print hosts-file help and exit."""
    try:
        server_options = dict(
            host=host,
            port=port,
            doc_root=paths["doc_root"],
            routes=routes,
            rewrites=rewrites,
            bind_address=bind_address,
            config=config,
            use_ssl=True,
            key_file=config.ssl_config["key_path"],
            certificate=config.ssl_config["cert_path"],
            encrypt_after_connect=config.ssl_config["encrypt_after_connect"],
            latency=kwargs.get("latency"),
        )
        return wptserve.WebTestHttpd(**server_options)
    except Exception:
        startup_failed(logger)
def start_http2_server(logger, host, port, paths, routes, bind_address, config, **kwargs):
    """Start the HTTP/2 (TLS) wptserve instance; on failure print
    hosts-file help and exit."""
    try:
        server_options = dict(
            host=host,
            port=port,
            handler_cls=wptserve.Http2WebTestRequestHandler,
            doc_root=paths["doc_root"],
            ws_doc_root=paths["ws_doc_root"],
            routes=routes,
            rewrites=rewrites,
            bind_address=bind_address,
            config=config,
            use_ssl=True,
            key_file=config.ssl_config["key_path"],
            certificate=config.ssl_config["cert_path"],
            encrypt_after_connect=config.ssl_config["encrypt_after_connect"],
            latency=kwargs.get("latency"),
            http2=True,
        )
        return wptserve.WebTestHttpd(**server_options)
    except Exception:
        startup_failed(logger)
class WebSocketDaemon:
    """Thin wrapper running pywebsocket's WebSocketServer on a daemon
    thread, configured via its command-line-style option parser."""

    def __init__(self, host, port, doc_root, handlers_root, bind_address, ssl_config):
        logger = logging.getLogger()
        self.host = host
        cmd_args = ["-p", port,
                    "-d", doc_root,
                    "-w", handlers_root]

        if ssl_config is not None:
            cmd_args += ["--tls",
                         "--private-key", ssl_config["key_path"],
                         "--certificate", ssl_config["cert_path"]]

        if bind_address:
            cmd_args = ["-H", host] + cmd_args

        opts, args = pywebsocket._parse_args_and_config(cmd_args)
        opts.cgi_directories = []
        opts.is_executable_method = None
        self.server = pywebsocket.WebSocketServer(opts)
        ports = [item[0].getsockname()[1] for item in self.server._sockets]
        if not ports:
            # TODO: Fix the logging configuration in WebSockets processes
            # see https://github.com/web-platform-tests/wpt/issues/22719
            # BUG FIX: logging methods take no ``file=`` keyword (that is
            # print()'s signature); the old call raised TypeError instead
            # of logging the failure.
            logger.critical("Failed to start websocket server on port %s, "
                            "is something already using that port?" % port)
            raise OSError()
        # All listening sockets must have ended up on the same port.
        assert all(item == ports[0] for item in ports)
        self.port = ports[0]
        self.started = False
        self.server_thread = None

    def start(self):
        """Run serve_forever() on a background daemon thread."""
        self.started = True
        self.server_thread = threading.Thread(target=self.server.serve_forever)
        # BUG FIX: Thread.setDaemon() is deprecated (Python 3.10+); set the
        # attribute directly. Daemonic so it doesn't hang process exit.
        self.server_thread.daemon = True
        self.server_thread.start()

    def stop(self):
        """
        Stops the server.

        If the server is not running, this method has no effect.
        """
        if self.started:
            try:
                self.server.shutdown()
                self.server.server_close()
                self.server_thread.join()
                self.server_thread = None
            except AttributeError:
                # Server/thread already torn down; nothing left to stop.
                pass
            self.started = False
        self.server = None
def start_ws_server(logger, host, port, paths, routes, bind_address, config, **kwargs):
    """Start the plain (non-TLS) websocket server; on failure print
    hosts-file help and exit."""
    try:
        daemon = WebSocketDaemon(host,
                                 str(port),
                                 repo_root,
                                 config.paths["ws_doc_root"],
                                 bind_address,
                                 ssl_config=None)
    except Exception:
        startup_failed(logger)
    else:
        return daemon
def start_wss_server(logger, host, port, paths, routes, bind_address, config, **kwargs):
    """Start the TLS websocket server using the configured SSL settings;
    on failure print hosts-file help and exit."""
    try:
        daemon = WebSocketDaemon(host,
                                 str(port),
                                 repo_root,
                                 config.paths["ws_doc_root"],
                                 bind_address,
                                 config.ssl_config)
    except Exception:
        startup_failed(logger)
    else:
        return daemon
def start_webtransport_h3_server(logger, host, port, paths, routes, bind_address, config, **kwargs):
    """Start the WebTransport-over-HTTP/3 server (optional component)."""
    try:
        # TODO(bashi): Move the following import to the beginning of this file
        # once WebTransportH3Server is enabled by default.
        from webtransport.h3.webtransport_h3_server import WebTransportH3Server  # type: ignore
        return WebTransportH3Server(host=host,
                                    port=port,
                                    doc_root=paths["doc_root"],
                                    cert_path=config.ssl_config["cert_path"],
                                    key_path=config.ssl_config["key_path"],
                                    logger=logger)
    except Exception as error:
        logger.critical(
            f"Failed to start WebTransport over HTTP/3 server: {error}")
        # NOTE(review): exits with status 0 despite a critical failure —
        # confirm whether a non-zero exit code was intended here.
        sys.exit(0)
def start(logger, config, routes, mp_context, log_handlers, **kwargs):
    """Start every configured server and return the
    {scheme: [(port, ServerProc)]} mapping from start_servers()."""
    ports = config.ports
    paths = config.paths
    host = config["server_host"]
    bind_address = config["bind_address"]

    logger.debug("Using ports: %r" % ports)

    return start_servers(logger, host, ports, paths, routes, bind_address,
                         config, mp_context, log_handlers, **kwargs)
def iter_servers(servers):
    """Yield every ServerProc from a {scheme: [(port, server), ...]} map.

    BUG FIX: the inner loop variable used to be named ``servers``,
    shadowing the parameter of the same name; renamed for clarity.
    """
    for server_list in servers.values():
        for _port, server in server_list:
            yield server
def _make_subdomains_product(s: Set[str], depth: int = 2) -> Set[str]:
return {".".join(x) for x in chain(*(product(s, repeat=i) for i in range(1, depth+1)))}
# Base subdomain labels used for cross-origin testing; the non-ASCII
# labels exercise IDNA handling.
_subdomains = {"www",
               "www1",
               "www2",
               "天気の良い日",
               "élève"}

# Labels for domains that deliberately must NOT resolve.
_not_subdomains = {"nonexistent"}

# Expand each label set to all dotted combinations up to depth 2
# (e.g. "www1.www2").
_subdomains = _make_subdomains_product(_subdomains)

_not_subdomains = _make_subdomains_product(_not_subdomains)
class ConfigBuilder(config.ConfigBuilder):
    """serve config

    This subclasses wptserve.config.ConfigBuilder to add serve config options.
    """

    _default = {
        "browser_host": "web-platform.test",
        "alternate_hosts": {
            "alt": "not-web-platform.test"
        },
        "doc_root": repo_root,
        "ws_doc_root": os.path.join(repo_root, "websockets", "handlers"),
        "server_host": None,
        # Fixed ports where the suite depends on them; "auto" means pick a
        # free ephemeral port at startup.
        "ports": {
            "http": [8000, "auto"],
            "http-private": ["auto"],
            "http-public": ["auto"],
            "https": [8443, 8444],
            "https-private": ["auto"],
            "https-public": ["auto"],
            "ws": ["auto"],
            "wss": ["auto"],
            "webtransport-h3": ["auto"],
        },
        "check_subdomains": True,
        "bind_address": True,
        "ssl": {
            # Which SSL strategy to use; keys below hold per-strategy options.
            "type": "pregenerated",
            "encrypt_after_connect": False,
            "openssl": {
                "openssl_binary": "openssl",
                "base_path": "_certs",
                "password": "web-platform-tests",
                "force_regenerate": False,
                "duration": 30,
                "base_conf_path": None
            },
            "pregenerated": {
                "host_key_path": os.path.join(repo_root, "tools", "certs", "web-platform.test.key"),
                "host_cert_path": os.path.join(repo_root, "tools", "certs", "web-platform.test.pem")
            },
            "none": {}
        },
        "aliases": [],
        "logging": {
            "level": "info",
            "suppress_handler_traceback": False
        }
    }

    # ws_doc_root is derived (see _get_ws_doc_root) in addition to the
    # base class's computed properties.
    computed_properties = ["ws_doc_root"] + config.ConfigBuilder.computed_properties

    def __init__(self, logger, *args, **kwargs):
        """Build the config, defaulting subdomain sets, and validate that
        the alternate host is distinct from the browser host."""
        if "subdomains" not in kwargs:
            kwargs["subdomains"] = _subdomains
        if "not_subdomains" not in kwargs:
            kwargs["not_subdomains"] = _not_subdomains
        super().__init__(
            logger,
            *args,
            **kwargs
        )
        with self as c:
            browser_host = c.get("browser_host")
            alternate_host = c.get("alternate_hosts", {}).get("alt")

            if not domains_are_distinct(browser_host, alternate_host):
                raise ValueError(
                    "Alternate host must be distinct from browser host"
                )

    def _get_ws_doc_root(self, data):
        # Computed property: explicit value wins, otherwise default to
        # <doc_root>/websockets/handlers.
        if data["ws_doc_root"] is not None:
            return data["ws_doc_root"]
        else:
            return os.path.join(data["doc_root"], "websockets", "handlers")

    def _get_paths(self, data):
        # Extend the base paths dict with the websockets document root.
        rv = super()._get_paths(data)
        rv["ws_doc_root"] = data["ws_doc_root"]
        return rv
def build_config(logger, override_path=None, config_cls=ConfigBuilder, **kwargs):
    """Construct the serve config.

    Layers, in order: class defaults, optional h2 port, the repo's
    config.json (override_path), a user-supplied --config file, then
    individual command-line overrides (verbosity, inject script,
    document roots). Raises ValueError for missing paths.
    """
    rv = config_cls(logger)

    enable_http2 = kwargs.get("h2")
    if enable_http2 is None:
        enable_http2 = True
    if enable_http2:
        # NOTE(review): this mutates the _default mapping in place; if
        # _default is shared at class level, the h2 port persists into any
        # later ConfigBuilder instances — confirm that is intended.
        rv._default["ports"]["h2"] = [9000]

    if override_path and os.path.exists(override_path):
        with open(override_path) as f:
            override_obj = json.load(f)
        rv.update(override_obj)

    if kwargs.get("config_path"):
        other_path = os.path.abspath(os.path.expanduser(kwargs.get("config_path")))
        if os.path.exists(other_path):
            with open(other_path) as f:
                override_obj = json.load(f)
            rv.update(override_obj)
        else:
            raise ValueError("Config path %s does not exist" % other_path)

    if kwargs.get("verbose"):
        rv.logging["level"] = "DEBUG"

    setattr(rv, "inject_script", kwargs.get("inject_script"))

    # Command-line document roots override anything from the config files;
    # both must exist on disk.
    overriding_path_args = [("doc_root", "Document root"),
                            ("ws_doc_root", "WebSockets document root")]
    for key, title in overriding_path_args:
        value = kwargs.get(key)
        if value is None:
            continue
        value = os.path.abspath(os.path.expanduser(value))
        if not os.path.exists(value):
            raise ValueError("%s path %s does not exist" % (title, value))
        setattr(rv, key, value)

    return rv
def get_parser():
    """Build the argparse parser for the serve command-line interface."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--latency", type=int,
                        help="Artificial latency to add before sending http responses, in ms")
    parser.add_argument("--config", action="store", dest="config_path",
                        help="Path to external config file")
    parser.add_argument("--doc_root", action="store", dest="doc_root",
                        help="Path to document root. Overrides config.")
    parser.add_argument("--ws_doc_root", action="store", dest="ws_doc_root",
                        help="Path to WebSockets document root. Overrides config.")
    parser.add_argument("--inject-script", default=None,
                        help="Path to script file to inject, useful for testing polyfills.")
    parser.add_argument("--alias_file", action="store", dest="alias_file",
                        help="File with entries for aliases/multiple doc roots. In form of `/ALIAS_NAME/, DOC_ROOT\\n`")
    # --h2 is a hidden no-op (h2 is on by default); --no-h2 disables it.
    parser.add_argument("--h2", action="store_true", dest="h2", default=None,
                        help=argparse.SUPPRESS)
    parser.add_argument("--no-h2", action="store_false", dest="h2", default=None,
                        help="Disable the HTTP/2.0 server")
    parser.add_argument("--webtransport-h3", action="store_true",
                        help="Enable WebTransport over HTTP/3 server")
    parser.add_argument("--exit-after-start", action="store_true", help="Exit after starting servers")
    parser.add_argument("--verbose", action="store_true", help="Enable verbose logging")
    # Defaults consumed by downstream tooling that shares this parser.
    parser.set_defaults(report=False)
    parser.set_defaults(is_wave=False)
    return parser
class MpContext:
    # Fallback "multiprocessing context" for Pythons without
    # multiprocessing.get_context(): delegates every attribute lookup
    # straight to the multiprocessing module.
    def __getattr__(self, name):
        return getattr(multiprocessing, name)
def get_logger(log_level, log_handlers):
    """Return the root logger configured to log at *log_level*.

    If the logger already has handlers, log_handlers is ignored.
    Otherwise the supplied handlers are attached; with none supplied,
    a stdout StreamHandler is installed as a fallback.

    Typically this is called once per process to set up logging in that
    process.

    :param log_level: a level name, e.g. "info"
    :param log_handlers: optional list of logging.Handler objects
    """
    root = logging.getLogger()
    root.setLevel(getattr(logging, log_level.upper()))
    if root.hasHandlers():
        return root

    handlers = log_handlers
    if handlers is None:
        fallback = logging.StreamHandler(sys.stdout)
        fallback.setFormatter(logging.Formatter(
            "[%(asctime)s %(processName)s] %(levelname)s - %(message)s"))
        handlers = [fallback]
    for handler in handlers:
        root.addHandler(handler)
    return root
def run(config_cls=ConfigBuilder, route_builder=None, mp_context=None, log_handlers=None,
        **kwargs):
    """Top-level driver: build the config, start all servers, and babysit
    them until one dies or the user interrupts.

    Returns the number of server subprocesses that exited with a non-zero
    status (0 means a clean shutdown).
    """
    logger = get_logger("INFO", log_handlers)

    if mp_context is None:
        if hasattr(multiprocessing, "get_context"):
            mp_context = multiprocessing.get_context()
        else:
            # Very old Pythons lack get_context(); proxy the module itself.
            mp_context = MpContext()

    with build_config(logger,
                      os.path.join(repo_root, "config.json"),
                      config_cls=config_cls,
                      **kwargs) as config:
        # This sets the right log level
        logger = get_logger(config.logging["level"], log_handlers)

        bind_address = config["bind_address"]

        if kwargs.get("alias_file"):
            # Each line is "/ALIAS_PATH/, DOC_ROOT".
            with open(kwargs["alias_file"]) as alias_file:
                for line in alias_file:
                    alias, doc_root = (x.strip() for x in line.split(','))
                    config["aliases"].append({
                        'url-path': alias,
                        'local-dir': doc_root,
                    })

        if route_builder is None:
            route_builder = get_route_builder
        routes = route_builder(logger, config.aliases, config).get_routes()

        if config["check_subdomains"]:
            check_subdomains(logger, config, routes, mp_context, log_handlers)

        stash_address = None
        if bind_address:
            stash_address = (config.server_host, get_port(""))
            logger.debug("Going to use port %d for stash" % stash_address[1])

        with stash.StashServer(stash_address, authkey=str(uuid.uuid4())):
            servers = start(logger, config, routes, mp_context, log_handlers, **kwargs)

            if not kwargs.get("exit_after_start"):
                try:
                    # Periodically check if all the servers are alive
                    server_process_exited = False
                    while not server_process_exited:
                        for server in iter_servers(servers):
                            server.proc.join(1)
                            if not server.proc.is_alive():
                                server_process_exited = True
                                break
                except KeyboardInterrupt:
                    pass

            failed_subproc = 0
            for server in iter_servers(servers):
                # NOTE(review): logs "running" unconditionally, even for a
                # subprocess that already exited — confirm intended.
                logger.info('Status of subprocess "%s": running', server.proc.name)
                server.request_shutdown()

            for server in iter_servers(servers):
                server.wait(timeout=1)
                if server.proc.exitcode == 0:
                    logger.info('Status of subprocess "%s": exited correctly', server.proc.name)
                else:
                    subproc = server.proc
                    logger.warning('Status of subprocess "%s": failed. Exit with non-zero status: %d',
                                   subproc.name, subproc.exitcode)
                    failed_subproc += 1
            return failed_subproc
def main():
    """Command-line entry point: parse arguments and run the servers."""
    namespace = get_parser().parse_args()
    return run(**vars(namespace))
|
988,797 | 45dd076e67e7b0aa3e828ae9f09e605be013a1f6 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@FileName: function_return.py
@Function: python function return
@Author: Zhihe An
@Site: https://chegva.com
@Time: 2021/7/4
"""
"""一、函数的定义之多个返回值"""
"""
如果需要在调用函数后有多个返回值,可以在定义函数时在函数体内使用return语句返回由多个返回值组成的元组
"""
# Split all the numbers in a list into odd and even groups.
def classify_numbers(numbers):
    """Return (odds, evens): the members of *numbers* partitioned by
    parity, each group preserving the original order."""
    # Equivalent comprehension form of the original append loop.
    odds = [n for n in numbers if n % 2]
    evens = [n for n in numbers if not n % 2]
    return odds, evens


print(classify_numbers([15, 86, 39, 26, 53, 68]))  # ([15, 39, 53], [86, 26, 68])
# Find the smallest and largest values in a list.
def lookup_min_max(numbers):
    """Return (min, max) of *numbers*, or None for an empty list."""
    if not numbers:
        return
    low = high = numbers[0]
    for value in numbers[1:]:
        if value < low:
            low = value
        elif value > high:
            high = value
    return low, high


print(lookup_min_max([35, 26, 19, 86, 93, 68]))  # (19, 93)
988,798 | 1889c46f33f9ecd02dfc6d50f96d51af9d47512a | # the difference between list and tuple :
# tuple is constant type
# a tuple can be used as a function argument or return value
# the runtime handles tuples faster than lists
# a tuple can be a dict key; a list cannot
a = (1, 2, 3)
print(type(a))
print(a)

b = (1)  # NOTE: parentheses alone do not make a tuple — this is the int 1
print(type(b))
b = (1,)  # the trailing comma is what creates a one-element tuple
print(type(b))

a = 1, 2, 3  # tuple packing: parentheses are optional
print(type(a))
print(a + b)
print(a * 3)
print(a.count(1))
print(a.index(3))

# prefer a tuple when the items are strongly related
position = (12, 10)
color = (255, 255, 255)

t = 1, 2, 3
aa, bb, cc = t  # tuple unpacking
print(f"t = {t} aa = {aa}, bb = {bb}, cc = {cc} ")
|
988,799 | 8179ffa60cdec3c5b9e825826942fa85544f3074 | import socket
from app.controller import verify
from app.models import Admin
from flask import request, render_template, jsonify, session, redirect, url_for
@verify
def pass_change():
    # Render the password-change form; @verify requires a logged-in session.
    return render_template('pass_change.html')
@verify
def pass_change_submit():
    """Handle the password-change form post.

    JSON status codes: 0 = wrong current password, 1 = confirmation
    mismatch, 2 = update failed, 3 = success.
    """
    # NOTE(review, security): passwords are stored and compared in plain
    # text here; they should be hashed (e.g. werkzeug.security) before
    # being persisted or compared.
    user = Admin.query.filter(Admin.userid == session['userid']).first()
    if user.password != request.form['password']:
        return jsonify({'status': 0})
    if request.form['new_password'] != request.form['confirm_password']:
        return jsonify({'status': 1})
    res = Admin.query.filter(Admin.userid == session['userid']).update(
        {Admin.password: request.form['new_password']})
    # BUG FIX: compare with ``is None`` (identity), not ``== None`` —
    # PEP 8, and ``==`` can trigger operator overloading on some objects.
    if res is None:
        return jsonify({'status': 2})
    else:
        return jsonify({'status': 3})
@verify
def system_help():
    # Render the static help page; @verify requires a logged-in session.
    return render_template('system_help.html')
@verify
def main():
    """Render the dashboard, including client request details and this
    server's resolved hostname and IP address."""
    fqdn = socket.getfqdn(socket.gethostname())
    ip_addr = socket.gethostbyname(fqdn)
    return render_template('main.html',
                           user_agent=request.user_agent,
                           remote_addr=request.remote_addr,
                           server_name=fqdn,
                           server_addr=ip_addr)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.