index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
985,200 | f485e638d00518b0c92c5f4bfc20a01e5e4d4909 | from output.models.ibm_data.valid.d3_4_28.d3_4_28v02_xsd.d3_4_28v02 import (
DTimeStampEnumeration,
Root,
)
__all__ = [
"DTimeStampEnumeration",
"Root",
]
|
985,201 | 31c4affe8c272546d0bc6f91a514b56ca6c42cd5 | #!/usr/bin/env python
import os
import sqlite3
import subprocess
import sys
# NOTE: Python 2 script (print statements, `print >>` redirection).
# Drains a password-change queue: for each pending row, invokes kadmin to set
# the password, marks the row done on success, then flags the principal so the
# user must change the password at next login.
if "REMOTE_ADDR" in os.environ:
    # Refuse to run as a CGI: emit a minimal response and bail out.
    print "Content/type: text/html"
    print ""
    print "Wrong page"
    exit(0)
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from my_db import db_exec_sql
# This is not a CGI
queue = db_exec_sql('select * from queue where done=False order by date asc')
if queue != None:
    # Each row i is (date?, username, password, ...) -- i[1] is the username
    # and i[2] the password, judging from the usage below.
    for i in queue:
        # NOTE(review): the password i[2] is interpolated into a shell string
        # run with shell=True -- looks like a shell-injection risk; consider
        # passing an argument list instead. Verify before changing.
        commandline=u"""kadmin -p automator/admin -k -t /etc/krb5.keytab -q "change_password -pw %s %s" """ % (i[2], i[1], )
        try:
            retcode = subprocess.call(commandline, shell=True)
            if retcode < 0:
                print >>sys.stderr, "Child was terminated by signal", -retcode
            else:
                if retcode == 0:
                    # Success: mark the queue row as processed.
                    db_exec_sql('update queue set done=True where username = %s and password = %s', (i[1],i[2]))
                else:
                    print >>sys.stderr, "Child returned", retcode
        except OSError as e:
            print >>sys.stderr, "Execution failed:", e
        # Force a password change at next login for this principal.
        commandline=u"""kadmin -p automator/admin -k -t /etc/krb5.keytab -q "modprinc +needchange %s" """ % (i[1], )
        try:
            retcode = subprocess.call(commandline, shell=True)
        except OSError as e:
            print >>sys.stderr, "Execution failed:", e
|
985,202 | 63e68b40e94ab1f49f8624b9c810c14059536fcc | '''
Created on 2019. 6. 11.
@author: Playdata
'''
import numpy as np
import csv
import pandas as pd
import math
# 1. Load the pickled term-count DataFrame (must arrive as a DataFrame).
data = pd.read_pickle('matrix_news20190611.pkl')
# DataFrame.as_matrix() was deprecated in pandas 0.23 and removed in 1.0;
# to_numpy() is the supported equivalent.
matrix_data = data.to_numpy()
print(matrix_data.shape[1])
# TF  = log-smoothed per-cell count (applied below)
# IDF = log(total rows / rows-with-a-nonzero-entry), computed per row here.
# NOTE(review): the row/column orientation looks transposed vs. classic
# TF-IDF, but the original behaviour is preserved -- verify against the
# producer of the pickle.
for i in range(len(matrix_data)):
    # Count zero entries in row i; the IDF denominator is the number of
    # non-zero entries (len - count).
    count = 0
    for j in range(matrix_data.shape[1]):
        if matrix_data[i][j] == 0:
            count += 1
    IDF = math.log(len(matrix_data) / (len(matrix_data) - count))
    print(str(i) + '번 단어의 IDF는 ' + str(IDF))
    # TF * IDF: weight every column of the row with log-smoothed TF * IDF.
    # Column 0 is skipped -- assumed to hold the label; widen the range if
    # the data has no label column (as the original comment advised).
    for j in range(1, matrix_data.shape[1]):
        matrix_data[i][j] = math.log(matrix_data[i][j] + 1) * IDF
# Write the weighted matrix back into the DataFrame column by column
# (column 0 again left untouched as the label column).
for i in range(1, matrix_data.shape[1]):
    data[data.columns[i]] = matrix_data[::, i]
# Persist for inspection; cosine similarity is computed downstream.
data.to_csv('tf_to_csv.csv', encoding='utf-8')
985,203 | 3559ff2848d33c5ddfc41d784c7468ca4560090c | print("test: ord('A')")
print(ord('A'))
print("test: chr(66)")
print(chr(66))
print("test: chr(25991)")
print(chr(25991))
print("test: 'ABC'.encode('ascii')")
print('ABC'.encode('ascii'))
print("test: len('ABC')")
print(len('ABC'))
print("test: 'Hi, %s, you have $%d.' % ('Michael', 1000000)")
print('Hi, %s, you have $%d.' % ('Michael', 1000000))
print("test: 'Hello, {0}, 成绩提升了 {1:.1f}%'.format('小明', 17.125)")
print('Hello, {0}, 成绩提升了 {1:.1f}%'.format('小明', 17.125))
|
985,204 | 61fd6f3be5c8279643f21ed03da5ad67b4394653 | # Generated by Django 2.1.5 on 2019-05-14 22:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Tweet.tweetID the primary key (blank allowed, not serialized)."""

    # Must run after the previous auto-generated migration of this app.
    dependencies = [
        ('genatweetor', '0018_auto_20190514_2232'),
    ]

    operations = [
        # Redefine tweetID as an integer primary key; serialize=False keeps
        # the field out of serialized model output.
        migrations.AlterField(
            model_name='tweet',
            name='tweetID',
            field=models.IntegerField(blank=True, primary_key=True, serialize=False),
        ),
    ]
|
985,205 | 36c25bc315f5b65d8d0d60b6e168953b04229792 | from django.apps import AppConfig
class DjangoCoreuiKitConfig(AppConfig):
    """Django application configuration for the django_coreuikit package."""

    name = 'django_coreuikit'
|
985,206 | fcc1f4e8908f810bbda828f75a818af9a56bc67a | import importlib
from iface import Distractor
mod = 'impl'
def get_interface(mod_name, iface):
    """Return an instance from module `mod_name` that implements `iface`.

    Scans every callable in the module, instantiates it, and returns the
    last instance that is a Distractor but not the interface class itself
    (i.e. a concrete implementation rather than the base).

    :param mod_name: name of the module to scan (e.g. 'impl').
    :param iface: interface class; must be constructible with no arguments.
    :return: an implementing instance, or None if none is found.
    """
    iface_return = None
    # Instantiate the interface once to learn its concrete class, so the
    # interface itself can be skipped during the scan.
    iface_impl = iface()
    iface_name = iface_impl.__class__
    # BUG FIX: the original imported the module-level global `mod` instead of
    # honoring the `mod_name` parameter.
    loaded_mod = importlib.import_module(mod_name)
    for attr_name in dir(loaded_mod):
        mod_attr = getattr(loaded_mod, attr_name)
        if callable(mod_attr):
            try:
                t_created = mod_attr()
                # NOTE(review): this checks against the Distractor base, not
                # `iface`; fine while the two coincide -- verify before
                # generalizing to other interfaces.
                if isinstance(t_created, Distractor) and t_created.__class__ != iface_name:
                    iface_return = t_created
            except Exception:
                # Callables needing arguments (or failing to build) are skipped.
                pass
    return iface_return
# Resolve a Distractor implementation from the 'impl' module and invoke it.
thing = get_interface(mod, Distractor)
thing.func("Dan")
quit()
|
985,207 | 84d2a93843ab86d5188712084368730a8e337f20 | import abc
class Vegetable(abc.ABC):
    """Abstract base class for vegetables; cannot be instantiated directly."""

    @abc.abstractmethod
    def grow(self, seed_number=0):
        """Grow from `seed_number` seeds; concrete subclasses implement this."""
        pass

# Example subclass (kept from the original, still disabled):
# class Tomate(Vegetable):
#     def grow(self, seed_number=0):
#         return seed_number
|
985,208 | 3919ed1cc7e684aa8b8057fc73e4fc1d4ea29209 | """Generic mapping to Select statements"""
from sqlalchemy.test.testing import assert_raises, assert_raises_message
import sqlalchemy as sa
from sqlalchemy.test import testing
from sqlalchemy import String, Integer, select
from sqlalchemy.test.schema import Table
from sqlalchemy.test.schema import Column
from sqlalchemy.orm import mapper, create_session
from sqlalchemy.test.testing import eq_
from test.orm import _base
# TODO: more tests mapping to selects
class SelectableNoFromsTest(_base.MappedTest):
    """Tests for mapping an ORM class directly onto a select() statement."""

    @classmethod
    def define_tables(cls, metadata):
        # Single backing table used by the select()-mapped tests below.
        Table('common', metadata,
              Column('id', Integer, primary_key=True),
              Column('data', Integer),
              Column('extra', String(45)))

    @classmethod
    def setup_classes(cls):
        class Subset(_base.ComparableEntity):
            # Comparable entity later mapped to a subset select of `common`.
            pass

    @testing.resolve_artifact_names
    def test_no_tables(self):
        # A select() over bare strings references no Table at all; the mapper
        # must refuse it with InvalidRequestError.
        selectable = select(["x", "y", "z"])
        assert_raises_message(sa.exc.InvalidRequestError,
                              "Could not find any Table objects",
                              mapper, Subset, selectable)

    @testing.emits_warning('.*creating an Alias.*')
    @testing.resolve_artifact_names
    def test_basic(self):
        # `common` and `Subset` are injected by the resolve_artifact_names
        # decorator from the fixtures defined above.
        subset_select = select([common.c.id, common.c.data])
        subset_mapper = mapper(Subset, subset_select)
        sess = create_session(bind=testing.db)
        sess.add(Subset(data=1))
        sess.flush()
        sess.expunge_all()
        # Round-trip: query, then filter (match and non-match) through the
        # select-mapped entity.
        eq_(sess.query(Subset).all(), [Subset(data=1)])
        eq_(sess.query(Subset).filter(Subset.data==1).one(), Subset(data=1))
        eq_(sess.query(Subset).filter(Subset.data!=1).first(), None)
        # The mapped selectable is exposed as mapped_table; filtering on its
        # columns is equivalent to filtering on entity attributes.
        subset_select = sa.orm.class_mapper(Subset).mapped_table
        eq_(sess.query(Subset).filter(subset_select.c.data==1).one(),
            Subset(data=1))
|
985,209 | d0707e335be214945b809350eacf5224e8723ea0 | #!/usr/bin/env python3
import os, os.path
from getpass import getpass
class SSLOptions:
    """Captures standard SSL X509 client parametres.

    Grab standard grid certificate environment into easier to access
    fields: ``ca_path``, ``key_file``, ``cert_file`` and ``key_pass``.
    Typically ``ca_path`` will be taken from $X509_CERT_DIR environment
    variable, and ``key_file`` and ``cert_file`` from either
    $X509_USER_PROXY or $X509_USER_CERT and $X509_USER_KEY environment
    variables.

    If the key file looks like it's a private key rather than a proxy,
    i.e. key and cert files are different paths, the class constructor
    will prompt the user for the key password. That password should be
    offered to lower level HTTP library as the key password so it will
    not prompt again.

    If the environment variables are not set, the following defaults
    are checked for existence:

    * $X509_CERT_DIR: /etc/grid-security/certificates
    * $X509_USER_KEY: $HOME/.globus/userkey.pem
    * $X509_USER_CERT: $HOME/.globus/usercert.pem

    If neither the standard environment variables nor the default path
    locations exist, the constructor throws an exception."""

    def __init__(self, proxy_only = False):
        """Initialise the SSL X509 options. If `proxy_only`, will never
        prompt for password even if key and cert files are separate, on
        the assumption this will only ever be used with proxies.

        :raises RuntimeError: if no CA directory, private key or public
            certificate can be located.
        """
        self.key_file = None
        self.cert_file = None
        self.ca_path = None
        self.key_pass = None

        # CA directory: $X509_CERT_DIR first, then the grid default location.
        path = os.getenv("X509_CERT_DIR", None)
        if path and os.path.exists(path):
            self.ca_path = path
        if not self.ca_path:
            path = "/etc/grid-security/certificates"
            if os.path.exists(path):
                self.ca_path = path

        # A proxy supplies both the key and the certificate in one file.
        path = os.getenv("X509_USER_PROXY", None)
        if path and os.path.exists(path):
            self.key_file = self.cert_file = path

        # Otherwise fall back to separate key/cert environment variables...
        if not self.key_file:
            path = os.getenv("X509_USER_KEY", None)
            if path and os.path.exists(path):
                self.key_file = path
        if not self.cert_file:
            path = os.getenv("X509_USER_CERT", None)
            if path and os.path.exists(path):
                self.cert_file = path

        # ...and finally the conventional ~/.globus locations.  BUG FIX:
        # expanduser() is used instead of os.getenv("HOME") + "...", which
        # raised TypeError when $HOME was unset (e.g. under some daemons).
        if not self.key_file:
            path = os.path.expanduser("~/.globus/userkey.pem")
            if os.path.exists(path):
                self.key_file = path
        if not self.cert_file:
            path = os.path.expanduser("~/.globus/usercert.pem")
            if os.path.exists(path):
                self.cert_file = path

        if not self.ca_path or not os.path.exists(self.ca_path):
            raise RuntimeError("no certificate directory found")
        if not self.key_file or not os.path.exists(self.key_file):
            raise RuntimeError("no certificate private key file found")
        if not self.cert_file or not os.path.exists(self.cert_file):
            raise RuntimeError("no certificate public key file found")

        # Separate key and cert imply a real private key: prompt once so the
        # HTTP layer can reuse the password instead of prompting again.
        if not proxy_only and self.key_file != self.cert_file:
            self.key_pass = getpass("Password for %s: " % self.key_file)
|
985,210 | 5d5b29c9197d99cad3bcedcfbd6e81568364e188 | # -*- coding: utf-8 -*-
# Copyright 2023 Cohesity Inc.
import cohesity_management_sdk.models.cluster_config_proto_subnet
import cohesity_management_sdk.models.alias_smb_config
class ViewAliasInfo(object):
    """Implementation of the 'ViewAliasInfo' model.

    View Alias Info is returned as part of list views.

    Attributes:
        alias_name (string): Alias name.
        client_subnet_whitelist (list of ClusterConfigProtoSubnet): List of
            external client subnet IPs that are allowed to access the share.
        smb_config (AliasSmbConfig): Message defining SMB config for IRIS.
            SMB config contains SMB encryption flags, SMB discoverable flag
            and Share level permissions.
        view_path (string): View path for the alias.
    """

    # Maps Python attribute names to their wire (API) property names.
    _names = {
        "alias_name":'aliasName',
        "client_subnet_whitelist":'clientSubnetWhitelist',
        "smb_config":'smbConfig',
        "view_path":'viewPath'
    }

    def __init__(self,
                 alias_name=None,
                 client_subnet_whitelist=None,
                 smb_config=None,
                 view_path=None):
        """Constructor for the ViewAliasInfo class"""
        self.alias_name = alias_name
        self.client_subnet_whitelist = client_subnet_whitelist
        self.smb_config = smb_config
        self.view_path = view_path

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the
                object as obtained from the deserialization of the server's
                response. The keys MUST match property names in the API
                description.

        Returns:
            object: An instance of this structure class, or None.
        """
        if dictionary is None:
            return None

        # Deserialize the nested structures first, then build the instance.
        raw_subnets = dictionary.get('clientSubnetWhitelist')
        if raw_subnets is not None:
            whitelist = [
                cohesity_management_sdk.models.cluster_config_proto_subnet.ClusterConfigProtoSubnet.from_dictionary(entry)
                for entry in raw_subnets
            ]
        else:
            whitelist = None
        raw_smb = dictionary.get('smbConfig')
        smb = (cohesity_management_sdk.models.alias_smb_config.AliasSmbConfig.from_dictionary(raw_smb)
               if raw_smb else None)
        return cls(dictionary.get('aliasName'),
                   whitelist,
                   smb,
                   dictionary.get('viewPath'))
|
985,211 | df6b45298ae27d0cbc00da5efd431aa27c6eaca4 | import requests
import argparse
import os
import configparser
def setup_conf(config):
    """Interactively create config.ini with access/secret keys and bucket.

    Prompts the user for the three values, writes them into a [Config]
    section of config.ini, and returns the populated ConfigParser.
    """
    print("Set up your access key and buckets")
    config.add_section("Config")
    access_key = input("Enter access key value:")
    secret_key = input("Enter secret key value:")
    bucket = input("Enter bucket name:")
    config.set("Config","access-key",access_key)
    config.set("Config","secret-key",secret_key)
    config.set("Config","bucketName",bucket)
    # Context manager guarantees the handle is closed even if write() fails
    # (the original opened the file early and leaked it on error).
    with open("config.ini", "w") as file:
        config.write(file)
    return config
def ls(args, config):
    """Issue an `ls` request to the S4 service and return the response body.

    The request is encoded entirely in HTTP headers: credentials, the
    method, the bucket name and (optionally) an item path parsed from
    args.S4Uri.
    """
    headers = {}
    headers['access-key'] = config['access-key']
    headers['secret-key'] = config['secret-key']
    # Defaults for every form of the command; the s4:// branch overrides the
    # bucket name with the one embedded in the URI.
    headers['method'] = "ls"
    headers['bucket-name'] = config['bucketName']
    if isinstance(args.S4Uri, type(None)):
        # No URI: list the configured bucket root.
        pass
    elif "s4://" in args.S4Uri:
        # s4://bucket/path -> bucket from the URI, remainder is the item.
        temp = args.S4Uri[5:]
        headers['item'] = temp[temp.find("/")+1:]
        headers['bucket-name'] = temp[:temp.find("/")]
        print(args.S4Uri[5:])
    elif "/" in args.S4Uri:
        # Bucket-relative path.  BUG FIX: the original forgot to set the
        # 'method' and 'bucket-name' headers in this branch; the defaults
        # above now cover it.
        temp = args.S4Uri
        headers['item'] = temp[temp.find("/")+1:]
        print(temp)
    else:
        # Bare item name inside the configured bucket.
        headers['item'] = args.S4Uri
    print(headers)
    response = requests.get("http://web3.crikey.ctf:8080", headers=headers)
    #response = requests.get("http://web3.crikey.ctf:8080", headers=headers, proxies={ "http" : "http://127.0.0.1:8080" })
    return response.text
def cp(args, config):
    """Issue a `cp` request to the S4 service and return the response body.

    Only credentials and the method header are sent; the copy source/target
    headers are not implemented yet (see TODO below).
    """
    headers = {}
    headers['access-key'] = config['access-key']
    headers['secret-key'] = config['secret-key']
    headers['method'] = "cp"
    # TODO add target function where I can specify the header
    # headers['target'] = grab my args here
    # proxydict = { "http" : "http://127.0.0.1:8080" }
    # response = requests.get("http://web3.crikey.ctf:8080", headers=headers, proxies=proxydict)
    response = requests.get("http://web3.crikey.ctf:8080", headers=headers)
    return response.text
def setup_parser():
    """Build the argparse CLI: a positional service name plus ls/cp subcommands."""
    parser = argparse.ArgumentParser(
        description='Change the option prefix characters', prefix_chars='-',)
    parser.add_argument("s4", help="The service to use (hint: there's only s4)", type=str)
    # Each subcommand dispatches through set_defaults(func=...).
    subparsers = parser.add_subparsers(help='sub-command help')
    ls_cmd = subparsers.add_parser('ls', help='ls help')
    ls_cmd.add_argument("S4Uri", nargs='?', help="s4://bucketname or None", type=str)
    ls_cmd.set_defaults(func=ls)
    cp_cmd = subparsers.add_parser('cp', help='cp help')
    cp_cmd.add_argument("<filename>", nargs=2, help="S4Uri and localfile or localfile and S4Uri", type=str)
    cp_cmd.set_defaults(func=cp)
    return parser
def main():
    """Entry point: load (or interactively create) config.ini, then parse
    the command line and dispatch to the chosen subcommand."""
    # flag{aye_double_u_ess_s4_tool}
    # Set up or read config if it exists
    config = configparser.ConfigParser()
    if not os.path.exists("config.ini"):
        config = setup_conf(config)
    else:
        try:
            config.read("config.ini")
            # Probe one key to validate the file's structure.
            key_test = config["Config"]["access-key"]
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt;
            # consider narrowing to (KeyError, configparser.Error).
            print("An error occurred with your config file, please recreate it")
            config = setup_conf(config)
    # Simplify access
    config = config['Config']
    parser = setup_parser()
    args = parser.parse_args()
    try:
        # With no subcommand, args has no .func -- show usage instead.
        print(args.func(args, config))
    except AttributeError:
        parser.print_help()

if __name__ == "__main__":
    main()
|
985,212 | 38b4cf60a7e4e37af0d536c4dd990944b319ec06 | from __future__ import absolute_import
import numpy as np
from .Node import Op
from .._base import DNNL_LIB
from ..gpu_links import array_set
from ..cpu_links import array_set as cpu_array_set
class ZerosLikeOp(Op):
    """Op producing a zero-filled tensor with the same shape as its input."""

    def __init__(self, node_A, ctx=None):
        super().__init__(ZerosLikeOp, [node_A], ctx)

    def compute(self, input_vals, output_val, stream_handle=None):
        # Fill the output with zeros, choosing the DNNL kernel, a NumPy
        # fallback, or the GPU kernel depending on placement.
        if self.on_cpu:
            if DNNL_LIB['cpu_ArraySet']:
                cpu_array_set(output_val, 0)
            else:
                output_val[:] = np.zeros(input_vals[0].asnumpy().shape)
        else:
            # GPU path; stream_handle sequences the asynchronous launch.
            array_set(output_val, 0, stream_handle)

    def gradient(self, output_grad):
        # The gradient of a constant-zero output w.r.t. its input is zero,
        # with the same shape as the input.
        return [zeroslike_op(self.inputs[0], ctx=self.raw_ctx)]

    def infer_shape(self, input_shapes):
        # Output shape mirrors the single input's shape.
        assert len(input_shapes) == 1
        return input_shapes[0]
def zeroslike_op(node, ctx=None):
    """Creates a node that represents np.zeros(node_A.shape).

    Parameters:
    ----
    node : Node
        The Node to pad with 0.
    ctx : optional
        Device context for the new node.

    Returns:
    ----
    A new Node instance created by Op.
    """
    return ZerosLikeOp(node, ctx=ctx)
|
985,213 | b4b4ae8216c78cc76d8bdb1735a0da70c29730ce | from pynsp import obtcp
from pynsp import obudp
from agvshell.shproto import report as agvshreport
from agvshell.shproto.proto_head import *
import threading
from time import sleep
from agvmt.mtproto import discover
from agvinfo import agvinfodata
from copy import deepcopy
import pdb
from pynsp.logger import *
class agvinfo_runtime:
    """Runtime record for one online AGV (id, endpoint, liveness flags)."""

    # Class-level default id; instances always receive an explicit id.
    aid = 0

    def __init__(self, _id, _host, _port, _mac, _shport):
        self.id = _id
        self.host = _host
        self.port = _port
        self.mac = _mac
        self.shport = _shport
        # Keepalive budget: decremented on every sweep, refreshed on report.
        self.alive = 3
        # True once the motion-template service answered a discover probe.
        self.mtready = False
# Online AGVs keyed by MAC address.
agv_online = dict()  # map<mac, agvinfo_runtime>
mutex_agvonline = threading.RLock()
mutex_agvcfg = threading.RLock()  # protects agvinfodata.dict_xml_agvinfo
# Highest vehicle id seen in the config; bumped when new AGVs register.
current_max_agvid_config = 0
def agvinfo_search_bymac(mac):
    """Return a deep copy of the configured AGV whose hwaddr equals `mac`,
    or None if no such entry exists."""
    found = None
    with mutex_agvcfg:
        for entry in agvinfodata.dict_xml_agvinfo:
            if entry.hwaddr == mac:
                found = deepcopy(entry)
                break
    return found
def agvinfo_search_unused():
    """Return a deep copy of the first configured AGV slot whose hwaddr is
    empty (an unassigned entry), or None."""
    found = None
    with mutex_agvcfg:
        for entry in agvinfodata.dict_xml_agvinfo:
            if len(entry.hwaddr) == 0:
                found = deepcopy(entry)
                break
    return found
# multi-inheritance calls __init__ (and resolves overrides) left to right
class agvinfo_shellser(obudp.obudp, threading.Thread):
    """UDP service tracking AGV shell reports and motion-template liveness.

    Listens for agv_shell local-info reports and MT discover ACKs, maintains
    the global agv_online map, persists registrations to the XML config, and
    invokes `notify_changed` whenever the online set or readiness changes.
    Also a thread: run() sweeps keepalive counters every 2 seconds.
    """

    def __init__(self, _notify_changed = None):
        super(agvinfo_shellser, self).__init__()
        threading.Thread.__init__(self)
        # Optional callback fired whenever the AGV set or state changes.
        self.notify_changed = _notify_changed
        self.__terminate = False

    def __del__(self):
        pass

    def __select_agv_bymac(self, _rep, _from, _port):
        """Return (agv, is_new) for the reporting MAC.

        New MACs are matched against the config by MAC first, then against
        an unused slot, and finally assigned a fresh id past the current
        maximum.
        """
        global current_max_agvid_config
        mutex_agvonline.acquire()
        if _rep.mac.value not in agv_online:
            mutex_agvonline.release()
            # Config lookups below are guarded by mutex_agvcfg internally;
            # mutex_agvonline is not needed here.
            agv_exist = agvinfo_search_bymac(_rep.mac.value)
            if None != agv_exist:
                # Reuse the existing configured item's vehicle id.
                agv = agvinfo_runtime(agv_exist.vhid, _from, _port, _rep.mac.value, _rep.sh_port)
            else:
                agv_unsed = agvinfo_search_unused()
                if None != agv_unsed:
                    agv = agvinfo_runtime(agv_unsed.vhid, _from, _port, _rep.mac.value, _rep.sh_port)
                else:
                    # No slot available: mint a brand-new vehicle id.
                    current_max_agvid_config += 1
                    agv = agvinfo_runtime(current_max_agvid_config, _from, _port, _rep.mac.value, _rep.sh_port)
            return agv, True
        else:
            agv = agv_online[_rep.mac.value]
            mutex_agvonline.release()
            return agv, False

    def on_shell_report(self, _data, _cb, _from, _port):
        """Handle an agv_shell local-info report: refresh or register the AGV."""
        rep = agvshreport.agvsh_local_report()
        rep.build(_data, 0)
        need_notify = False
        # Choose (or create) the runtime item holding this AGV's state.
        agv, new = self.__select_agv_bymac(rep, _from, _port)
        if not new:
            mutex_agvonline.acquire()
            agv.host = _from
            agv.port = _port
            agv.shport = rep.sh_port
            # reset keepalive counter
            agv.alive = 3
            mutex_agvonline.release()
        else:
            mutex_agvonline.acquire()
            agv_online[agv.mac] = agv
            mutex_agvonline.release()
            Logger().get_logger().info('aginfoser agv id={} mac= {} online.'.format(agv.id, agv.mac))
            # Persist the registration to the XML config in any case.
            cfgagv = agvinfodata.agvinfo_t()
            cfgagv.vhid = agv.id
            cfgagv.inet = agv.host
            cfgagv.shport = agv.shport
            cfgagv.hwaddr = agv.mac
            mutex_agvcfg.acquire()
            agvinfodata.update_agvinfo(cfgagv)
            mutex_agvcfg.release()
            need_notify = True
        # Notify the calling thread that AGV info changed.
        if None != self.notify_changed and need_notify:
            self.notify_changed()

    def on_mt_discover_ack(self, _from):
        """Mark the AGV at address `_from` as motion-template ready."""
        Logger().get_logger().info('on_mt_discover_ack')
        # BUG FIX: need_notify must be initialised up front; the original
        # raised UnboundLocalError when no online AGV matched `_from`.
        need_notify = False
        mutex_agvonline.acquire()
        ls_keys = list(agv_online.keys())
        for i in ls_keys:
            agv = agv_online[i]
            if agv.host == _from:
                agv.mtready = True
                need_notify = True
                break
        mutex_agvonline.release()
        if None != self.notify_changed and need_notify:
            self.notify_changed()

    def on_recvdata(self, _data, _cb, _from, _port):
        # Dispatch on the protocol header type of the incoming datagram.
        phead = proto_head()
        phead.build(_data, 0)
        if phead.type == agvshreport.kAgvShellProto_LocalInfo:
            self.on_shell_report(_data, _cb, _from, _port)
        elif phead.type == discover.PKTTYPE_KEEPALIVE_UDP_ACK:
            self.on_mt_discover_ack(_from)

    def on_closed(self, _previous):
        # Stop the keepalive sweep thread and wait for it to finish.
        self.__terminate = True
        self.join()

    def run(self):
        """Keepalive sweep: every 2s decrement counters, drop dead AGVs, and
        re-probe motion templates that are not ready yet."""
        while not self.__terminate:
            need_notify = False
            mutex_agvonline.acquire()
            # Iterate over a snapshot of the keys: deleting while iterating
            # the dict raises 'dictionary changed size during iteration'.
            ls_keys = list(agv_online.keys())
            for i in ls_keys:
                agv = agv_online[i]
                agv.alive -= 1
                if agv.alive == 0:
                    Logger().get_logger().info('aginfoser agv id:{} mac:{} has been removed.'.format(agv.id,agv.mac))
                    need_notify = True
                    del(agv_online[i])
                # Probe whether the motion_template service exists.
                elif agv.mtready == False:
                    self.__test_mtready(agv.host)
            mutex_agvonline.release()
            if None != self.notify_changed and need_notify:
                self.notify_changed()
            # 3 missed sweeps * 2 seconds => keepalive failure.
            sleep(2)

    def create(self, _host = '0.0.0.0', _port = 9022)->int:
        # Start the sweep thread, then bind the UDP endpoint.
        self.start()
        return super(agvinfo_shellser, self).create(host = _host, port = _port)

    def __test_mtready(self, _host, _port = 4409):
        # Send an MT discover probe; the ACK comes back via on_recvdata.
        pkt_discover_mt = discover.proto_discover_mt()
        pkt_discover_mt.phead.size(pkt_discover_mt.length())
        stream = pkt_discover_mt.serialize()
        self.send(stream, pkt_discover_mt.length(), _host, _port)

    def shclosed(self, _mac):
        """Force-remove an AGV whose shell connection closed."""
        need_notify = False
        mutex_agvonline.acquire()
        if _mac in agv_online:
            agv = agv_online[_mac]
            Logger().get_logger().info('aginfoser agv id:{} mac:{} has been force removed.'.format(agv.id,agv.mac))
            del(agv_online[_mac])
            need_notify = True
        mutex_agvonline.release()
        # Guard against a missing callback (consistent with other handlers).
        if need_notify and None != self.notify_changed:
            self.notify_changed()

    def mtclosed(self, _mac):
        """Mark an AGV's motion template as no longer ready."""
        need_notify = False
        mutex_agvonline.acquire()
        if _mac in agv_online:
            agv = agv_online[_mac]
            Logger().get_logger().info('aginfoser agv id:{} mac:{} mt closed.'.format(agv.id,agv.mac))
            agv.mtready = False
            need_notify = True
        mutex_agvonline.release()
        if need_notify and None != self.notify_changed:
            self.notify_changed()
# Singleton service instance (created by agvinfoser_startup).
ser = None

def agvinfoser_startup(_host = '0.0.0.0', _port = 9022, _notify_changed = None)->int:
    """Load the AGV config, then create and start the UDP info service.

    Returns 0 on success, -1 if the UDP endpoint could not be created."""
    global ser
    global current_max_agvid_config
    # load agvinfo from config file agv_info.xml
    agvinfodata.load_agvinfo_xml()
    # calc maximum vehicle id of current agv list
    for n in agvinfodata.dict_xml_agvinfo:
        if (type(n.vhid) == int) and (n.vhid > current_max_agvid_config):
            current_max_agvid_config = n.vhid
    Logger().get_logger().info('current maximum agvid={0}'.format(current_max_agvid_config))
    # create udp service (no-op if one already exists)
    if ser == None:
        ser = agvinfo_shellser(_notify_changed)
        if ser.create(_host, _port) < 0:
            Logger().get_logger().error('failed create udp server for agvinfo.')
            del(ser)
            ser = None
            return -1
    Logger().get_logger().info('agvinfo server startup successful.')
    return 0
def agvinfoser_shclosed(_mac):
    # Module-level passthrough: the shell connection for `_mac` closed.
    ser.shclosed(_mac)

def agvinfoser_mtclosed(_mac):
    # Module-level passthrough: the motion template for `_mac` closed.
    ser.mtclosed(_mac)

def agvinfoser_stop():
    """Close the service and drop the singleton so startup can run again."""
    global ser
    ser.close()
    # threading.Thread can only start once
    # is necessary to delete @ser object when stop method called
    del(ser)
    ser = None
def agvinfoser_getagvs()->dict:
    """Snapshot of the online AGV map (shallow copy taken under the lock)."""
    with mutex_agvonline:
        snapshot = agv_online.copy()
    return snapshot
def agvinfoser_getoffline()->list:
    """Configured AGVs that are not currently online.

    Unassigned config slots (empty hwaddr) count as offline as well."""
    with mutex_agvonline:
        online = agv_online.copy()
    with mutex_agvcfg:
        config = deepcopy(agvinfodata.dict_xml_agvinfo)
    return [c for c in config
            if (len(c.hwaddr) == 0) or (c.hwaddr not in online)]
985,214 | e7feaa7817ba6a68075e520b1fe18399711cd6e2 | import numpy as np
def gram_matrix(k, points):
    """Return the Gram matrix K[i, j] = k(points[i], points[j]).

    :param k: symmetric kernel function of two 1-d arrays.
    :param points: array of shape (n_points, d).
    :return: symmetric (n_points, n_points) array.
    """
    # todo: cythonize Gram matrix.
    n = points.shape[0]
    ret = np.empty((n, n))
    # Fill the upper triangle and mirror it; the kernel is assumed symmetric.
    # BUG FIX: xrange is Python 2 only -- use range.
    for i in range(n):
        for j in range(i, n):
            ret[i, j] = k(points[i, :], points[j, :])
            ret[j, i] = ret[i, j]
    return ret
def bq_weights(mu_p, design_points, kernel):
    """Bayesian-quadrature weights w = mu_p^T K^{-1}.

    :param mu_p: (n_points, 1) kernel-mean evaluations at the design points.
    :param design_points: matrix of shape (n_points, d).
    :param kernel: kernel used to build the Gram matrix.
    :return: (1, n_points) weight row vector.
    """
    cov_p = gram_matrix(kernel, design_points)
    # Solve instead of explicitly inverting: cheaper and better conditioned
    # (addresses the original TODO).  cov_p is symmetric, so
    # solve(cov_p, mu_p).T == mu_p.T @ inv(cov_p).
    return np.linalg.solve(cov_p, mu_p).T
def frank_wolfe_scores(cur_weights, cur_points, test_points, k, kernel_mean_map):
    """Frank-Wolfe linearised objective at each candidate point.

    score(x) = sum_j w_j * k(x, p_j) - mu(x); the next design point is the
    candidate minimising this score.
    """
    assert len(cur_weights) == len(cur_points)
    scores = np.empty(test_points.shape[0])
    # todo: nested loop is bad news.
    # BUG FIX: xrange is Python 2 only -- use range.
    for i in range(len(test_points)):
        x = test_points[i, :]
        k_evals = np.array([k(x, cur_points[j, :]) for j in range(cur_points.shape[0])])
        scores[i] = np.sum(cur_weights * k_evals) - kernel_mean_map(x)
    return scores
def frank_wolfe_weights(rhos, iteration):
    """Convert Frank-Wolfe step sizes into per-point weights.

    The weight of point l after `iteration` steps is
    rho_{l-1} * prod_{m=l..iteration-2}(1 - rho_m), with the convention
    that the step before the first point is 1.
    """
    ret = np.empty(iteration)
    # todo: this is really confusing, need to go through with FX.
    # BUG FIX: xrange is Python 2 only -- use range.
    for l in range(iteration):
        prev_rho = rhos[l-1] if l > 0 else 1.  # rho_0 = 1
        ret[l] = np.prod(1-rhos[l:iteration-1])*prev_rho
    return ret
def frank_wolfe_steps(method, iterations):
    """Return the predefined step-size schedule for the named method."""
    if method != 'kernel-herding':
        raise Exception('Method {} not understood.'.format(method))
    # Kernel herding uses the classic 1/(t+2) schedule.
    return 1. / (np.arange(iterations) + 2)
def frank_wolfe_step_line_search(new_pt, cur_weights, cur_points, kernel, kernel_mean_map):
    """Closed-form optimal Frank-Wolfe step size towards `new_pt`."""
    gram = gram_matrix(kernel, cur_points)
    quad = cur_weights.dot(gram).dot(cur_weights)
    # Cross terms of the new point against the current weighted point set.
    cross = sum(wt * kernel(pt, new_pt) for pt, wt in zip(cur_points, cur_weights))
    weighted_mean_map = sum(wt * kernel_mean_map(pt) for pt, wt in zip(cur_points, cur_weights))
    numerator = quad - cross - weighted_mean_map + kernel_mean_map(new_pt)
    denominator = quad - 2 * cross + kernel(new_pt, new_pt)
    return numerator / denominator
def frank_wolfe(initial_point, iterations, kernel, kernel_mean_map, test_points, steps='line-search'):
    """
    Determine Frank-Wolfe design points for the supplied kernel and mean-map.
    :param initial_point: The seed point for the Frank-Wolfe algorithm.
    :param iterations: The number of iterations.
    :param kernel: The kernel
    :param kernel_mean_map: The kernel mean map.
    :param test_points: Set of candidate points for the Frank-Wolfe algorithm
    :param steps: The step sizes for the Frank-Wolfe algorithm; either
        'line-search', a named schedule, or an explicit array of steps.
    :return: (design_points, final_weights)
    """
    line_search = False
    if steps == 'line-search':
        line_search = True
        rho_arr = np.empty(iterations)
        # Seed step computed against an empty point set.
        rho_arr[0] = frank_wolfe_step_line_search(initial_point, np.zeros((0)), np.zeros((0,2)), kernel, kernel_mean_map)
    elif type(steps) is str:
        rho_arr = frank_wolfe_steps(steps, iterations)
    elif type(steps) in [list, np.ndarray]:
        rho_arr = np.asarray(steps)
    else:
        raise Exception("Don't understand rho_method={}".format(steps))
    assert len(rho_arr) == iterations
    ret = np.empty((iterations, initial_point.shape[1]))
    ret[0, :] = initial_point
    # BUG FIX: xrange is Python 2 only -- use range.
    for i in range(1, iterations):
        # todo: optimal weights
        weights = frank_wolfe_weights(rho_arr, i)
        scores = frank_wolfe_scores(weights, ret[:i, :], test_points, kernel, kernel_mean_map)
        # Greedily pick the candidate minimising the linearised objective.
        best_score_ix = np.argmin(scores)
        new_pt = test_points[best_score_ix, :]
        ret[i, :] = new_pt
        if line_search:
            rho_arr[i] = frank_wolfe_step_line_search(new_pt, weights, ret[:i, :], kernel, kernel_mean_map)
    final_weights = frank_wolfe_weights(rho_arr, iterations)
    return ret, final_weights
def fwbq(integrand, x_0, kernel, kernel_mean_map, initial_error, fw_iterations, fw_test_points, fw_steps='line-search', return_points=False):
    """
    Integrate function given by integrand using FWBQ
    :param integrand: The function to integrate
    :param x_0: The initial point with which to seed the Frank-Wolfe algorithm
    :param kernel: The kernel to use for integration.
    :param kernel_mean_map: Mean map for the kernel
    :param initial_error: Integral of the kernel mean map; represents the maximum worst case error when approximating
    with zero observations.
    :param fw_iterations: Number of Frank-Wolfe points to use.
    :param fw_test_points: The set of candidate points for the Frank-Wolfe algorithm
    :param fw_steps: Step method to use in Frank-Wolfe.
    One of 'line-search', 'kernel-herding' or an array of step sizes.
    'line-search': Use the line search method in each iteration to determine the optimal step size.
    'kernel-herding': Corresponds to an equal weighting for each FW point.
    If an array is passed the number of elements must be the same as fw_iterations.
    :param return_points: If True, return (design_points, weights) instead of
    the integral estimate.
    :return: (mu, sigma), the mean and variance for the integral.
    """
    # we are not interested in the FW weights
    design_points, _ = frank_wolfe(x_0, fw_iterations, kernel, kernel_mean_map, fw_test_points, fw_steps)
    fun_evals = integrand(design_points)
    mu_p = np.empty((len(design_points), 1))
    # Evaluate the kernel mean map at every design point (column vector).
    # BUG FIX: xrange is Python 2 only -- use range.
    for i in range(len(design_points)):
        mu_p[i, :] = kernel_mean_map(design_points[i,:])
    weights = bq_weights(mu_p, design_points, kernel)
    if return_points:
        return design_points, weights
    cov = initial_error - weights.dot(mu_p)
    # todo: also return covariance here.
    return weights.dot(fun_evals), cov
|
985,215 | 6afe91cd55e66b55024953be66bbf75dca70e618 | from PyQt5.QtGui import QPixmap
from QChess.src.pieces.Piece import Piece
from QChess.src.pieces.Blank import Blank
import operator
# Creation of the class Bishop from where the classes BBishop and WBishop
# will inherit.  This class inherits at the same time from the class Piece.
class Bishop(Piece):
    def __init__(self, game, x, y):
        Piece.__init__(self, game, x, y)

    def possible_movements(self):
        """Return the list of (x, y) squares this bishop can move to."""
        # Possible destination squares are collected here.
        positions = []
        # One row per diagonal direction, each holding: [x-step op, y-step op,
        # x-bound test, y-bound test, x bound, y bound].  E.g. the first row
        # walks (x-i, y-i) while both coordinates stay >= 0.
        operators = [[operator.sub, operator.sub, operator.ge, operator.ge, 0, 0],
                     [operator.add, operator.sub, operator.le, operator.ge, 7, 0],
                     [operator.sub, operator.add, operator.ge, operator.le, 0, 7],
                     [operator.add, operator.add, operator.le, operator.le, 7, 7]]
        # The while loop walks each diagonal, appending squares that stay on
        # the board and are Blank.  The trailing if checks whether the first
        # blocking square holds an enemy piece, in which case that square is
        # also appended since the piece can be captured.
        for j in range(len(operators)):
            i = 1
            while operators[j][2](operators[j][0](self.coords[0], i), operators[j][4]) and \
                operators[j][3](operators[j][1](self.coords[1], i), operators[j][5]) \
                and isinstance(self.game.pieces[operators[j][0](self.coords[0], i)][operators[j][1](self.coords[1], i)], Blank):
                positions.append((operators[j][0](self.coords[0], i), operators[j][1](self.coords[1], i)))
                i += 1
            if operators[j][2](operators[j][0](self.coords[0], i), operators[j][4]) and \
                operators[j][3](operators[j][1](self.coords[1], i), operators[j][5]) \
                and self.game.pieces[operators[j][0](self.coords[0], i)][operators[j][1](self.coords[1], i)].color != self.color:
                positions.append((operators[j][0](self.coords[0], i), operators[j][1](self.coords[1], i)))
        return positions
# White bishop: white sprite, name "wB", colour "w"; movement comes from Bishop.
class WBishop(Bishop):
    def __init__(self, game, x, y):
        Bishop.__init__(self, game, x, y)
        self.image_path = f"./images/{self.game.piece_set}/wB"
        self.image = QPixmap(self.image_path)
        self.name = "wB"
        self.color = "w"
# Black bishop: black sprite, name "bB", colour "b"; movement comes from Bishop.
class BBishop(Bishop):
    def __init__(self, game, x, y):
        Bishop.__init__(self, game, x, y)
        self.image_path = f"./images/{self.game.piece_set}/bB"
        self.image = QPixmap(self.image_path)
        self.name = "bB"
        self.color = "b"
|
985,216 | 6fe4295a7e8cdccb4a0dc9d73ee8371613a72d05 | # coding:utf-8
# author: Tao yong
# Python
from die import Die
import pygal
# Roll one six-sided and one ten-sided die many times and chart the frequency
# of each possible sum with pygal.
die_1 = Die()
die_2 = Die(10)

# Roll the dice and collect the sums.
results = []
for roll_num in range(50000):
    results.append(die_1.roll() + die_2.roll())

# Tally how often each possible sum occurred.  BUG FIX: the smallest sum of
# two dice is 2, not 1 -- starting the range at 1 prepended a spurious zero
# count that misaligned every bar with its label.
min_result = 2
max_result = die_1.num_sides + die_2.num_sides
frequencies = []
for value in range(min_result, max_result + 1):
    # list.count tallies how many times `value` appears in the results.
    frequencies.append(results.count(value))

# Visualise as a bar chart.  BUG FIX: the labels and title are now derived
# from the dice actually rolled (D6+D10, 50000 rolls) instead of the
# hard-coded 2..16 "two D6 1000 times" values copied from an earlier example.
hist = pygal.Bar()
hist.title = "Results of rolling a D6 and a D10 50,000 times."
hist.x_labels = [str(value) for value in range(min_result, max_result + 1)]
hist.x_title = 'Result'
hist.y_title = 'Frequency of Result'
hist.add('D6+D10', frequencies)
hist.render_to_file('die_visual.svg')
|
985,217 | 97c7bbea54ee0c3e027ad2059c87d7d2b4707d69 | class QueueP:
def __init__(self, size):
self.Queue = [0]*(size+1)
self.head = 0
self.tail = -1
self.size = size+1
self.max_size = size
    def enqueue(self, obj):
        # Insert obj into the circular buffer, then bubble it toward the head
        # so that larger values end up closer to the front (priority order).
        # NOTE(review): 0 is used as the "empty slot" sentinel throughout, so
        # enqueueing 0 / 0.0 would confuse the ordering logic — confirm that
        # callers never insert zero.
        self.tail += 1
        if self.tail > self.size-1:
            self.tail = 0
        # Full when head is directly behind tail (accounting for wrap-around).
        if self.head == self.tail+1 or (self.head == 0 and self.tail == self.size-1):
            # NOTE(review): if tail just wrapped to 0, this leaves tail == -1;
            # verify the full-queue rollback handles the wrapped case.
            self.tail -= 1
            print("Kolejka jest pełna")
            return 0
        self.Queue[self.tail] = obj
        pre = self.tail
        while True:
            # Stop bubbling when the current or previous slot is empty.
            if self.Queue[pre] == 0 or self.Queue[pre-1] == 0:
                break
            elif pre == 0:
                # Wrap-around: compare the first slot against the last one.
                # NOTE(review): if this inner test is false while Queue[-1] is
                # nonzero, the loop re-enters this branch with unchanged state
                # — possible infinite loop; confirm with a fully wrapped queue.
                if self.Queue[pre] > self.Queue[self.size-1] != 0:
                    temp = self.Queue[pre]
                    self.Queue[pre] = self.Queue[self.size-1]
                    self.Queue[self.size-1] = temp
                    pre = self.size-1
                    continue
            elif self.Queue[pre] > self.Queue[pre - 1] != 0:
                # Swap the new element one step toward the head.
                temp = self.Queue[pre]
                self.Queue[pre] = self.Queue[pre - 1]
                self.Queue[pre - 1] = temp
                pre -= 1
            else:
                break
def dequeue(self):
if self.tail+1 == self.head:
print("Pusta kolejka")
return 0
head = self.Queue[self.head]
self.Queue[self.head] = 0
self.head += 1
if self.head > self.size-1:
self.head = 0
return head
def read_input(file_name):
    """Read one ';'-separated line of numbers from `file_name` into a QueueP.

    Malformed tokens are reported and skipped. Returns the populated queue.
    """
    # BUG FIX: the original never closed the file; `with` guarantees cleanup.
    with open(file_name, "r") as file:
        numbers = file.readline().split(";")
    que = QueueP(len(numbers))
    for token in numbers:
        try:
            que.enqueue(float(token))
        except ValueError:
            # Only float-conversion failures are expected here; the original
            # bare `except` would also have hidden unrelated bugs.
            print("Błędny znak")
    print(que.Queue)
    return que
def main():
    """Demo: load the queue from input.txt, drain and refill it."""
    queue = read_input("input.txt")
    # Remove three elements, add two, remove one more.
    for _ in range(3):
        queue.dequeue()
    queue.enqueue(8)
    queue.enqueue(9)
    queue.dequeue()
    print(queue.Queue)


main()
|
985,218 | 95fb8bfcdb6da195573f6975c9e2a905bc7223b2 | from rest_framework import serializers, viewsets
from django.contrib.auth.models import User
from .models import Status
class UserSerializer(serializers.ModelSerializer):
    """Serializes Django auth Users for the API."""
    class Meta:
        model = User
        # 'url' presumably comes from DRF's hyperlinked/router setup —
        # TODO confirm against the project's router configuration.
        fields = ('id', 'username', 'date_joined', 'url')
class UserViewSet(viewsets.ModelViewSet):
    """CRUD endpoint for users; explicit ordering keeps pagination stable."""
    queryset = User.objects.all().order_by('id')
    serializer_class = UserSerializer
class StatusSerializer(serializers.ModelSerializer):
    """Serializes Status posts, embedding the owning user read-only."""
    # Nested representation — the user cannot be reassigned through this API.
    user = UserSerializer(read_only=True)
    class Meta:
        model = Status
        fields = ('id', 'content', 'user', 'timestamp', 'url')
class StatusViewSet(viewsets.ModelViewSet):
    """CRUD endpoint for Status objects."""
    # CONSISTENCY/BUG FIX: order explicitly like UserViewSet above — an
    # unordered queryset makes paginated results non-deterministic (Django
    # emits UnorderedObjectListWarning for exactly this).
    queryset = Status.objects.all().order_by('id')
    serializer_class = StatusSerializer
|
985,219 | d6e3cef6ceafbbd67279caf130d063d116724e98 | import time
import notify2
import cookielib
import urllib
import urllib2
import os
# Install a cookie-aware opener so the admin session survives across requests.
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
urllib2.install_opener(opener)

# Log in as admin so the status page below reflects the moderation queue.
# SECURITY NOTE(review): credentials are hard-coded placeholders — load them
# from the environment or a config file before deploying.
authentication_url = 'http://puzzlepedia.esy.es/adminlogin.php'
payload = {
    'lemail': 'AdminEmail',
    'lpwd': 'AdminPassword'
}
data = urllib.urlencode(payload)
req = urllib2.Request(authentication_url, data)
resp = urllib2.urlopen(req)
contents = resp.read()

status_url = "http://puzzlepedia.esy.es/evaluate.php"
s = urllib2.urlopen(status_url).read()
while 'submission' in s:
    # path to notification window icon
    ICON_PATH = "/home/rahul/Desktop/PendingSubmissionNotifier/src/notify.png"
    # initialise the d-bus connection
    notify2.init("Pending Submission Notifier")
    # create Notification object
    n = notify2.Notification(None, icon = ICON_PATH)
    # set urgency level
    n.set_urgency(notify2.URGENCY_NORMAL)
    # set timeout for a notification
    n.set_timeout(10000)
    # update notification data for Notification object
    n.update("Puzzlepedia","Submission Checking Pending")
    # show notification on screen
    n.show()
    # audible beep: duration (s) and frequency (Hz) for sox's synth
    a=300
    b=2000
    os.system('play --no-show-progress --null --channels 1 synth %s sine %f' % ( a, b))
    # short delay between notifications
    time.sleep(15)
    # BUG FIX: re-fetch the page every cycle. The original read the page once
    # before the loop, so once a submission was pending it notified forever,
    # even after the queue had been cleared server-side.
    s = urllib2.urlopen(status_url).read()
|
985,220 | c700c04d0a676fe75bea00ea2887beabf0319a20 | #!/usr/bin/env python
# coding: utf-8
# In[3]:
import torch
from torch.optim import SGD
from torch import nn
# from models import noWeightsharingnoAuxloss,
# In[4]:
def train_model_auxloss(model, train_input, train_target, train_classes, optimizer, criterion,
                        test_input, test_target, mini_batch_size=100, nb_epochs=100,
                        print_progress=True):
    """
    Train `model` with an auxiliary digit-classification loss.

    The model must return a 3-tuple (digit_logits_a, digit_logits_b,
    pair_logits). The optimized loss is the pair loss plus both auxiliary
    digit losses; only the pair loss is accumulated for reporting.

    Returns
    -------
    (train_loss, validation_loss, train_acc, validation_acc) : lists with one
    entry per epoch (empty when `print_progress` is False).
    """
    validation_input = test_input
    validation_target = test_target
    validation_loss = []
    train_loss = []
    validation_acc = []
    train_acc = []
    for e in range(nb_epochs):
        acc_loss = 0
        for b in range(0, train_input.size(0), mini_batch_size):
            # The final batch may be smaller than mini_batch_size.
            batch_size = min(mini_batch_size, train_input.size(0) - b)
            digit_a, digit_b, output = model(train_input.narrow(0, b, batch_size))
            # Auxiliary losses push the two digit heads toward the true digits.
            loss = (criterion(output, train_target.narrow(0, b, batch_size))
                    + criterion(digit_a, train_classes[:, 0].narrow(0, b, batch_size))
                    + criterion(digit_b, train_classes[:, 1].narrow(0, b, batch_size)))
            # Track only the pair-comparison loss for progress reporting.
            acc_loss = acc_loss + criterion(output, train_target.narrow(0, b, batch_size)).item()
            model.zero_grad()
            loss.backward()
            optimizer.step()
        if print_progress:
            with torch.no_grad():
                model.eval()
                # PERF FIX: one forward pass per split — the original ran the
                # training-set forward pass twice per epoch.
                _, _, val_out = model(validation_input)
                _, _, train_out = model(train_input)
                validation_loss.append(criterion(val_out, validation_target).item())
                train_loss.append(criterion(train_out, train_target).item())
                pred_train = torch.argmax(train_out, dim=1)
                pred_test = torch.argmax(val_out, dim=1)
                train_acc.append((pred_train == train_target).float().mean().item())
                validation_acc.append((pred_test == test_target).float().mean().item())
                model.train()
                model.zero_grad()
            # Ceiling division: number of mini-batches per epoch.
            batches = -(-train_input.size(0) // mini_batch_size)
            print(e, acc_loss / batches)
    return train_loss, validation_loss, train_acc, validation_acc
# In[2]:
def train_model(model, train_input, train_target, optimizer, criterion,
                test_input, test_target, mini_batch_size=100, nb_epochs=100,
                print_progress=True):
    """
    Standard mini-batch training loop (no auxiliary loss).

    Returns
    -------
    (train_loss, validation_loss, train_acc, validation_acc) : lists with one
    entry per epoch (empty when `print_progress` is False).
    """
    validation_input = test_input
    validation_target = test_target
    validation_loss = []
    train_loss = []
    validation_acc = []
    train_acc = []
    for e in range(nb_epochs):
        acc_loss = 0
        for b in range(0, train_input.size(0), mini_batch_size):
            # The final batch may be smaller than mini_batch_size.
            batch_size = min(mini_batch_size, train_input.size(0) - b)
            output = model(train_input.narrow(0, b, batch_size))
            loss = criterion(output, train_target.narrow(0, b, batch_size))
            acc_loss = acc_loss + loss.item()
            model.zero_grad()
            loss.backward()
            optimizer.step()
        if print_progress:
            with torch.no_grad():
                model.eval()
                # PERF FIX: one forward pass per split — the original ran each
                # of the two forward passes twice per epoch.
                val_out = model(validation_input)
                train_out = model(train_input)
                # BUG FIX: store the scalar via .item() — the original
                # appended the raw tensor here, unlike train_loss below,
                # making the two returned lists inconsistent in type.
                validation_loss.append(criterion(val_out, validation_target).item())
                train_loss.append(criterion(train_out, train_target).item())
                pred_train = torch.argmax(train_out, dim=1)
                pred_test = torch.argmax(val_out, dim=1)
                train_acc.append((pred_train == train_target).float().mean().item())
                validation_acc.append((pred_test == test_target).float().mean().item())
                model.train()
                model.zero_grad()
            # Ceiling division: number of mini-batches per epoch.
            batches = -(-train_input.size(0) // mini_batch_size)
            print(e, acc_loss / batches)
    return train_loss, validation_loss, train_acc, validation_acc
|
985,221 | 11149e6f5d1bde5b66f436abd96a2da3cee4b278 | #Claire Yegian
#12/14/17
#gameOfLife.py - creates Conway's Game of Life
from ggame import *
#Creates gameboard
#Creates gameboard
def gameBoard():
    """Return a fresh 10x10 board of dead (0) cells."""
    # Comprehension replaces the error-prone 100-element hand-written literal;
    # each row is an independent list (no shared references).
    return [[0] * 10 for _ in range(10)]
#Prints gameboard (with any updates)
def redrawAll():
    """Draw every cell of the board: dead cells white, live cells black."""
    # IDIOM: enumerate replaces the original's hand-maintained row/column
    # counters — same drawing order and coordinates.
    for rowNum, row in enumerate(data['gameBoard']):
        for columnNum, cell in enumerate(row):
            if cell == 0:
                Sprite(data['deadCell'], (columnNum * 50, rowNum * 50))
            elif cell == 1:
                Sprite(data['liveCell'], (columnNum * 50, rowNum * 50))
#Finds out which box the user clicked, and changes the status of that cell (live vs dead)
def mouseClick(event):
    """Handle a click: button presses advance the generation, board clicks toggle a cell."""
    # Clicks inside the "NextGeneration" button rectangle advance the simulation.
    if 175 < event.x < 325 and 510 < event.y < 550:
        nextGeneration()
        return
    col = event.x // 50
    row = event.y // 50
    # Toggle the clicked cell between dead (0) and live (1).
    data['gameBoard'][row][col] = 1 if data['gameBoard'][row][col] == 0 else 0
    redrawAll()
#Determines which boxes will live on to the next generation and which will die; resets gameboard with these updates
def nextGeneration():
    # Build the next board separately so updates don't corrupt the scan.
    data['gameBoardUpdate'] = gameBoard()
    rowNum = 0
    for row in data['gameBoard']:
        columnNum = 0
        for column in row:
            # NOTE(review): rowNum/columnNum appear in swapped positions when
            # indexing the board ([columnNum][rowNum]); this only works
            # because the board is square (10x10) — confirm before changing
            # the dimensions.
            numLive = numNeighbors(rowNum,columnNum)
            # Conway's rules: a live cell dies with <2 or >3 live neighbours,
            # survives with 2-3; a dead cell becomes live with exactly 3.
            if (data['gameBoard'][columnNum][rowNum] == 1) and numLive<2:
                data['gameBoardUpdate'][columnNum][rowNum] = 0
            if (data['gameBoard'][columnNum][rowNum] == 1) and numLive>3:
                data['gameBoardUpdate'][columnNum][rowNum] = 0
            if (data['gameBoard'][columnNum][rowNum] == 0) and numLive == 3:
                data['gameBoardUpdate'][columnNum][rowNum] = 1
            if (data['gameBoard'][columnNum][rowNum] == 1) and (numLive == 3 or numLive == 2):
                data['gameBoardUpdate'][columnNum][rowNum] = 1
            columnNum += 1
        rowNum += 1
    data['gameBoard'] = data['gameBoardUpdate']
    redrawAll()
#finds and returns the number of living neighbors for each cell
def numNeighbors(rowNum, columnNum):
    """Count live cells among the 8 neighbours of board[columnNum][rowNum].

    The board is indexed [columnNum][rowNum] throughout this program; the
    grid is square (10x10), so the swapped naming stays consistent.
    """
    # IDIOM: one loop over the 8 neighbour offsets replaces the original's
    # eight near-identical if-statements (same bounds, same counts).
    count = 0
    for dCol, dRow in ((1, 0), (1, 1), (1, -1), (-1, 0),
                       (-1, 1), (-1, -1), (0, 1), (0, -1)):
        c = columnNum + dCol
        r = rowNum + dRow
        # Stay inside the fixed 10x10 board.
        if 0 <= c <= 9 and 0 <= r <= 9 and data['gameBoard'][c][r] == 1:
            count += 1
    return count
if __name__ == '__main__':
    dead = Color(0xffffff,1) #Colors used in program
    live = Color(0x000000,1)
    lightGrey = Color(0xD3D3D3,1)
    data = {} #Program dictionary
    # Reusable rectangle assets for the 50x50 px dead/live cell sprites.
    data['deadCell'] = RectangleAsset(50,50,LineStyle(1,lightGrey),dead)
    data['liveCell'] = RectangleAsset(50,50,LineStyle(1,live),live)
    data['gameBoardUpdate'] = []
    data['gameBoard'] = gameBoard()
    redrawAll()
    # On-screen "NextGeneration" button; mouseClick checks its bounding box.
    nextGen = TextAsset('NextGeneration')
    Sprite(nextGen,(190,520))
    # NOTE(review): two separate App() instances are created (one registers
    # the mouse handler, another runs) — confirm ggame treats App as a
    # singleton, otherwise the click handler may be lost.
    App().listenMouseEvent('click',mouseClick)
    App().run()
|
985,222 | 4de551c90f7b20e16e1a33c7ca774261503f8296 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Manages an environment -- a combination of a version of Python and set
of dependencies.
"""
import hashlib
import os
import re
import sys
import itertools
import subprocess
import importlib
from pathlib import Path
from .console import log
from . import util, build_cache
WIN = (os.name == "nt")  # True when running on Windows
def iter_matrix(environment_type, pythons, conf, explicit_selection=False):
    """
    Iterate through all combinations of the given requirement
    matrix and python versions.

    Yields
    ------
    combination : dict of {(key_type, key_name): value, ...}
        Combination of environment settings.
        Possible key types are ('req', 'env', 'env_nobuild', 'python').
    """
    env_classes = {}

    def get_env_type(python):
        # Resolve (and memoize) the environment tool name for a python spec.
        env_type = env_classes.get(python)
        if env_type is None:
            cls = get_environment_class(conf, python)
            env_type = cls.tool_name
            env_classes[python] = env_type
        return env_type

    # These keys are matched against exclude/include rules but are not part
    # of the requirement matrix itself.
    platform_keys = {
        ('environment_type', None): environment_type,
        ('sys_platform', None): sys.platform
    }

    # Parse requirement matrix
    parsed_matrix = _parse_matrix(conf.matrix)
    keys = list(parsed_matrix.keys())
    values = list(parsed_matrix.values())
    # Convert values to lists in the expected format
    values = [value if isinstance(value, list) else [value] for value in values]
    values = [[''] if value == [] else value for value in values]

    # Process excludes
    for python in pythons:
        empty_matrix = True

        # Cartesian product of everything
        all_keys = [('python', None)] + keys
        all_combinations = itertools.product([python], *values)

        for combination in all_combinations:
            target = dict(zip(all_keys, combination))
            target.update(platform_keys)

            if not environment_type:
                try:
                    target[('environment_type', None)] = get_env_type(target[('python', None)])
                except EnvironmentUnavailable as err:
                    log.warning(str(err))
                    continue

            for rule in conf.exclude:
                # check if all fields in the rule match
                rule = _parse_exclude_include_rule(rule)
                if match_rule(target, rule):
                    # rule matched
                    break
            else:
                # not excluded
                empty_matrix = False
                # None-valued entries mean "package not present" and are
                # dropped from the yielded combination.
                yield dict(item for item in zip(all_keys, combination)
                           if item[1] is not None)

        # If the user explicitly selected environment/python, yield it
        # even if matrix contains no packages to be installed
        if empty_matrix and explicit_selection:
            yield {('python', None): python}

    # Process includes, unless explicit selection
    if explicit_selection:
        return

    for include in conf.include:
        include = _parse_exclude_include_rule(include, is_include=True)

        # Platform keys in include statement act as matching rules
        target = dict(platform_keys)

        if not environment_type:
            try:
                target[('environment_type', None)] = get_env_type(include[('python', None)])
            except EnvironmentUnavailable as err:
                log.warning(str(err))
                continue

        rule = {}

        for key in platform_keys.keys():
            if key in include:
                rule[key] = include.pop(key)

        if match_rule(target, rule):
            # Prune empty keys
            for key in list(include.keys()):
                if include[key] is None:
                    include.pop(key)

            yield include
def _parse_matrix(matrix, bare_keys=()):
"""
Parse 'matrix' and include/exclude rule configuration entries.
It is in format::
{"key_type1": {"key1": value1, "key2, value2, ...},
...,
"nondict_key1": nondict_value1,
...}
or in legacy format::
{"key1": value1, ..., "nondict_key1": nondict_value1, ...}
in which the key type is assumed to be "req".
Parameters
----------
matrix
Configuration matrix or rule entry
bare_keys : iterable
Non-dictionary key values to store as is
Returns
-------
parsed_matrix
Dictionary {(key_type, key): value, ...}
"""
matrix = dict(matrix)
result = {}
# Insert non-dict ("bare") keys first
for key in bare_keys:
if key in matrix:
result[key, None] = matrix.pop(key)
# Insert remaining matrix entries
matrix_types = ('req', 'env', 'env_nobuild')
if any(t in matrix for t in matrix_types):
# New-style config
matrices = []
for t in matrix_types:
submatrix = matrix.pop(t, {})
matrices.append((t, submatrix))
# Check if spurious keys left
remaining_keys = tuple(matrix.keys())
if remaining_keys:
raise util.UserError('Unknown keys in "matrix" configuration: {}, expected: {}'.format(
remaining_keys, matrix_types + tuple(bare_keys)))
else:
# Backward-compatibility for old-style config
matrices = [('req', matrix)]
# Convert values
for t, m in matrices:
for key, value in m.items():
result[t, key] = value
return result
def _parse_exclude_include_rule(rule, is_include=False):
    """
    Attach key types to an exclude/include rule.

    Parameters
    ----------
    rule : dict
        Keys must be str, values must be str or None.
        The keys 'python', 'environment_type' and 'sys_platform' are parsed
        specially and become the corresponding bare key types.
    is_include : bool
        Include rules must name a Python version; raise otherwise.

    Returns
    -------
    rule : dict
        Dictionary of {(key_type, key): value, ...}
    """
    if is_include and 'python' not in rule:
        raise util.UserError(f"include rule '{rule}' does not specify Python version")
    return _parse_matrix(rule, bare_keys=('python', 'environment_type', 'sys_platform'))
def match_rule(target, rule):
    """
    Match rule to a target.

    Parameters
    ----------
    target : dict
        Dictionary containing [((key_type, key), value), ...].
    rule : dict
        Dictionary containing [((key_type, key), match), ...], to be matched
        to *target*. Match can be str specifying a regexp that must
        match target[key], or None. None matches either None
        or a missing key in *target*. If match is not None,
        and the key is missing in *target*, the rule does not match.
        The key_type must match exactly.

    Returns
    -------
    matched : bool
        Whether the rule matched. The rule matches if all keys match.
    """
    def entry_matches(key, pattern):
        present = target.get(key) is not None
        if pattern is None:
            # None matches a missing or None-valued target entry.
            return not present
        if not present:
            return False
        text = str(target[key])
        # The regexp must consume the entire target value.
        matched = re.match(str(pattern), text)
        return matched is not None and matched.end() == len(text)

    return all(entry_matches(key, pattern) for key, pattern in rule.items())
def get_env_name(tool_name, python, requirements, tagged_env_vars, build=False):
    """
    Get a name to uniquely identify an environment.

    Parameters
    ----------
    build : bool
        Whether to omit non-build environment variables.
        The canonical name of the environment is the name with build=False.
    """
    # Tool name may be empty for backward compatibility with old result
    # file names.
    parts = [tool_name] if tool_name else []
    parts.append(f"py{python}")
    # Requirements and env vars are sorted so the name is deterministic.
    for key, val in sorted(requirements.items()):
        parts.append(key + val if val else key)
    env_vars = _untag_env_vars(tagged_env_vars, build=build)
    for env_var, value in sorted(env_vars.items()):
        parts.append(env_var + value)
    return util.sanitize_filename('-'.join(parts))
def _untag_env_vars(tagged_env_vars, build=False):
vars = {}
for (tag, key), value in tagged_env_vars.items():
if not build or tag == 'build':
vars[key] = value
return vars
def get_environments(conf, env_specifiers, verbose=True):
    """
    Iterator returning `Environment` objects for all of the
    permutations of the given versions of Python and a matrix of
    requirements.

    Parameters
    ----------
    conf : dict
        asv configuration object
    env_specifiers : list of str
        List of environment specifiers, in the format
        'env_name:python_spec'. If *env_name* is empty, autodetect
        it. If *python_spec* is missing, use those listed in the
        configuration file. Alternatively, can be the name given
        by *Environment.name* if the environment is in the matrix.
    verbose : bool, optional
        Whether to display warnings about unknown environment types etc.
    """
    if not env_specifiers:
        all_environments = ()
        env_specifiers = [conf.environment_type]
        if not conf.environment_type and verbose:
            log.warning(
                "No `environment_type` specified in asv.conf.json. "
                "This will be required in the future.")
    else:
        # Recurse once to build the full matrix, so specifiers can also be
        # matched against existing Environment.name values.
        all_environments = list(get_environments(conf, None, verbose=verbose))

    for env_spec in env_specifiers:
        env_name_found = False
        for env in all_environments:
            if env_spec == env.name:
                env_name_found = True
                yield env
                break
        if env_name_found:
            continue

        explicit_selection = False
        if env_spec and ':' in env_spec:
            # 'tool:python' form pins both the tool and the Python version.
            env_type, python_spec = env_spec.split(':', 1)
            pythons = [python_spec]
            explicit_selection = True
        else:
            env_type = env_spec
            if env_type == "existing":
                explicit_selection = True
                pythons = ["same"]
            else:
                pythons = conf.pythons

        if env_type != "existing":
            requirements_iter = iter_matrix(env_type, pythons, conf,
                                            explicit_selection)
        else:
            # Ignore requirement matrix
            requirements_iter = [{('python', None): python} for python in pythons]

        for entries in requirements_iter:
            python, requirements, tagged_env_vars = _parse_matrix_entries(entries)

            try:
                if env_type:
                    cls = get_environment_class_by_name(env_type)
                else:
                    cls = get_environment_class(conf, python)

                yield cls(conf, python, requirements, tagged_env_vars)
            except EnvironmentUnavailable as err:
                if verbose:
                    log.warning(str(err))
def _parse_matrix_entries(entries):
"""
Parse mixed requirement / environment variable matrix entries
to requirements and tagged environment variables.
"""
python = None
requirements = {}
tagged_env_vars = {}
for (key_type, key), value in entries.items():
if key_type == 'python':
python = value
elif key_type == 'env':
tagged_env_vars[("build", key)] = value
elif key_type == 'env_nobuild':
tagged_env_vars[("nobuild", key)] = value
elif key_type == 'req':
requirements[key] = value
else:
# Shouldn't happen
raise ValueError(f"Invalid matrix key type {key}")
return python, requirements, tagged_env_vars
def get_environment_class(conf, python):
    """
    Get a matching environment type class.

    Parameters
    ----------
    conf : dict
        asv configuration object
    python : str
        Python version specifier. Acceptable values depend on the
        Environment plugins installed but generally are:

        - 'X.Y': A Python version, in which case conda or virtualenv
          will be used to create a new environment.
        - 'python' or '/usr/bin/python': Search for the given
          executable on the search PATH, and use that. It is assumed
          that all dependencies and the benchmarked project itself are
          already installed.
    """
    if python == 'same':
        return ExistingEnvironment

    # Try the subclasses in reverse order so custom plugins come first.
    candidates = list(util.iter_subclasses(Environment))[::-1]

    if conf.environment_type:
        # Promote the configured environment type to the front of the list.
        preferred = get_environment_class_by_name(conf.environment_type)
        candidates.remove(preferred)
        candidates.insert(0, preferred)

    for candidate in candidates:
        if candidate.matches_python_fallback and candidate.matches(python):
            return candidate
    raise EnvironmentUnavailable(
        f"No way to create environment for python='{python}'")
def get_environment_class_by_name(environment_type):
    """
    Find the environment class with the given tool name.
    """
    found = next((cls for cls in util.iter_subclasses(Environment)
                  if cls.tool_name == environment_type), None)
    if found is None:
        raise EnvironmentUnavailable(
            f"Unknown environment type '{environment_type}'")
    return found
def is_existing_only(environments):
    """
    Check if the list of environments only contains ExistingEnvironment
    """
    for env in environments:
        if not isinstance(env, ExistingEnvironment):
            return False
    return True
# NOTE(review): derives from BaseException rather than Exception, so generic
# `except Exception` handlers will NOT catch it — presumably intentional so
# it propagates past broad handlers; confirm before changing the base class.
class EnvironmentUnavailable(BaseException):
    pass
class Environment:
    """
    Manage a single environment -- a combination of a particular
    version of Python and a set of dependencies for the benchmarked
    project.
    """
    # Tool name (e.g. set to the plugin's identifier by subclasses);
    # None on this abstract base.
    tool_name = None
    # Whether this class may be tried when auto-matching a bare Python
    # specifier (see get_environment_class).
    matches_python_fallback = True
    def __init__(self, conf, python, requirements, tagged_env_vars):
        """
        Get an environment for a given requirement matrix and
        Python version specifier.

        Parameters
        ----------
        conf : dict
            asv configuration object

        python : str
            A Python version specifier. This is the same as passed to
            the `matches` method, and its exact meaning depends on the
            environment.

        requirements : dict (str -> str)
            Mapping from package names to versions

        tagged_env_vars : dict (tag, key) -> value
            Environment variables, tagged for build vs. non-build

        Raises
        ------
        EnvironmentUnavailable
            The environment for the given combination is not available.
        """
        self._env_dir = conf.env_dir
        self._repo_subdir = conf.repo_subdir
        self._install_timeout = conf.install_timeout  # gh-391
        self._default_benchmark_timeout = conf.default_benchmark_timeout  # gh-973
        self._tagged_env_vars = tagged_env_vars
        # On-disk location: env_dir / hashed directory name.
        self._path = os.path.abspath(os.path.join(
            self._env_dir, self.dir_name))
        self._project = conf.project

        self._is_setup = False

        self._cache = build_cache.BuildCache(conf, self._path)
        self._build_root = os.path.abspath(os.path.join(self._path, 'project'))

        self._requirements = requirements
        # These are needed for asv to build and run the project, not part of
        # benchmark name mangling
        self._base_requirements = {}
        # gh-1314
        asv_runner_path = os.getenv("ASV_RUNNER_PATH", "")
        module_path = Path(asv_runner_path) / "asv_runner"
        # Check if the path points to a directory containing the "asv_runner" module
        if module_path.is_dir() and (module_path / "__init__.py").is_file():
            spec = importlib.util.spec_from_file_location("asv_runner",
                                                          module_path / "__init__.py")
            # Attempt to load the module
            asv_runner_module = importlib.util.module_from_spec(spec)
            try:
                spec.loader.exec_module(asv_runner_module)
                # Loadable: install asv_runner from the override path via pip.
                self._base_requirements["pip+asv_runner"] = asv_runner_path
            except Exception as e:
                # Fall back to the released asv_runner on any import failure.
                self._base_requirements["pip+asv_runner"] = ""
                log.warning(f"Failed to load module from ASV_RUNNER_PATH: {e}")
        else:
            self._base_requirements["pip+asv_runner"] = ""
            if asv_runner_path:
                log.warning("ASV_RUNNER_PATH does not point"
                            "to a directory containing the 'asv_runner' module")
        if not util.ON_PYPY:
            # XXX: What if pypy installed asv tries to benchmark a cpython
            # python?
            self._base_requirements["pip+pympler"] = ""
        # Lockfile-based build backends needed when the project uses them.
        if (Path.cwd() / "poetry.lock").exists():
            self._base_requirements["poetry-core"] = ""
        if (Path.cwd() / "pdm.lock").exists():
            self._base_requirements["pdm"] = ""

        # Update the _base_requirements if needed: matrix entries that name a
        # base requirement override its pinned version and are removed from
        # the name-mangling requirement set.
        for key in list(self._requirements.keys()):
            if key in self._base_requirements:
                self._base_requirements[key] = self._requirements[key]
                del self._requirements[key]

        self._build_command = conf.build_command
        self._install_command = conf.install_command
        self._uninstall_command = conf.uninstall_command

        # Environment variables exported to every command run in this env.
        self._global_env_vars = {}
        self._global_env_vars['ASV'] = 'true'
        self._global_env_vars['ASV_PROJECT'] = conf.project
        self._global_env_vars['ASV_CONF_DIR'] = os.path.abspath(os.getcwd())
        self._global_env_vars['ASV_ENV_NAME'] = self.name
        self._global_env_vars['ASV_ENV_DIR'] = self._path
        self._global_env_vars['ASV_ENV_TYPE'] = self.tool_name

        # Resume from whatever commit is already installed on disk (if any).
        installed_commit_hash = self._get_installed_commit_hash()
        self._set_commit_hash(installed_commit_hash)
def _set_commit_hash(self, commit_hash):
if commit_hash is None:
self._global_env_vars.pop('ASV_COMMIT', None)
else:
self._global_env_vars['ASV_COMMIT'] = commit_hash
def _set_build_dirs(self, build_dir, cache_dir):
if build_dir is None:
self._global_env_vars.pop('ASV_BUILD_DIR', None)
else:
self._global_env_vars['ASV_BUILD_DIR'] = build_dir
if cache_dir is None:
self._global_env_vars.pop('ASV_BUILD_CACHE_DIR', None)
else:
self._global_env_vars['ASV_BUILD_CACHE_DIR'] = cache_dir
def _set_installed_commit_hash(self, commit_hash):
# Save status
install_checksum = self._get_install_checksum()
hash_file = os.path.join(self._path, 'asv-install-status.json')
data = {'commit_hash': commit_hash, 'install_checksum': install_checksum}
util.write_json(hash_file, data, api_version=1)
def _get_installed_commit_hash(self):
hash_file = os.path.join(self._path, 'asv-install-status.json')
data = {}
if os.path.isfile(hash_file):
try:
data = util.load_json(hash_file, api_version=1)
except util.UserError:
pass
# If configuration changed, force reinstall
install_checksum = self._get_install_checksum()
if data.get('install_checksum', None) != install_checksum:
return None
return data.get('commit_hash', None)
def _get_install_checksum(self):
return [self._repo_subdir,
self._install_timeout,
self._project,
self._build_command,
self._install_command,
self._uninstall_command]
    @property
    def installed_commit_hash(self):
        """Commit hash currently installed in this env, or None if unknown."""
        return self._get_installed_commit_hash()
@classmethod
def matches(self, python):
"""
Returns `True` if this environment subclass can handle the
given Python specifier.
"""
return False
    @property
    def name(self):
        """
        Get a name to uniquely identify this environment.
        """
        # Canonical name: tool + python + sorted requirements + env vars
        # (build=False form; see get_env_name).
        return get_env_name(self.tool_name,
                            self._python,
                            self._requirements,
                            self._tagged_env_vars)
@property
def hashname(self):
"""
Get a hash to uniquely identify this environment.
"""
return hashlib.md5(self.name.encode('utf-8')).hexdigest()
@property
def dir_name(self):
"""
Get the name of the directory where the environment resides.
This is not necessarily unique, and may be shared across
different environments.
"""
name = get_env_name(self.tool_name,
self._python,
self._requirements,
self._tagged_env_vars,
build=True)
return hashlib.md5(name.encode('utf-8')).hexdigest()
    @property
    def requirements(self):
        """Matrix requirements (package name -> version) for this environment."""
        return self._requirements
    @property
    def env_vars(self):
        """
        All environment variables configured in the matrix
        (both build-time and runtime; tags are stripped).
        """
        return _untag_env_vars(self._tagged_env_vars, build=False)
    @property
    def build_env_vars(self):
        """
        Build-time environment variables configured in the matrix
        (runtime-only variables are excluded).
        """
        return _untag_env_vars(self._tagged_env_vars, build=True)
    @property
    def python(self):
        """Python version specifier this environment was created for."""
        return self._python
def check_presence(self):
"""
Check whether the environment already exists.
"""
if not os.path.isdir(self._env_dir):
return False
try:
info = self.load_info_file(self._path)
except (util.UserError, IOError):
return False
expected_info = {
'tool_name': self.tool_name,
'python': self._python,
'requirements': self._requirements,
'build_env_vars': self.build_env_vars
}
if info != expected_info:
return False
for executable in ['pip', 'python']:
try:
self.find_executable(executable)
except IOError:
return False
try:
self.run_executable('python', ['-c', 'pass'])
except (subprocess.CalledProcessError, OSError):
return False
return True
    def create(self):
        """
        Create the environment on disk. If it doesn't exist, it is
        created. Then, all of the requirements are installed into it.
        """
        # Idempotent: a second call within the same process is a no-op.
        if self._is_setup:
            return

        if not self.check_presence():
            # Stale or mismatched environment: rebuild from scratch.
            if os.path.exists(self._path):
                util.long_path_rmtree(self._path)

            if not os.path.exists(self._env_dir):
                try:
                    os.makedirs(self._env_dir)
                except OSError:
                    # Environment.create may be called in parallel for
                    # environments with different self._path, but same
                    # self._env_dir. This causes a race condition for
                    # the above makedirs() call --- but not for the
                    # rest of the processing. Therefore, we will just
                    # ignore the error here, as things will fail at a
                    # later stage if there is really was a problem.
                    pass

            try:
                self._setup()
            except Exception:
                # Remove the partially-created environment so the next run
                # starts clean.
                log.error(f"Failure creating environment for {self.name}")
                if os.path.exists(self._path):
                    util.long_path_rmtree(self._path)
                raise

            self.save_info_file(self._path)

        self._is_setup = True
    def _setup(self):
        """
        Implementation for setting up the environment.
        """
        # Abstract: concrete environment tools (conda, virtualenv, ...)
        # implement the actual creation.
        raise NotImplementedError()
    def run(self, args, **kwargs):
        """
        Start up the environment's python executable with the given
        args.
        """
        # Abstract: subclasses implement process launch inside the env.
        raise NotImplementedError()
    def _interpolate_commands(self, commands):
        """
        Parse a command list with interpolated variables to a sequence of commands.

        Parameters
        ----------
        commands : {list of str}
            Commands to execute

        Returns
        -------
        run_commands : list of (cmd, env, return_codes, cwd)
            Parsed commands to run.
        """
        if not commands:
            return []

        # A single string is treated as a one-command list.
        if not isinstance(commands, list):
            commands = [commands]

        # All environment variables are available as interpolation variables,
        # lowercased without the prefix.
        kwargs = dict()
        for key, value in self._global_env_vars.items():
            if key == 'ASV':
                continue
            assert key.startswith('ASV_')
            interp_key = key[4:].lower()
            kwargs[interp_key] = value

        # There is an additional {wheel_file} interpolation variable,
        # available only when the build cache contains exactly one wheel.
        if 'build_cache_dir' in kwargs:
            cache_dir = kwargs['build_cache_dir']

            if os.path.isdir(cache_dir):
                files = os.listdir(cache_dir)
                wheels = [fn for fn in files if fn.lower().endswith('.whl')]
                if len(wheels) == 1:
                    kwargs['wheel_file'] = os.path.join(cache_dir, wheels[0])

        # Interpolate, and raise useful error message if it fails
        return [util.interpolate_command(c, kwargs) for c in commands]
def _interpolate_and_run_commands(self, commands, default_cwd, extra_env=None):
interpolated = self._interpolate_commands(commands)
for cmd, env, return_codes, cwd in interpolated:
environ = dict(os.environ)
if extra_env is not None:
environ.update(extra_env)
environ.update(env)
if cwd is None:
cwd = default_cwd
self.run_executable(cmd[0], cmd[1:], timeout=self._install_timeout, cwd=cwd,
env=environ, valid_return_codes=return_codes)
    def checkout_project(self, repo, commit_hash):
        """
        Check out the working tree of the project at given commit hash
        """
        # Export ASV_COMMIT before checkout so build hooks can observe it.
        self._set_commit_hash(commit_hash)
        repo.checkout(self._build_root, commit_hash)
    def install_project(self, conf, repo, commit_hash):
        """
        Build and install the benchmarked project into the environment.
        Uninstalls any installed copy of the project first.
        """
        if self._repo_subdir:
            build_dir = os.path.join(self._build_root, self._repo_subdir)
        else:
            build_dir = self._build_root

        # Check first if anything needs to be done: the requested commit may
        # already be installed (recorded on disk with a config checksum).
        installed_commit_hash = self._get_installed_commit_hash()
        if installed_commit_hash == commit_hash:
            self._set_commit_hash(installed_commit_hash)
            self._set_build_dirs(None, None)
            return

        # Checkout first, so that uninstall can access build_dir
        # (for e.g. Makefiles)
        self.checkout_project(repo, commit_hash)
        self._set_build_dirs(build_dir, None)

        # Uninstall
        self._uninstall_project()

        # Build if not in cache
        cache_dir = self._cache.get_cache_dir(commit_hash)
        if cache_dir is not None:
            self._set_build_dirs(build_dir, cache_dir)
        else:
            cache_dir = self._cache.create_cache_dir(commit_hash)
            self._set_build_dirs(build_dir, cache_dir)
            self._build_project(repo, commit_hash, build_dir)

        # Install
        self._install_project(repo, commit_hash, build_dir)

        # Mark cached build as valid
        self._cache.finalize_cache_dir(commit_hash)

        # Mark installation as updated
        self._set_installed_commit_hash(commit_hash)
    def _install_project(self, repo, commit_hash, build_dir):
        """
        Run install commands.

        Uses the configured install command if any; otherwise installs the
        previously built wheel with pip.  Commands are brace-interpolated
        (e.g. ``{env_dir}``, ``{wheel_file}``) before running.
        """
        cmd = self._install_command
        if cmd is None:
            # Run pip via python -m pip, avoids shebang length limit on Linux.
            # Don't run it in build directory, because it may contain Python packages
            # that pip believes to be already installed.
            cmd = ["in-dir={env_dir} python -mpip install {wheel_file}"]

        if cmd:
            commit_name = repo.get_decorated_hash(commit_hash, 8)
            log.info(f"Installing {commit_name} into {self.name}")
            self._interpolate_and_run_commands(cmd, default_cwd=build_dir,
                                               extra_env=self.build_env_vars)
    def _uninstall_project(self):
        """
        Run uninstall commands.

        The installed-commit marker is cleared *before* uninstalling so a
        partially failed uninstall never looks like a valid installation.
        """
        # Mark installation invalid first
        self._set_installed_commit_hash(None)

        cmd = self._uninstall_command
        if cmd is None:
            # Run pip via python -m pip, avoids shebang length limit on Linux
            # pip uninstall may fail if not installed, so allow any exit code
            cmd = ['return-code=any python -mpip uninstall -y {project}']

        if cmd:
            log.info(f"Uninstalling from {self.name}")
            self._interpolate_and_run_commands(cmd, default_cwd=self._env_dir,
                                               extra_env=self.build_env_vars)
    def _build_project(self, repo, commit_hash, build_dir):
        """
        Run build commands.

        Default behaviour builds a wheel into the per-commit build cache
        directory; projects with a ``pyproject.toml`` skip the legacy
        ``setup.py build`` step.
        """
        build_dir_path = Path(build_dir)

        def has_file(file_name):
            # True when `file_name` exists directly under the build dir.
            return (build_dir_path / file_name).exists()

        cmd = self._build_command

        if cmd is None:
            if has_file('pyproject.toml'):
                cmd = [
                    ("PIP_NO_BUILD_ISOLATION=false "
                     "python -mpip wheel --no-deps --no-index "
                     "-w {build_cache_dir} {build_dir}")
                ]
            else:
                cmd = [
                    "python setup.py build",
                    ("PIP_NO_BUILD_ISOLATION=false "
                     "python -mpip wheel --no-deps --no-index -w {build_cache_dir} {build_dir}")
                ]

        if cmd:
            commit_name = repo.get_decorated_hash(commit_hash, 8)
            log.info(f"Building {commit_name} for {self.name}")
            self._interpolate_and_run_commands(cmd, default_cwd=build_dir,
                                               extra_env=self.build_env_vars)
def can_install_project(self):
"""
Return `True` if specific revisions of the benchmarked project
can be installed into this environment.
"""
return True
    def find_executable(self, executable):
        """
        Find an executable (eg. python, pip) in the environment.

        If not found, raises IOError.  Searches the standard
        virtualenv/Conda binary directories for this platform.
        """
        # Assume standard virtualenv/Conda layout
        if WIN:
            paths = [self._path,
                     os.path.join(self._path, 'Scripts'),
                     os.path.join(self._path, 'bin')]
        else:
            paths = [os.path.join(self._path, 'bin')]

        return util.which(executable, paths)
    def run_executable(self, executable, args, **kwargs):
        """
        Run a given executable (eg. python, pip) in the environment.

        The environment's bin directories are prepended to PATH, PYTHONPATH
        is stripped (unless ASV_PYTHONPATH is set) and PIP_USER is disabled.
        Extra ``kwargs`` are passed through to ``util.check_output``.
        """
        env = kwargs.pop("env", os.environ).copy()
        env.update(self._global_env_vars)

        # Insert bin dirs to PATH
        if "PATH" in env:
            paths = env["PATH"].split(os.pathsep)
        else:
            paths = []

        if WIN:
            subpaths = ['Library\\mingw-w64\\bin',
                        'Library\\bin',
                        'Library\\usr\\bin',
                        'Scripts']
            for sub in subpaths[::-1]:
                paths.insert(0, os.path.join(self._path, sub))
            paths.insert(0, self._path)
        else:
            paths.insert(0, os.path.join(self._path, "bin"))

        # Discard PYTHONPATH, which can easily break the environment
        # isolation
        if 'ASV_PYTHONPATH' in env:
            env['PYTHONPATH'] = env['ASV_PYTHONPATH']
            env.pop('ASV_PYTHONPATH', None)
        else:
            env.pop('PYTHONPATH', None)

        # When running pip, we need to set PIP_USER to false, as --user (which
        # may have been set from a pip config file) is incompatible with
        # virtualenvs.
        kwargs["env"] = dict(env,
                             PIP_USER=str("false"),
                             PATH=str(os.pathsep.join(paths)))
        exe = self.find_executable(executable)
        if kwargs.get("timeout", None) is None:
            kwargs["timeout"] = self._install_timeout
        return util.check_output([exe] + args, **kwargs)
    def load_info_file(self, path):
        """Load and return the environment info dict stored in `path`."""
        path = os.path.join(path, 'asv-env-info.json')
        return util.load_json(path)
    def save_info_file(self, path):
        """
        Save a file with information about the environment into
        directory `path`.

        The file records the tool name, python version, requirements and
        build env vars so the environment can later be validated/reused.
        """
        path = os.path.join(path, 'asv-env-info.json')
        content = {
            'tool_name': self.tool_name,
            'python': self._python,
            'requirements': self._requirements,
            'build_env_vars': self.build_env_vars
        }
        util.write_json(path, content)
class ExistingEnvironment(Environment):
    """Use an already-existing Python interpreter instead of creating one.

    Benchmarked project installation is disabled for this environment type;
    the interpreter is used exactly as found.
    """

    tool_name = "existing"

    def __init__(self, conf, executable, requirements, tagged_env_vars):
        # 'same' means the interpreter running asv itself.
        if executable == 'same':
            executable = sys.executable

        try:
            executable = os.path.abspath(util.which(executable))

            # Query the interpreter for its "major.minor" version string.
            self._python = util.check_output(
                [executable,
                 '-c',
                 'import sys; '
                 'print(str(sys.version_info[0]) + "." + str(sys.version_info[1]))'
                 ]).strip()
        except (util.ProcessError, OSError, IOError):
            raise EnvironmentUnavailable()

        self._executable = executable
        self._requirements = {}
        super(ExistingEnvironment, self).__init__(conf,
                                                  executable,
                                                  requirements,
                                                  tagged_env_vars)
        # An existing environment has no managed env dir.
        self._global_env_vars.pop('ASV_ENV_DIR')

    @property
    def installed_commit_hash(self):
        # Nothing is ever installed by asv into an existing environment.
        return None

    @classmethod
    def matches(cls, python):
        """Return True if `python` names a resolvable interpreter."""
        if python == 'same':
            python = sys.executable

        try:
            util.which(python)
        except IOError:
            return False
        else:
            return True

    @property
    def name(self):
        # Path separators are not allowed in environment names.
        return get_env_name(self.tool_name,
                            self._executable.replace(os.path.sep, '_'),
                            {},
                            self._tagged_env_vars)

    def check_presence(self):
        # The interpreter was validated in __init__; always present.
        return True

    def create(self):
        # Nothing to create for a pre-existing interpreter.
        pass

    def _setup(self):
        pass

    def install_project(self, conf, repo, commit_hash=None):
        # Installation is not supported; the project must already be importable.
        pass

    def can_install_project(self):
        return False

    def run(self, args, **kwargs):
        """Run the environment's interpreter with `args`."""
        log.debug(f"Running '{' '.join(args)}' in {self.name}")
        return util.check_output([
            self._executable] + args, **kwargs)
|
985,223 | 9ffc345926eb507dbde8882fab2c33043c915c7c |
import frappe
from frappe import _
from frappe.desk.reportview import get_match_cond, get_filters_cond
from frappe.model.mapper import get_mapped_doc
import json
from frappe import _
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
def get_batch_no(doctype, txt, searchfield, start, page_len, filters):
    """Search-field query returning batches for an item.

    With a warehouse filter, returns batches with positive stock in that
    warehouse (unless ``is_return``); otherwise returns all enabled batches
    of the item.  ``txt`` matches batch name / expiry / manufacturing date.
    """
    cond = ""
    if filters.get("posting_date"):
        cond = "and (batch.expiry_date is null or batch.expiry_date >= %(posting_date)s)"

    batch_nos = None
    if filters.get("customer"):
        supplier = frappe.get_list('Double Ledger Parties', filters={'customer': filters.get("customer")}, fields=['supplier'], limit=1)
        if supplier:
            # NOTE(review): this *overwrites* the posting_date condition above
            # rather than appending to it — confirm that is intentional.
            # NOTE(review): supplier name is interpolated straight into SQL;
            # prefer a parameterized condition to avoid SQL injection.
            cond = " and batch.supplier = '{0}'".format(supplier[0].supplier)
    args = {
        'item_code': filters.get("item_code"),
        'warehouse': filters.get("warehouse"),
        'posting_date': filters.get('posting_date'),
        'txt': "%{0}%".format(txt),
        "start": start,
        "page_len": page_len
    }

    # Returns require no positive-stock restriction.
    having_clause = "having sum(sle.actual_qty) > 0"
    if filters.get("is_return"):
        having_clause = ""

    if args.get('warehouse'):
        batch_nos = frappe.db.sql("""select sle.batch_no, round(sum(sle.actual_qty),2), sle.stock_uom,
                concat('MFG-',batch.manufacturing_date), concat('EXP-',batch.expiry_date)
            from `tabStock Ledger Entry` sle
                INNER JOIN `tabBatch` batch on sle.batch_no = batch.name
            where
                batch.disabled = 0
                and sle.item_code = %(item_code)s
                and sle.warehouse = %(warehouse)s
                and (sle.batch_no like %(txt)s
                or batch.expiry_date like %(txt)s
                or batch.manufacturing_date like %(txt)s)
                and batch.docstatus < 2
                {cond}
                {match_conditions}
            group by batch_no {having_clause}
            order by batch.expiry_date, sle.batch_no desc
            limit %(start)s, %(page_len)s""".format(
                cond=cond,
                match_conditions=get_match_cond(doctype),
                having_clause = having_clause
            ), args)

        return batch_nos
    else:
        return frappe.db.sql("""select name, concat('MFG-', manufacturing_date), concat('EXP-',expiry_date) from `tabBatch` batch
            where batch.disabled = 0
            and item = %(item_code)s
            and (name like %(txt)s
            or expiry_date like %(txt)s
            or manufacturing_date like %(txt)s)
            and docstatus < 2
            {0}
            {match_conditions}
            order by expiry_date, name desc
            limit %(start)s, %(page_len)s""".format(cond, match_conditions=get_match_cond(doctype)), args)
@frappe.whitelist()
def get_batch_list(customer, item_code, warehouse=None):
    """Return batches (with design_no) of *item_code*.

    With a warehouse, only batches with positive stock there are returned;
    when the customer maps to a supplier (Double Ledger Parties), results
    are restricted to that supplier's batches.  Returns False when no
    warehouse is given.
    """
    if warehouse:
        batch_nos = None
        # BUG FIX: `cond` was previously only assigned inside the
        # `if supplier:` branch, so every call without a matching
        # customer/supplier raised NameError in the `.format(...)` below.
        cond = ""
        if customer:
            supplier = frappe.get_list('Double Ledger Parties', filters={'customer': customer}, fields=['supplier'], limit=1)
            if supplier:
                # NOTE(review): supplier name interpolated into SQL; prefer a
                # parameterized condition to avoid SQL injection.
                cond = " and batch.supplier = '{0}'".format(supplier[0].supplier)
        args = {
            'item_code': item_code,
            'warehouse': warehouse
            # "page_len": 100
        }
        having_clause = "having sum(sle.actual_qty) > 0"
        if args.get('warehouse'):
            batch_nos = frappe.db.sql("""select sle.batch_no,batch.design_no, round(sum(sle.actual_qty),2)
                from `tabStock Ledger Entry` sle
                    INNER JOIN `tabBatch` batch on sle.batch_no = batch.name
                where
                    batch.disabled = 0
                    and sle.item_code = %(item_code)s
                    and sle.warehouse = %(warehouse)s
                    and batch.docstatus < 2
                    {cond}
                    {match_conditions}
                group by batch_no {having_clause}
                order by batch.expiry_date, sle.batch_no desc""".format(
                    cond=cond,
                    match_conditions=get_match_cond('Batch'),
                    having_clause = having_clause
                ), args)
            return batch_nos
        else:
            return frappe.db.sql("""select name, design_no from `tabBatch` batch
                where batch.disabled = 0
                and item = %(item_code)s
                and docstatus < 2
                {0}
                {match_conditions}
                order by expiry_date, name desc
                """.format(cond, match_conditions=get_match_cond('Batch')), args)
            # limit %(page_len)s
    else:
        return False
@frappe.whitelist()
def get_process_order_finish_item(doc_name, filters_json=None):
    """Return finish items of Process Orders matching serialized list filters.

    ``filters_json`` is a JSON list of 4-tuples ``[doctype, field, op, value]``
    for the doctypes Process Order / Process Order Item / Process Order Finish
    Item.  Each returned finish-item row (quantity > 0) is enriched with item
    description/UOM and the parent order's warehouse and business unit.
    Throws when no filters or no data; returns False without filters_json.
    """
    if filters_json:
        raw_dict = {}
        finish_dict = {}
        process_dict = {}
        filters = json.loads(filters_json)
        # Split the flat filter list into per-doctype filter dicts.
        for res in filters:
            if res[0] == 'Process Order':
                process_dict[res[1]] = [res[2], res[3]]
            if res[0] == 'Process Order Item':
                raw_dict[res[1]] = [res[2], res[3]]
            if res[0] == 'Process Order Finish Item':
                finish_dict[res[1]] = [res[2], res[3]]
        process_order_lst = []
        if process_dict:
            process_orders = frappe.get_list('Process Order', filters=process_dict, fields=('name'))
            for p in process_orders:
                process_order_lst.append(p.get('name'))
        if raw_dict:
            # Narrow raw-item filters to the orders found above (if any).
            if process_order_lst:
                raw_dict['parent'] = ['in', process_order_lst]
            process_orders_item = frappe.get_list('Process Order Item', filters=raw_dict, fields=('parent'))
            for p in process_orders_item:
                if p.get('parent') not in process_order_lst:
                    process_order_lst.append(p.get('parent'))
        if process_order_lst:
            finish_dict['parent'] = ['in', process_order_lst]
        if finish_dict:
            finish_items = frappe.get_list('Process Order Finish Item', filters=finish_dict, fields='*')
            if finish_items:
                for res in finish_items:
                    if res.quantity > 0:
                        item = frappe.get_doc('Item', res.item)
                        p_order = frappe.get_doc('Process Order', res.parent)
                        res['description'] = item.description
                        res['stock_uom'] = item.stock_uom
                        res['uom'] = item.stock_uom
                        res['warehouse'] = p_order.fg_warehouse
                        res['business_unit'] = p_order.business_unit
                return finish_items
            else:
                frappe.throw(_("No Data Found."))
        else:
            frappe.throw(_("Please set the filter."))
    return False
def set_workstation_net_rate(doc, method):
    """Recompute the workstation's net hourly rate (doc.p_net_hour_rate).

    Sums the electricity, consumable, rent and wage cost components,
    treating missing/None/zero components as 0.  *method* is the unused
    hook-event name supplied by the framework.
    """
    components = (
        doc.p_electricity_cost,
        doc.p_consumable_cost,
        doc.p_rent_cost,
        doc.p_wages,
    )
    doc.p_net_hour_rate = sum(value for value in components if value)
985,224 | 04b8548da41d9f7bde9040cd120b5d245e05e782 | #Conversor de temperaturas
# Read a Celsius temperature from the user.
t = float(input('Informe a temperatura em °C:'))
# Convert to Fahrenheit: F = 9/5 * C + 32.
f = ((9 * t) / 5) + 32
print('A temperatura de {:.2f}°C convertida em Farenheit é {:.2f}°F'.format(t,f))
|
985,225 | 713ba1fa2c224e5a78bbba0ca85fb3ac049d04a9 | #!/usr/bin/env python
# Given N numbers , [N<=10^5] we need to count the total pairs of numbers
# that have a difference of K. [K>0 and K<1e9]
#
# Input Format:
# 1st line contains N & K (integers).
# 2nd line contains N numbers of the set. All the N numbers are assured to
# be distinct.
# Output Format:
# One integer saying the no of pairs of numbers that have a diff K.
#
# Sample Input #00:
# 5 2
# 1 5 3 4 2
#
# Sample Output #00:
# 3
#
# Sample Input #01:
# 10 1
# 363374326 364147530 61825163 1073065718 1281246024 1399469912 428047635
# 491595254 879792181 1069262793
#
# Sample Output #01:
# 0
#
def main(argv):
inp = sys.stdin
out = sys.stdout
if len(argv) > 1:
inp = open(argv[1], 'r')
if len(argv) > 2: out = open(argv[2], 'r')
(N, K) = tuple([int(x) for x in inp.readline().split()])
iterator = (int(x) for x in inp.readline().split())
setOfN = set(iterator)
totalPairs = 0
for x in setOfN:
if (x+K) in setOfN:
totalPairs += 1
print totalPairs
if len(argv) > 1: inp.close()
if len(argv) > 2: out.close()
if __name__ == "__main__":
import sys
main(sys.argv)
|
985,226 | 692387636eb06f73b80d1361438050e34ad4b087 |
from django.contrib import admin
from django.urls import path
from demo import views
# URL routes: admin site, landing page, and the signup view.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.index,name='home'),
    path('signup', views.signup,name='signup'),
]
|
985,227 | a5419e295e753c1aecaa4bd3add22c93ce1210c3 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Topic: 第十三章:脚本编程与系统管理
Description: 许多人使用 Python 作为一个 shell 脚本的替代,用来实现常用系统任务的自动化,
如文件的操作,系统的配置等。本章的主要目标是描述光宇编写脚本时候经常遇到的
一些功能。例如,解析命令行选项、获取有用的系统配置数据等等。第 5 章也包含了
与文件和目录相关的一般信息。
Title: 获取终端的大小
Issue:你需要知道当前终端的大小以便正确的格式化输出。
Answer: 使用 os.get terminal size() 函数来做到这一点。
"""
import os
# Query the attached terminal's dimensions (raises OSError without a tty).
sz = os.get_terminal_size()
print(sz)
print(sz.columns)
print(sz.lines)
"""
有太多方式来得知终端大小了,从读取环境变量到执行底层的 ioctl() 函数等等。
不过,为什么要去研究这些复杂的办法而不是仅仅调用一个简单的函数呢?
"""
|
985,228 | 322fe2afb8d93cf1e9a70621beb2988084dc7d91 | from django.db import models
from django.template.defaultfilters import slugify
from django.contrib.auth.models import User
# Create your models here.
class Category(models.Model):
    """A named category of pages, with view/like counters and a URL slug."""
    name = models.CharField(max_length=128, unique=True)
    views = models.IntegerField(default=0)
    likes = models.IntegerField(default=0)
    slug = models.SlugField(unique=True)

    def save(self, *args, **kwargs):
        # Keep the slug in sync with the (unique) name on every save.
        self.slug = slugify(self.name)
        super(Category, self).save(*args, **kwargs)

    class Meta:
        verbose_name_plural = 'Categories'

    def __str__(self):
        return self.name
class Page(models.Model):
    """An external page linked under a Category, with view/like counters."""
    category = models.ForeignKey(Category, on_delete=models.CASCADE)
    title = models.CharField(max_length=128)
    url = models.URLField(unique=True)
    views = models.IntegerField(default=0)
    likes = models.IntegerField(default=0)

    def __str__(self):
        return self.title
class UserProfile(models.Model):
    """Extra per-user data (profile picture) attached 1:1 to auth.User."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # website = models.URLField(blank=True)
    picture = models.ImageField(upload_to='profile_images', blank=True)

    def pic_url(self):
        """Return the picture URL, or the default avatar when none is set."""
        if self.picture and hasattr(self.picture, 'url'):
            return self.picture.url
        else:
            return '/media/default_user.jpg'

    def __str__(self):
        return self.user.username
class Comment(models.Model):
    """A user's comment; page/news are stored as plain text identifiers."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    #page = models.ForeignKey(Page, on_delete=models.CASCADE)
    page = models.CharField(max_length=256)
    news = models.CharField(max_length=256)
    content = models.CharField(max_length=256)

    def __str__(self):
        return self.user.username
class News(models.Model):
    """A user-authored news item filed under a free-text category."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    category = models.CharField(max_length=512)
    title = models.CharField(max_length=32)
    content = models.CharField(max_length=512)

    def __str__(self):
        return self.title
class Likepage(models.Model):
    """Join table recording that a user liked a page."""
    user = models.ForeignKey(User,on_delete=models.CASCADE)
    page = models.ForeignKey(Page,on_delete=models.CASCADE)
    likepageid = models.IntegerField(null=True)

    def __str__(self):
        return self.page.title + "(liked by " + self.user.username + ")"
985,229 | e712a3315d4a90e9724afeac340bb23801032008 |
# RGB color tuples
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
ORANGE = (255,165,0)
LIGHT_CYAN = (224,255,255)
SLATE_GRAY = (112,128,144)
DARK_GREEN = (0, 100, 0)

# Screen dimensions in pixels
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600

# Frame rate cap (frames per second)
FPS = 100

# Player color
PLAYER_COLOR = ORANGE

# Level background color
LEVEL_COLOR = LIGHT_CYAN

# Player sprite dimensions in pixels
PLAYER_HEIGHT = 80
PLAYER_WIDTH = 60

# Platform color and height (slightly shorter than the player)
PLATFORM_COLOR = DARK_GREEN
PLATFORM_HEIGHT = PLAYER_HEIGHT - 10

# Window title-bar text
WINDOW_TITLE = "New little game"

# Game-loop jump tuning constants
HIGHEST_JUMP_RATE = 10
HIGHEST_JUMP_THRESHOLD = 300

# Path to the level tile sprite sheet
SPRITE_SHEET_PATH = "images/tiles_spritesheet.png"
985,230 | d625b038e8018b731d42af10d19fb825fb2aff1f | from os.path import join as _join
from re import split as _split
def formatInPdf(canvas, text, rowNo, colNo, maxLen):
    ''' Formats the info based on the len of the table and text.

    Wraps *text* into chunks of at most *maxLen* characters, appends a '-'
    continuation marker to every chunk but the last, then draws the chunks
    character by character (right-to-left, via the negative index) onto the
    reportlab-style *canvas*, starting near (rowNo, colNo).
    NOTE(review): rowNo is advanced by 2 per continuation chunk inside the
    marker loop — presumably intentional column padding; confirm.
    '''
    actualText = []
    i = 0
    if len(text) == 1:
        actualText.append(text)
    # Slice the text into maxLen-sized chunks.
    while i < len(text) - 1:
        actualText.append(text[i:i + maxLen])
        i += maxLen
    for i in range(len(actualText)):
        if i != len(actualText) - 1:
            actualText[i] += '-'
            rowNo += 2
    # Draw each chunk; characters are placed right-to-left using -j-1.
    for i in range(len(actualText)):
        for j in range(len(actualText[i])):
            canvas.drawString(rowNo - (j * 4.3) - 1, colNo - (i * 8), actualText[i][-j - 1])
def getCustomerAddress(customerAddress):
    ''' Returns customer address split into lines of < 43 characters.

    Words (split on anything but letters/digits/'.'/',') are greedily packed
    into line strings; a short trailing line is merged into the previous one.
    NOTE(review): uses the Python 2 `unicode` builtin — this function will
    raise NameError on Python 3.
    '''
    count = 0
    formattedAddress = []
    sample_str = ''
    for cust in _split('[^a-zA-Z0-9\.\,]', customerAddress):
        if isinstance(cust, (str, unicode)) and count + len(cust) < 43:
            sample_str += '{} '.format(cust)
            count += len(cust)
        else:
            # Current line is full: flush it and start a new one.
            formattedAddress.append(sample_str)
            sample_str = '{} '.format(cust)
            count = len(sample_str)
    if len(formattedAddress) > 1 and count + len(formattedAddress[-1]) < 43:
        formattedAddress[-1] += sample_str
    else:
        formattedAddress.append(sample_str)
    return formattedAddress
def getParticular(particular):
    ''' Split *particular* into display lines of at most ~42 characters.

    Words are separated on any character other than letters, digits, '.'
    and ',', single words longer than 42 characters are hard-wrapped, and
    the words are then greedily re-packed into line strings (each returned
    line keeps a trailing space).
    '''
    particularItem = _split(r'[^a-zA-Z0-9\.\,]', particular)
    formattedParticular = []
    # Hard-wrap any single word longer than 42 characters.
    splitted_customer = []
    for word in particularItem:
        if len(word) > 42:
            for i in range(0, len(word), 42):
                splitted_customer.append(word[i:i+42])
        else:
            splitted_customer.append(word)
    particularItem = splitted_customer
    tempStr = ''
    count = 0
    # Greedily pack words into lines shorter than 42 characters.
    for cust in particularItem:
        if isinstance(cust, str) and count + len(cust) < 42:
            tempStr += '{} '.format(cust)
            count += len(cust) + 1
        else:
            formattedParticular.append(tempStr)
            tempStr = '{} '.format(cust)
            count = len(tempStr) + 1
    # BUG FIX: the original called ''._join(...) which raises AttributeError
    # (str has no attribute '_join'); ''.join(...) was intended.
    if len(formattedParticular) > 1 and count + len(''.join(formattedParticular[-1])) < 42:
        formattedParticular[-1] += tempStr
    else:
        formattedParticular.append(tempStr)
    return formattedParticular
985,231 | 1e21a46b7b530b8b68d21437561cb66c9fc66eb3 | """
Blaze expression graph for deferred evaluation. Each expression node has
an opcode and operands. An operand is a Constant or another expression node.
Each expression node carries a DataShape as type.
"""
from __future__ import absolute_import, division, print_function
from functools import partial
array = 'array' # array input
kernel = 'kernel' # kernel application, carrying the blaze kernel as a
# first argument (Constant)
class ExprContext(object):
    """
    Context for blaze graph expressions.

    This keeps track of a mapping between graph expression nodes and the
    concrete data inputs (i.e. blaze Arrays).

    Attributes:
    ===========

    terms: { ArrayOp: Array }
        Mapping from ArrayOp nodes to inputs
    """

    def __init__(self, contexts=()):
        # BUG FIX: the default was a mutable list ([]), which is shared
        # between all calls; an (immutable) empty tuple is equivalent here.
        # Coercion constraints between types with free variables
        self.constraints = []
        self.terms = {} # All terms in the graph, { Array : Op }
        self.params = []

        # Merge any parent contexts into this one.
        for ctx in contexts:
            self.constraints.extend(ctx.constraints)
            self.terms.update(ctx.terms)
            self.params.extend(ctx.params)

    def add_input(self, term, data):
        """Register concrete input *data* for graph node *term*."""
        if term not in self.terms:
            self.params.append(term)
        self.terms[term] = data
class Op(object):
    """
    Single node in blaze expression graph.

    Attributes
    ----------
    opcode: string
        Kind of the operation, i.e. 'array' or 'kernel'

    uses: [Op]
        Consumers (or parents) of this result. This is useful to keep
        track of, since we always start evaluation from the 'root', and we
        may miss tracking outside uses. However, for correct behaviour, these
        need to be retained
    """

    def __init__(self, opcode, dshape, *args, **metadata):
        self.opcode = opcode
        self.dshape = dshape
        self.uses = []
        self.args = list(args)

        if opcode == 'kernel':
            # Kernel nodes must record which kernel/overload they apply.
            assert 'kernel' in metadata
            assert 'overload' in metadata
        self.metadata = metadata

        # Register this node as a consumer of each operand.
        for arg in self.args:
            arg.add_use(self)

    def add_use(self, use):
        """Record *use* as a consumer (parent) of this node."""
        self.uses.append(use)

    def __repr__(self):
        opcode = self.opcode
        if opcode == kernel:
            opcode = self.metadata["kernel"]
        metadata = ", ".join(self.metadata)
        return "%s(...){dshape(%s), %s}" % (opcode, self.dshape, metadata)

    def tostring(self):
        """Render this node above its operand subtrees, roughly centered."""
        subtrees = " -+- ".join(map(str, self.args))
        node = str(self)
        length = max(len(subtrees), len(node))
        # BUG FIX: `/` is true division here (from __future__ import division),
        # and str.center() requires an integer width — use floor division.
        return "%s\n%s" % (node.center(len(subtrees) // 2), subtrees.center(length))
# Convenience constructor: graph node for a concrete array input.
ArrayOp = partial(Op, array)

# Kernel application. Associated metadata:
#   kernel:   the blaze.function.Kernel that was applied
#   overload: the blaze.overload.Overload that selected for the input args
KernelOp = partial(Op, kernel)
|
985,232 | 8490f8995d70522d040f9b90aa6445e93a778c6e | from ChipseqReport import *
# for trackers_derived_sets and trackers_master
# Load pipeline configuration (trackers_derived_sets, trackers_master, ...)
# by executing conf.py in this module's namespace.
# NOTE(review): exec of an arbitrary local file — acceptable only because
# conf.py is a trusted project-local configuration file.
if not os.path.exists("conf.py"):
    raise IOError("could not find conf.py")

exec(compile(open("conf.py").read(), "conf.py", 'exec'))
class CountsTranscripts (TrackerSQL):
    """Count distinct genes/transcripts per source, feature and contig
    for every table matching ``transcripts``."""
    mPattern = "transcripts"
    mAsTables = True

    def getSlices(self, subset=None):
        # One slice per grouping column.
        return ("source", "feature", "contig")

    def __call__(self, track, slice=None):
        # %(slice)s / %(track)s are interpolated from locals(); both come
        # from the tracker framework, not user input.
        data = self.get( '''SELECT %(slice)s, count(DISTINCT gene_id), count(DISTINCT transcript_id) 
                                   FROM %(track)s 
                                   GROUP BY %(slice)s''' % locals() )
        result = odict()
        for x, genes, transcripts in data:
            result[x] = odict((('genes', genes), ('transcripts', transcripts)))
        return result
class CountsPromotors(CountsTranscripts):
    """Same per-slice counts as CountsTranscripts, over ``promotors`` tables."""
    mPattern = "promotors"
985,233 | 3f657bf7611469f77e6cf6e8459d964a59c34876 | ii = [('BachARE.py', 3)] |
985,234 | 68f47ea32216595bbac0848291fc9e03ef096520 | from django.contrib import admin
from .models import DepTour
class ToursAdmin(admin.ModelAdmin):
    """Admin for DepTour: visitor-submitted fields are read-only; staff can
    edit confirmation status, scheduling and internal comments."""
    fields = [
        "name",
        "confirmed",
        "datetime",
        "email",
        "phone",
        "comments",
        "deprel_comments",
    ]
    # Submitted by the visitor — not editable by staff.
    readonly_fields = ("name", "email", "phone", "comments")
    list_display = (
        "name",
        "confirmed",
        "email",
        "datetime",
        "date_submitted",
        "phone",
        "comments",
        "deprel_comments",
    )

admin.site.register(DepTour, ToursAdmin)
|
985,235 | f087c11e9ee4f59a7484d404fd85201d1d847948 | import nltk
import string
from collections import Counter
def get_tokens():
    """Return NLTK word tokens of the sample text, lowercased and with
    ASCII punctuation removed.
    """
    text = """
    Любой data scientist ежедневно работает с большими объемами данных. Считается, что около 60% – 70% времени занимает
    первый этап рабочего процесса: очистка, фильтрация и преобразование данных в формат, подходящий для применения
    алгоритмов машинного обучения. На втором этапе выполняется предобработка и непосредственное обучение моделей.
    В сегодняшней статье мы сконцентрируемся на втором этапе процесса и рассмотрим различные техники и рекомендации,
    являющиеся результатом моего участия более чем в 100 соревнованиях по машинному обучению. Несмотря на то, что
    описанные концепции имеют достаточно общий характер, они будут полезны при решении многих конкретных задач.
    Все примеры кода написаны на Python!
    """
    lowers = text.lower()
    # remove the punctuation using the character deletion step of translate.
    # BUG FIX: str.translate() requires a translation table; calling it with
    # no arguments raises TypeError.  str.maketrans('', '', chars) builds a
    # deletion table for ASCII punctuation.
    no_punctuation = lowers.translate(str.maketrans('', '', string.punctuation))
    tokens = nltk.word_tokenize(no_punctuation)
    return tokens
# Tokenize the sample text and print the 10 most frequent tokens.
tokens = get_tokens()
count = Counter(tokens)
print(count.most_common(10))
|
985,236 | 1fc2be7e49f9e138955254386173625318846219 | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
    """Landing page: return a plain-text greeting."""
    return HttpResponse("Hi, hier gibt's Miete zurück!")
|
985,237 | bda3d997b1fc7801ef1bb452e53e2e489acc614a | class Command:
UNDEFINED = '0'
JOIN_CHANNEL = '1'
LEAVE_CHANNEL = '2'
GET_CHANNELS = '3'
GET_USER = '4'
USER_JOINS = '5'
USER_LEAVES = '6'
|
985,238 | 7c7b135f55fc0c2e7051bfce8addedf69ee8368c | #coding=utf-8
'''
Created on 2017年7月4日
@author: FeiFei
'''
'''
the flowing methods aim to match attribute value and the definition
'''
def is_match_tokens2tokens(tokens1, tokens2):
    '''
    Return True when the two token sequences match on their word fields
    (index 0 of each token tuple), compared pairwise over len(tokens1).
    '''
    for i in range(len(tokens1)):
        # if tokens1[i].equal(tokens2[i])==False:
        # BUG FIX: the comparison was inverted (`==`), making the function
        # return False exactly when the words MATCHED; a mismatch is a
        # *differing* word, as the commented-out original line shows.
        if tokens1[i][0] != tokens2[i][0]:
            return False
    return True
def find_prefix(tokens, i):
    '''Return all non-empty proper prefixes of tokens[:i+1], shortest first.'''
    return [tokens[:length] for length in range(1, i + 1)]
def find_postfix(tokens, i):
    '''Return all non-empty proper suffixes of tokens[:i+1], longest first.'''
    return [tokens[start:i + 1] for start in range(1, i + 1)]
def caculate_partial_table(tokens):
    '''Build the KMP partial-match ("failure") table for *tokens*.

    Entry i is the length of the longest proper prefix of tokens[:i+1] that
    is also a suffix, used to decide how far to shift on a mismatch.
    NOTE(review): correctness depends on is_match_tokens2tokens(), whose
    comparison is inverted in this module — verify together with that fix.
    '''
    if len(tokens)==1:
        return [0]
    ret = [0]
    for i in range(1,len(tokens)):
        prefix=find_prefix(tokens,i)
        postfix=find_postfix(tokens,i)
        # Sort both lists by length so equal-length pairs line up.
        prefix.sort(key=lambda x:len(x))
        postfix.sort(key=lambda x:len(x))
        common=[]
        for i in range(len(prefix)):
            if is_match_tokens2tokens(prefix[i],postfix[i]):
                common.append(len(prefix[i]))
        if len(common)==0:
            ret.append(0)
        else:
            ret.append(max(common))
    return ret
def KMP_match(attribute_value_tokens,definition_tokens):
    '''KMP search: return the start index of attribute_value_tokens inside
    definition_tokens (comparing only the word at token index 0), or -1.'''
    table=caculate_partial_table(attribute_value_tokens)
    m=len(definition_tokens)
    n=len(attribute_value_tokens)
    cur=0
    while cur<=m-n:
        for i in range(n):
            if definition_tokens[i+cur][0]!=attribute_value_tokens[i][0]:
                # With the partial-match table we can shift more than one
                # position at a time instead of sliding by 1.
                cur += max(i - table[i-1], 1)
                break
        else:
            return cur
    return -1
def KMP_match_token(attribute_value_tokens,definition_tokens):
    '''Like KMP_match, but tokens must match on BOTH fields
    (word at index 0 and the tag/POS at index 1).'''
    # print attribute_value_tokens,definition_tokens
    table=caculate_partial_table(attribute_value_tokens)
    m=len(definition_tokens)
    n=len(attribute_value_tokens)
    cur=0
    while cur<=m-n:
        for i in range(n):
            if definition_tokens[i+cur][0]!=attribute_value_tokens[i][0] or definition_tokens[i+cur][1]!=attribute_value_tokens[i][1]:
                # With the partial-match table we can shift more than one
                # position at a time instead of sliding by 1.
                cur += max(i - table[i-1], 1)
                break
        else:
            return cur
    return -1
def get_match_pos(attribute_value_tokens,tokens):
    '''Return the (start, end) span of the word-level match in *tokens*.
    NOTE(review): when no match exists start is -1, so end is misleading.'''
    start=KMP_match(attribute_value_tokens,tokens)
    end=start+len(attribute_value_tokens)
    return (start,end)
def get_match_pos_token(attribute_value_tokens,tokens):
    '''Return the (start, end) span of the full-token (word+tag) match.
    NOTE(review): when no match exists start is -1, so end is misleading.'''
    start=KMP_match_token(attribute_value_tokens,tokens)
    end=start+len(attribute_value_tokens)
    return (start,end)
985,239 | b37e09c93d48f777ba3ddc39ff4c5fc654978ec1 | #------------------------------------------------------------------------------
# Copyright (c) 2011, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
import unittest
from datetime import date
from traits.api import (HasStrictTraits, TraitError, Float, Instance, Date)
from enaml.core.trait_types import Bounded
class Test_Bounded_Static(unittest.TestCase):
    """ Test the use of the Bounded trait with static bounds.
    """

    # NOTE(review): classmethods conventionally name their first argument
    # `cls`, not `self`.
    @classmethod
    def setUpClass(self):
        class my_class(HasStrictTraits):
            value = Bounded(0.2, 0, 4)
        self.traits_class = my_class

    def setUp(self):
        self.traits_instance = self.traits_class()

    def test_init(self):
        """ Test basic static initialization """
        value = self.traits_instance.value
        self.assertAlmostEqual(value, 0.2)

    def test_assigment(self):
        """ Test static assigment """
        instance = self.traits_instance
        instance.value = 0.7
        self.assertAlmostEqual(instance.value, 0.7)

    def test_invalid(self):
        """ Test that assigning outside the static bounds raises """
        instance = self.traits_instance
        with self.assertRaises(TraitError):
            instance.value = -2
class Test_Bounded_Dynamic(unittest.TestCase):
    """ Test the use of the Bounded trait with dynamic bounds.
    """

    # NOTE(review): classmethods conventionally name their first argument
    # `cls`, not `self`.
    @classmethod
    def setUpClass(self):
        class my_class(HasStrictTraits):
            low = Float(0)
            high = Float(4)
            value = Bounded(0.2, low='low', high='high')
        self.traits_class = my_class

    def setUp(self):
        self.traits_instance = self.traits_class()

    def test_init(self):
        """ Test dynamic initialization. """
        value = self.traits_instance.value
        self.assertAlmostEqual(value, 0.2)

    def test_assigment(self):
        """ Test assigment. """
        instance = self.traits_instance
        instance.value = 0.7
        self.assertAlmostEqual(instance.value, 0.7)

    def test_invalid(self):
        """ Test invalid assigment. """
        instance = self.traits_instance
        with self.assertRaises(TraitError):
            instance.value = -2

    def test_change_lower(self):
        """ Test changing the lower bound. """
        instance = self.traits_instance
        instance.low = -4.0
        instance.value = -2
        self.assertAlmostEqual(instance.value, -2)

    def test_change_upper(self):
        """ Test changing the upper bound. """
        instance = self.traits_instance
        instance.high = 6.0
        instance.value = 5.7
        self.assertAlmostEqual(instance.value, 5.7)
class Test_Bounded_Special(unittest.TestCase):
    """ Test special use cases for the Bounded trait.
    """

    def test_inner_bound_class(self):
        """ Test dynamic initialization with bounds held by an inner
        (nested-attribute) class.
        """
        class small_class(HasStrictTraits):
            low = Float(0)
            high = Float(2)

        class main_class(HasStrictTraits):
            bounds = Instance(small_class, ())
            value = Bounded(0.2, 'bounds.low', 'bounds.high')

        instance = main_class()
        instance.value = 0.2
        self.assertAlmostEqual(instance.value, 0.2)
        with self.assertRaises(TraitError):
            instance.value = -1

    def test_bounded_traits(self):
        """ Test initialization with a Trait type (Date) as the value. """
        class main_class(HasStrictTraits):
            value = Bounded(Date(date(2007,12, 18)),
                            date(2003,12, 18),
                            date(2010,12, 18))

        instance = main_class()
        self.assertEqual(instance.value, date(2007,12, 18))
        with self.assertRaises(TraitError):
            instance.value = 0.2
        instance.value = date(2008,12, 18)
        self.assertEqual(instance.value, date(2008,12, 18))

    def test_bounded_python(self):
        """ Test initialization with a plain Python object (date) as the
        default value and bounds.
        """
        class main_class(HasStrictTraits):
            value = Bounded(date(2007,12, 18),
                            date(2003,12, 18),
                            date(2010,12, 18))

        instance = main_class()
        self.assertEqual(instance.value, date(2007,12, 18))
        with self.assertRaises(TraitError):
            instance.value = 0.2
        instance.value = date(2008,12, 18)
        self.assertEqual(instance.value, date(2008,12, 18))
985,240 | 0bee68671079a011334d1eed0ad08c4febb49340 | class MapSum(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.d = {}
def insert(self, key, val):
"""
:type key: str
:type val: int
:rtype: void
"""
self.d[key] = val
def sum(self, prefix):
"""
:type prefix: str
:rtype: int
"""
return sum([self.d[i] for i in self.d if i.startswith(prefix)])
class TrieNode(object):
    """Single trie node: child map plus the sum of values below this node."""
    def __init__(self):
        # char -> TrieNode
        self.children = {}
        # Sum of values of all keys passing through this node.
        self.score = 0
class MapSumTrie(object):
    """Prefix-sum map backed by a trie; every node caches the running sum
    of all key values that pass through it, so sum() is O(len(prefix))."""

    def __init__(self):
        """
        Initialize your data structure here.
        """
        # key -> last inserted value; used to compute deltas on re-insert.
        self.d = {}
        self.root = TrieNode()

    def insert(self, key, val):
        """
        :type key: str
        :type val: int
        :rtype: void
        """
        # Propagate only the change relative to any previous value.
        delta = val - self.d.get(key, 0)
        self.d[key] = val
        node = self.root
        node.score += delta
        for ch in key:
            node = node.children.setdefault(ch, TrieNode())
            node.score += delta

    def sum(self, prefix):
        """
        :type prefix: str
        :rtype: int
        """
        node = self.root
        for ch in prefix:
            node = node.children.get(ch)
            if node is None:
                return 0
        return node.score
# Your MapSum object will be instantiated and called as such:
# obj = MapSum()
# obj.insert(key,val)
# param_2 = obj.sum(prefix)
# Quick manual exercise of MapSumTrie: the second "apple" insert overwrites
# the first (net value 2), then "ap" and "y" are added.
a = MapSumTrie()
a.insert("apple", 3)
a.insert("apple", 2)
a.insert("ap", 2)
a.insert("y", 1)
985,241 | fbb17285443326ee6185fa9f3e752523af0214ed | from __future__ import unicode_literals
from django.db import models
import re
import datetime
# Basic e-mail shape check: local@domain.tld (letters/digits/._+- only).
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
''' BASIC VALIDATIONS:
- Email must be of specified regex format
- Name must have length of at least 1
- Password and password_conf need to match
'''
class UserManager(models.Manager):
    """Manager adding registration-form validation for User."""

    def basic_validator(self, postData):
        """Validate registration POST data.

        Returns a dict of field -> error message, or None when everything
        passes.  Expects keys: name, alias, email, password, password_conf,
        birthdate.
        """
        errors = {}
        if len(postData['name']) < 1:
            errors['name'] = "First name field should include at least 1 character"
        if len(postData['alias']) < 1:
            errors['alias'] = "Last name field should include at least 1 character"
        if not re.match(EMAIL_REGEX, postData['email']):
            errors['email'] = "Email is not fomatted correctly"
        if len(postData['password']) < 8:
            errors['password'] = "Password field must include at least 8 characters"
        if postData['password'] != postData['password_conf']:
            errors['password_conf'] = "Password field must match Password Confirmation field"
        # NOTE(review): this compares dates as strings — only correct when
        # the form posts ISO "YYYY-MM-DD" values; confirm the input format.
        if str(postData['birthdate']) > str(datetime.date.today()):
            errors['birthdate'] = "Birthdate must be before today"
        if not postData['birthdate']:
            errors['birthdate'] = "Please enter a birthdate"
        if errors:
            return errors
        else:
            pass
class User(models.Model):
    """Registered user; passwords are stored as provided (hashing is
    presumably done by the caller — verify)."""
    name = models.CharField(max_length=255)
    alias = models.CharField(max_length=255, unique=True)
    email = models.CharField(max_length=255, unique=True)
    password = models.CharField(max_length=255)
    created_at = models.DateTimeField(auto_now_add = True)
    updated_at = models.DateTimeField(auto_now = True)
    objects = UserManager()
class QuoteManager(models.Manager):
    """Manager adding form validation for Quote."""

    def basic_validator(self, postData):
        """Return field -> error dict for quote POST data, or None if valid."""
        errors = {}
        if len(postData['author']) < 1:
            errors['author'] = "'Quoted By:' field should include at least 1 character"
        if len(postData['quotation']) < 1:
            errors['quotation'] = "'Quotation:' field should include at least 1 character"
        if errors:
            return errors
        else:
            pass
class Quote(models.Model):
    """A quotation submitted by a user."""
    author = models.CharField(max_length=254)   # the person being quoted
    quotation = models.TextField()              # the quote text itself
    poster = models.ForeignKey(User, related_name = "posters")  # submitting user
    created_at = models.DateTimeField(auto_now_add = True)
    updated_at = models.DateTimeField(auto_now = True)
    # Many-to-many link back to users; purpose not shown here —
    # presumably users who liked/favourited the quote (verify).
    users = models.ManyToManyField(User, related_name="quotes")
objects = QuoteManager() |
985,242 | d2bc164c73b8e36dbcee2a48cf6e21a298a735d2 | import numpy as np
import sys
sys.path.append('sample\ch04')
from p157unigram_sampler import UnigramSampler
# Small word-id corpus used to build the unigram distribution.
corpus = np.array([0, 1, 2, 3, 4, 1, 2, 3])
# Smoothing exponent applied to word frequencies (standard word2vec value).
power = 0.75
# Number of negative samples drawn per target.
sample_size = 2
sampler = UnigramSampler(corpus, power,sample_size)
targe =np.array([1, 3, 0])
negative_sample = sampler.get_negative_sample(targe)
print(negative_sample) |
985,243 | 5ea1274b3a1ae5922a967de0cc248441d2962951 | from pynput import keyboard
class Hotkey:
    """
    Class used to handle the hotkey listening
    """
    def __init__(self, frame):
        # Owning UI frame; provides the hotkey character and toggle_pause().
        self.frame = frame
        self.hotkey = frame.hotkey
        # The key combinations to check: Alt (generic/left/right) + hotkey char.
        self.COMBINATION = [{keyboard.Key.alt, keyboard.KeyCode(char=self.hotkey)},
                            {keyboard.Key.alt_l, keyboard.KeyCode(char=self.hotkey)},
                            {keyboard.Key.alt_r, keyboard.KeyCode(char=self.hotkey)}]
        # The currently active modifiers
        self.current = set()
    def on_press(self, key):
        """pynput press callback: track combo keys; toggle pause on a full match."""
        # If the key press is part of any in the COMBINATION array
        if any([key in comb for comb in self.COMBINATION]):
            self.current.add(key)
            # If we now have all of the keys in one of the entries of the array
            if any(all(k in self.current for k in comb) for comb in self.COMBINATION):
                self.frame.toggle_pause()
                # remove everything in the 'current' set (i.e reset hotkey)
                self.current.clear()
    def on_release(self, key):
        """pynput release callback: stop tracking the key if we held it."""
        try:
            self.current.remove(key)
        except KeyError:
            # Key was never part of a tracked combination; ignore.
            pass
|
985,244 | 62e44f349196526ddea98fde3c4017e56df721b4 | # Nested If (การซ้อน If)
x = 41
# Outer check runs first; the nested check only runs when it passes.
if x > 10:
    print("x มากกว่า 10")  # "x is greater than 10"
    if x > 20:
        print("และ x มากกว่า 20")  # "and x is greater than 20"
# Taken only when x <= 10.
else:
print("แต่ x ไม่มากกว่า 10") |
985,245 | c74628df0c02c389ab66364fa72d5cea736bcf0b | L = int(input())
# Cube of one third of the input (true float division in Python 3).
res = (L/3) ** 3
print(res) |
985,246 | 17f8fab312b8a906612fee52d04a21f994dcf6fd | # encoding:utf-8
'''
@Author: catnlp
@Email: wk_nlp@163.com
@Time: 2018/5/18 12:18
'''
import os
def addContent(target, file, tag):
    """Append *file*'s token stream to *target*, wrapped in <tag> markers.

    Sentences are separated by blank lines; every boundary receives a
    closing marker for the previous sentence plus an opening marker for
    the next one, and the whole file is bracketed by a single opening
    and closing marker pair.
    """
    print(file)
    open_mark = '<' + tag + '>' + '\tS-' + tag + '\n'
    close_mark = '</' + tag + '>' + '\tS-' + tag + '\n'
    with open(file) as src:
        text = src.read()
    # Insert the marker pair at every sentence boundary.
    text = text.replace('\n\n', '\n' + close_mark + '\n' + open_mark)
    target.write(open_mark + text + close_mark + '\n')
def make_jointCorpus(dataset, name, dirList):
    """Merge several tagged corpora into one joint train/devel/test corpus.

    Args:
        dataset: root directory holding the per-corpus '<dir>-IOBES' folders.
        name: sub-directory name to create for the joint corpus.
        dirList: corpus names; each one becomes the wrapping tag.
    """
    print('---make joint corpus---')
    # Normalize a trailing slash so path joins below stay clean.
    if dataset[-1] == '/':
        dataset = dataset[0: -1]
    joint_dir = dataset + '/' + name
    if not os.path.exists(joint_dir):
        os.makedirs(joint_dir)
    trainF = joint_dir + '/train.tsv'
    develF = joint_dir + '/devel.tsv'
    testF = joint_dir + '/test.tsv'
    # Note: the names are rebound from path strings to open file handles here.
    with open(trainF, 'w') as trainF, open(develF, 'w') as develF, open(testF, 'w') as testF:
        for dir in dirList:
            tag = dir
            dir = dataset + '/' + dir + '-IOBES'
            # Each split of each corpus is appended, wrapped in its tag.
            addContent(trainF, dir+'/train.tsv', tag)
            addContent(develF, dir+'/devel.tsv', tag)
            addContent(testF, dir+'/test.tsv', tag)
if __name__ == "__main__":
    # Merge the four species corpora into one joint corpus.
    dataset = '../../data/group/species'
    name = 'joint-species'
    dirList = ['BioNLP11ID-species', 'BioNLP13CG-species', 'CRAFT-species', 'linnaeus']
make_jointCorpus(dataset, name, dirList) |
985,247 | b01adf4cc8a22652e19040decbbf33f765de7e03 | from rest_framework import serializers
from . import models
from .models import Note, Comment
class DynamicFieldsModelSerializer(serializers.ModelSerializer):
    """ModelSerializer accepting an optional ``fields`` kwarg that limits
    output to the named fields."""

    def __init__(self, *args, **kwargs):
        requested = kwargs.pop('fields', None)
        super(DynamicFieldsModelSerializer, self).__init__(*args, **kwargs)
        if requested:
            # Drop every declared field the caller did not ask for.
            for name in set(self.fields.keys()) - set(requested):
                self.fields.pop(name)
class NoteSerializer(DynamicFieldsModelSerializer):
    """Serializer for Note objects with hyperlinked relations."""
    # Owner of the note (generic relation), rendered as a user-detail link.
    content_object = serializers.HyperlinkedRelatedField(view_name='user_detail', read_only=True)
    comments = serializers.HyperlinkedRelatedField(view_name='comment_detail', read_only=True, many=True)
    # Self link to this note's detail view.
    link = serializers.HyperlinkedIdentityField(view_name='note_detail', read_only=True)
    class Meta:
        model = models.Note
        fields = ['text', 'image', 'content_object', 'uploaded_time', 'comments', 'link']
    def create(self, validated_data):
        """Create a Note owned by the requesting user."""
        request = self.context['request']
        note = Note(text=validated_data['text'], image=validated_data['image'], content_object=request.user)
        note.save()
        return note
class CommentSerializer(serializers.ModelSerializer):
    """Serializer for Comment objects.

    The write-only 'to' field encodes the reply target as either
    'note <id>' or 'comment <id>'; its choices are rebuilt per request
    in __init__.
    """
    # Placeholder; real choices are injected in __init__ from the request user.
    to = serializers.ChoiceField(choices=[], write_only=True)
    comment = serializers.HyperlinkedRelatedField(view_name='comment_detail', read_only=True)
    author = serializers.HyperlinkedRelatedField(view_name='user_detail', read_only=True)
    note = serializers.HyperlinkedRelatedField(view_name='note_detail', read_only=True)
    class Meta:
        model = models.Comment
        fields = ['text', 'author', 'note', 'comment', 'uploaded_time', 'to']
    def save(self, *args, **kwargs):
        """Create the comment under the note or comment named by 'to'."""
        to = self.validated_data['to']
        request = self.context.get('request')
        if 'note' in to:
            # 'note <id>' -> top-level comment; strip the 'note ' prefix.
            note = Note.objects.get(id=int(to[5:]))
            parent_comment = None
        else:
            # 'comment <id>' -> reply; resolve the note via the parent comment.
            parent_comment = Comment.objects.get(id=int(to[8:]))
            note = Note.objects.get(comments=parent_comment)
        comment = Comment(text=self.validated_data['text'],
                          note=note,
                          author=request.user,
                          comment=parent_comment)
        comment.save()
        return comment
    def __init__(self, *args, **kwargs):
        super(CommentSerializer, self).__init__(*args, **kwargs)
        # Rebuild the grouped reply-target choices visible to this user.
        NOTES_CHOICES = [('note ' + str(note.id), str(note.text)) for note
                         in Note.get_notes_can_be_comment(self.context['request'].user)]
        COMMENTS_CHOICES = [('comment ' + str(comment.id), str(comment.text)) for comment
                            in Comment.get_comments_can_be_comment(self.context['request'].user)]
        TO_CHOICES = [
            ('Note', NOTES_CHOICES),
            ('Comment', COMMENTS_CHOICES)
        ]
        self.fields['to'] = serializers.ChoiceField(choices=TO_CHOICES, write_only=True)
        # Updates take no reply target; only creation does.
        if self.instance is not None:
            self.fields.pop('to')
|
985,248 | 9405cb97735ccd35f63889807f2b553aada4a9d3 | def main():
lines = input()
encypted_results = []
for i in range(0,lines):
curr_result = input()
encypted_results.append(str(curr_result))
for result in encypted_results:
# positive result
if result == "1" or result == "4" or result == "78":
print("+")
# negative result
else if result.endswith("35"):
print("-")
# Failed
else if result.startswith("9") and result.endswith("4"):
print ("*")
# Not Completed
else if result.startswith("190"):
print("?")
else:
print("+")
return 0
main()
|
985,249 | 84daaf9e9189a39b1780b49f046a53a2a17c0286 | # ------------------------------------------------------------------------------
# Copyright (c) 2020 Zero A.E., LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import pytest
from click.testing import CliRunner
from zeroae.smee import cli
@pytest.fixture()
def smee_server_mock(requests_mock):
    """Mock a smee.io channel that emits a ready event, a ping, and one
    payload event, then closes; returns the channel URL."""
    url = "mock://smee.io/new"
    requests_mock.get(
        url,
        text="\n".join(
            [
                "event:ready\ndata:{}\n",
                "event:ping\ndata:{}\n",
                'data:{"body":{},"timestamp":1,"query":{}}\n\n',
            ]
        ),
    )
    return url
def test_command_line_interface(smee_server_mock, requests_mock):
    """Test the SMEE CLI forwarding to an explicit --target URL."""
    runner = CliRunner()
    args = [f"--url={smee_server_mock}"]
    # Mock the forwarding target so the relayed POST succeeds.
    target_url = "mock://target.io/events"
    requests_mock.post(target_url)
    args += [f"--target={target_url}"]
    help_result = runner.invoke(cli.smee, args)
    assert help_result.exit_code == 0
    assert f"Connected {smee_server_mock}" in help_result.output
@pytest.mark.parametrize(
    "port,path", [(None, None), (6000, None), (None, "/events"), (6000, "/events")]
)
def test_command_line_interface_port_path(port, path, smee_server_mock, requests_mock):
    """Test the SMEE CLI with explicit/default --port and --path options."""
    runner = CliRunner()
    args = [f"--url={smee_server_mock}"]
    # None means "use the CLI default" (3000 / "/") and pass no flag.
    if port is None:
        port = 3000
    else:
        args += [f"--port={port}"]
    if path is None:
        path = "/"
    else:
        args += [f"--path={path}"]
    target_url = f"http://127.0.0.1:{port}{path}"
    requests_mock.post(target_url)
    help_result = runner.invoke(cli.smee, args)
    assert help_result.exit_code == 0
    assert f"Connected {smee_server_mock}" in help_result.output
|
985,250 | fc1b54b75258da437f09bc94e435769d116a440e | #coding: utf-8
from django.db import models
from django.core.files.storage import FileSystemStorage
from django.utils.safestring import mark_safe
fs = FileSystemStorage(location='/media')
class Imovel(models.Model):
    """A real-estate property listing with a display photo."""
    nome = models.CharField(max_length=100)       # property name
    endereco = models.CharField(max_length=100)   # address
    preco = models.DecimalField(max_digits=10, decimal_places=2)  # price
    photo = models.ImageField(upload_to='documents/')
    def image_tag(self):
        # Render the photo as an HTML <img> snippet (marked safe for templates/admin).
        return mark_safe('<img src="%s" width="150" height="150" />' % (self.photo.url))
    class Meta:
        # Human-readable (Portuguese) names used by the Django admin.
        verbose_name = "Imóvel"
        verbose_name_plural = "Imóveis"
    # Python 2-style string representation; Python 3 would use __str__.
    def __unicode__(self):
return self.nome |
985,251 | 0dbeb67a7de107659a55faef986f15d0090d73a7 | import sqlite3
# Load BOVESPA historical quote records (fixed-width COTAHIST format)
# into a SQLite database. Python 2 script (uses raw_input).
conn = sqlite3.connect('BOVESPAteste')
conn.text_factory = str
cur = conn.cursor()
fname = raw_input('Enter file name: ')
if len(fname)<1: fname = 'DemoCotacoesHistoricas12022003.txt'
pasta = '/home/user/Downloads/Hist_Bovespa/'
with open(pasta+fname,'r') as file:
    lines = file.readlines()
    # Skip the header (first) and trailer (last) records.
    for line in lines[1:-1]:
        # Fixed-width column slices; prices carry 2 implied decimals (/100).
        identi_id = int(line[0:2])
        # Trade date rebuilt as ISO YYYY-MM-DD.
        data = line[2:6] + '-' + line[6:8] + '-' + line[8:10]
        codbdi_id = int(line[10:12])
        acoes_sigla = line[12:24].strip()
        tpmerc_id = int(line[24:27])
        acoes_nome = line[27:39].strip()
        especi_sigla = line[39:49].strip()
        try:
            prazot = int(line[49:52])
        except:
            # Field is blank for spot-market rows; store NULL.
            prazot = None
        moeda = line[52:56].strip()
        preabe = float(line[56:69])/100
        premax = float(line[69:82])/100
        premin = float(line[82:95])/100
        premed = float(line[95:108])/100
        preult = float(line[108:121])/100
        preofc = float(line[121:134])/100
        preofv = float(line[134:147])/100
        totneg = int(line[147:152])
        quatot = int(line[152:170])
        voltot = float(line[170:188])/100
        preexe = float(line[188:201])/100
        indopc_id = int(line[201])
        # Option expiry date, also rebuilt as ISO.
        datven = line[202:206] + '-' + line[206:208] + '-' + line[208:210]
        fatcot = int(line[210:217])
        # Strike price in points carries 6 implied decimals.
        ptoexe = float(line[217:230])/1000000
        dismes = int(line[242:245])
        # Upsert the lookup tables, then resolve their ids.
        cur.execute('INSERT OR IGNORE INTO ESPECI(sigla) VALUES(?)',(especi_sigla,))
        cur.execute('INSERT OR IGNORE INTO MODREF(moeda) VALUES(?)',(moeda,))
        cur.execute('INSERT OR IGNORE INTO ACOES(sigla,nome) VALUES(?,?)',(acoes_sigla,acoes_nome))
        cur.execute('SELECT id FROM ESPECI WHERE sigla = ?',(especi_sigla,))
        especi_id = cur.fetchone()[0]
        cur.execute('SELECT id FROM MODREF WHERE moeda = ?',(moeda,))
        moeda_id = cur.fetchone()[0]
        cur.execute('SELECT id FROM ACOES WHERE sigla = ?',(acoes_sigla,))
        acoes_id = cur.fetchone()[0]
        # Insert the full quote row referencing the lookup ids.
        cur.execute('''INSERT INTO ATRIBUTOS(data,prazot,preabe,premax,premin,premed,preult,
            preofc,preofv,totneg,quatot,voltot,preexe,datven,fatcot,ptoexe,dismes,id_identi,
            id_codbdi,id_especi,id_modref,id_indopc,id_tpmerc,id_acao)
            VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)''',(data,prazot,preabe,
            premax,premin,premed,preult,preofc,preofv,totneg,quatot,voltot,preexe,datven,fatcot,
            ptoexe,dismes,identi_id,codbdi_id,especi_id,moeda_id,indopc_id,tpmerc_id,acoes_id))
# Persist everything in one transaction at the end.
conn.commit()
cur.close()
file.close() |
985,252 | 9c6c38ed41b2b7cd7934dd60653d844826cd01ad | # Given a population as stdin, and a position and instruction as arguments,
# creates random hybrids (the Avida way), and prints those hybrids
# that do not have the specified allele
import sys
import random
N_HYBRIDS = 10000
def create_random_hybrid_from_pop(pop):
    """Draw two random parents from *pop* and return their recombinant."""
    return create_random_hybrid(choose_parents(pop))
def choose_parents(pop):
parent_1 = random.choice(pop)
parent_2 = random.choice(pop)
return [parent_1, parent_2]
def create_random_hybrid(parents):
    """Recombine two parents by copying a random wrapped chunk of the
    second parent onto the first."""
    genome_length = len(parents[0])
    return create_hybrid(parents, get_random_chunk(genome_length))
def get_random_chunk(length):
    """Draw a random start and size (in that order, preserving the RNG
    stream) and return the corresponding wrapped chunk of positions."""
    start = random.randint(0, length - 1)
    chunk_size = random.randint(1, length - 1)
    return get_chunk(start, chunk_size, length)
def get_chunk(start_pos, size, length):
    """Return *size* consecutive positions beginning at *start_pos*,
    wrapped around a genome of the given *length*."""
    return [wrapped_pos(start_pos + offset, length) for offset in range(size)]
def wrapped_pos(pos, length):
    """Map *pos* onto the circular genome (simple modulo wrap)."""
    return pos % length
def create_hybrid(parents, chunk):
    """Build a child genome: positions in *chunk* come from parents[1],
    all other positions from parents[0]."""
    genome_length = len(parents[0])
    return list_to_string([choose_parent_inst(parents, chunk, pos)
                           for pos in range(genome_length)])
def choose_parent_inst(parents, chunk, pos):
    """Select the instruction at *pos*: second parent inside *chunk*,
    first parent otherwise."""
    if pos in chunk:
        return parents[1][pos]
    return parents[0][pos]
def list_to_string(alist):
    """Concatenate a list of characters into one string."""
    return ''.join(alist)
def has_allele(sequence, pos, inst):
    """True when *sequence* carries instruction *inst* at position *pos*."""
    return inst == sequence[pos]
if len(sys.argv) < 2:
print 'Arguments: pos inst'
exit(1)
pos = int(sys.argv[1])
inst = sys.argv[2]
pop = [genotype.strip() for genotype in sys.stdin]
for i in range(N_HYBRIDS):
hybrid = create_random_hybrid_from_pop(pop)
while has_allele(hybrid, pos, inst):
hybrid = create_random_hybrid_from_pop(pop)
print hybrid
|
985,253 | e5abc199013dc57ebbd6712fb3535256c3847436 |
import array
import random
import json
import sys
import traceback
import inspect
import sys_output
from datetime import datetime
import numpy
from math import sqrt
import os
from deap import algorithms
from deap import base
from deap import benchmarks
from deap.benchmarks.tools import diversity, convergence, hypervolume
from deap import creator
from deap import tools
from random import choice, shuffle, randint, randrange
from scipy.spatial import distance
import decision_tree
import generate_data
import create_xml_scenario
POS_FILE = "position.txt"
ROT_FILE = "rotation.txt"
def main(num_gen, num_pop):
    """Entry point: run NSGA-II + decision-tree test-case generation.

    Args:
        num_gen: number of generations (string or int; coerced below).
        num_pop: population size (string or int; coerced below).
    """
    #run beamNG to collect data
    sys_output.print_star_end("Start the process of generation testcases with NSGAII_DT")
    int_num_gent = int(num_gen)
    int_num_pop = int(num_pop)
    # print("\n Collect data from BeamNG.research with Grid Map. This process takes around 1 minute. BeamNG is running. Please wailt.... ")
    #(pos_beamNG ,rot_beamNG) = data_map.collect_data(beamNG_path)
    #run nsga2 algorithms
    #sys_output.print_title(" Finish collect data from BeamNG.research")
    nsga2(int_num_gent,int_num_pop)
    sys_output.print_star_end("End the process of generation testcases from NSGAII_DT")
def print_pop(pop):
    """Pretty-print a population, one '<index> <individual>' pair per line."""
    for position, individual in enumerate(pop):
        print("\n " + str(position) + " " + str(individual))
    print("\n")
def file_name():
    """Return the __file__ path of the module that called this function
    (inspects one frame up the call stack)."""
    frame = inspect.stack()[1]
    module = inspect.getmodule(frame[0])
    filename = module.__file__
    return filename
def write_data(name_file, arg):
    """Write one item per line to *name_file*, stripping the first and
    last character of each item's str() form (e.g. tuple parentheses)."""
    with open(name_file, 'w') as out:
        out.writelines(str(item)[1:-1] + "\n" for item in arg)
def gen_pop(num_ind):
    """Create *num_ind* random individuals and dump them to population.txt.

    Each individual is the tuple (0, 0, 0, 0, distance, 0, speed) with
    distance drawn from [10, 200] and speed from [10, 80]; the draws
    alternate distance/speed per individual, matching the original RNG
    stream.
    """
    pop = []
    for _ in range(num_ind):
        distance = random.randint(10, 200)
        speed = random.randint(10, 80)
        pop.append((0, 0, 0, 0, distance, 0, speed))
    write_data("population.txt", pop)
    return pop
def nsga2(number_gen, num_pop):
    """Run NSGA-II (via DEAP), filtering each generation's parents through
    a decision tree, and return the final population.

    Args:
        number_gen: number of generations.
        num_pop: population size.
    """
    # Two objectives: minimize the first fitness value, maximize the second.
    creator.create("Fitness", base.Fitness, weights=(-1.0, 1.0))
    creator.create("Individual", list, fitness=creator.Fitness)
    toolbox = base.Toolbox()
    # POS_FILE = "position.txt"
    # ROT_FILE = "rotation.txt"
    # get position and rotation from data files
    # pos = generate_data.getData(POS_FILE)
    # rot = generate_data.getData(ROT_FILE)
    #initial_pos_rot = generate_data.get_pos_rot_from_list(pos,rot)
    initial_pos_rot = gen_pop(num_pop)
    now = datetime.now()
    path_dir = os.getcwd()
    def get_scenario_State():
        """Build one random scenario: two cars plus a speed value."""
        temp_dist = random.randint(10, 200)
        temp_speed = random.randint(10, 80)
        # car1_list = []
        # minLen = min(len(pos),len(rot))
        # for i in range(minLen):
        # state = pos[i] +','+ rot [i]
        # car1_list.append(state)
        # #speed of car
        scenario = []
        # random_index_pos_list = randrange(0,minLen-1)
        # car1 = car1_list[random_index_pos_list]
        # #car2 = car2_list[random_index_pos_list]
        # car2 = generate_data.get_pos_rot_secondCar(car1_list,random_index_pos_list)
        car1 = [0,0,0,0,90,0] #rotation need to be checked later, 3 last element is rotation
        car2 = [0,temp_dist,0,0,90,0] #rotation need to be checked later, 3 last element is rotation
        scenario.append(car1)
        scenario.append(car2)
        scenario.append(temp_speed)
        return scenario
    # evaluation function with two parameter of distance(car1, car2) and speed of car2
    def evaluation_collision(individual):
        """Fitness: (distance between cars, speed of the second car)."""
        dist = individual[1][1]
        print("distance of 2 car",dist )
        speed = individual[2]
        if (dist not in range(10,50)):
            return 10000, 0 # Ensure distance of two car too far and speed already checked in range(20,100)
        return dist, speed
    def mut_randChoice(individual):
        """Mutate one random component: either car, or the speed."""
        rand_index = randint(0, len(individual)-1)
        num_car1_state = randint(0, len(initial_pos_rot)-1)
        if rand_index== 0:
            individual[0] = initial_pos_rot[num_car1_state]
        elif rand_index == 1:
            individual[1] = initial_pos_rot[num_car1_state]
        else:
            individual[2] = randint(10,35)
        return individual
    def crossOver(ind1, ind2):
        """Swap one randomly chosen component (car1, car2 or speed)."""
        id = randint(0,2)
        #keep the first element, change the second element (change car2)
        if id == 0 :
            tmp = ind1[1]
            ind1[1] = ind2[1]
            ind2[1] = tmp
        # keep the second element, change the first element (change car 1)
        elif id == 1 :
            tmp = ind1[0]
            ind1[0] = ind2[0]
            ind2[0] = tmp
        # change speed of two individual
        elif id == 2:
            tmp = ind1[2]
            ind1[2] = ind2[2]
            ind2[2] = tmp
        return ind1, ind2
    # Wire the operators into the DEAP toolbox.
    toolbox.register("attr_float", get_scenario_State )
    toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.attr_float)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("evaluate", evaluation_collision)
    toolbox.register("mate", crossOver)
    toolbox.register("mutate", mut_randChoice)
    toolbox.register("select", tools.selNSGA2)
    NGEN = int(number_gen) #number of generation
    MU = int(num_pop) #number of population
    CXPB = 0.9
    print("=========================== START NSGAII ====================================================== ")
    print("Number of Genaration: " , NGEN)
    print("Number of Population: " , MU)
    print("=============================================================================================== ")
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    pop = toolbox.population(n=MU)
    print("\n\n\n Initial population : \n\
Each Car is a vector 6 dimensions. (x1,y1,z1,e1,u1,v1). position (x,y,z), rotation (e,u,v)\n\
Each indivudual has 2 car (12 parameters, 6 for each car) and state of scenarios \n\
(x1,y1,z1,e1,u1,v1, x2,y2,z2,e2,u2,v2, state )\n")
    print("-----------------------------------------------------------------------------------------------")
    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit
    # This is just to assign the crowding distance to the individuals
    pop = toolbox.select(pop, len(pop))
    print_pop(pop)
    #Intergration DT in select population
    record = stats.compile(pop)
    #logbook.record(gen=0, evals=len(invalid_ind), **record)
    # Begin the generational process
    print(str( sys_output.trace_func(str(file_name()), str(sys._getframe().f_code.co_name))))
    for gen in range(1, NGEN+1):
        # Vary the population
        sys_output.print_title("ENTER LOOP OF GENERATION WITH DECISION TREE: Generation " + str(gen))
        sys_output.print_sub_tit("1. Select individuals from Decision Tree")
        # select population from decision tree( The testcases are run on BuildDrive)
        pop_DT = select_pop_DT(pop)
        print ("\n\n Population is select for generation: ")
        print_pop(pop_DT[1])
        pop_DT_new = select_gen_from_pop_DtPop(MU,pop)
        pop = toolbox.select(pop_DT_new, MU)
        sys_output.print_sub_tit("2. Generate offspring ....")
        #offspring = tools.selTournamentDCD(pop, len(pop))
        offspring = tools.selRandom(pop, len(pop))
        offspring = [toolbox.clone(ind) for ind in offspring]
        # Crossover with probability CXPB, then always mutate both children.
        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            if random.random() <= CXPB:
                toolbox.mate(ind1, ind2)
            toolbox.mutate(ind1)
            toolbox.mutate(ind2)
            del ind1.fitness.values, ind2.fitness.values
        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
        # Select the next generation population
        pop = toolbox.select(pop + offspring, MU)
        sys_output.print_sub_tit("3. Select individuals from offspring and population")
        print("\n3. Select individuals from offspring and population \n")
        record = stats.compile(pop)
        #logbook.record(gen=gen, evals=len(invalid_ind), **record)
        #save generation population
        print("Population is generated ")
        print_pop(pop)
        date_time = now.strftime("%m_%d_%Y__%H%M%S")
        file_gen = path_dir +"\store\population_" + date_time +".txt"
        with open(file_gen, 'w') as f:
            f.write(str(pop))
        sys_output.print_sub_tit("Generation %s is save in %s "%(gen,file_gen ))
    # Persist the final population, one individual per line.
    file = "population.txt"
    sys_output.print_sub_tit("Final population is saved in %s "%(path_dir +"\population.txt" ))
    with open(file, 'w') as f:
        for line in pop:
            f.write(str(line)+"\n")
    return pop
def select_pop_DT(pop):
    """Select the individuals a trained decision tree flags as critical.

    Returns:
        (pop_idx, new_pop): the indices chosen by the tree and the
        matching individuals from *pop*.
    """
    new_pop = []
    #run population on DriveBuild
    data = decision_tree.convert_dataFrame(pop)
    now = datetime.now()
    date_time = now.strftime("%m_%d_%Y__%H%M%S")
    path_dir = os.getcwd()
    # NOTE(review): this timestamped path is only printed below; the CSV
    # itself is written to the fixed name 'decision_tree_data.cvs'.
    file_name_cvs = path_dir +"\store\\DT_data" + date_time +".cvs"
    data.to_csv(r'decision_tree_data.cvs')
    print("\n Data to build the tree is save in " , file_name_cvs )
    print("\nc. Train decision tree and select critical events")
    #get leaf which contains the critical situations
    pop_idx = decision_tree.get_critical_sample_DT(data)
    #print("index of decision tree", pop_idx)
    for i in pop_idx:
        new_pop.append(pop[i])
    return pop_idx, new_pop
def select_gen_from_pop_DtPop(num_pop, pop):
    """Top up the DT-selected individuals with random, not-yet-chosen
    members of *pop* until the result holds *num_pop* individuals."""
    idx, dt_pop = select_pop_DT(pop)
    counter = len(dt_pop)
    print("\n Select_gen_from_pop_DtPop \n ")
    print_pop(pop)
    while counter < num_pop:
        new_idx = randint(0,len(pop)-1)
        #check whether the element is selected from population or not
        while check_idx(idx,new_idx):
            new_idx = randint(0,len(pop)-1)
        idx.append(new_idx)
        dt_pop.append(pop[new_idx])
        counter +=1
    print("\n List idx which is used to select individuals from populaiton: \n", idx)
    return dt_pop
def check_idx(list_idx, x):
    """Return 1 if *x* already appears in *list_idx*, else 0.

    Simplified from a manual scan to the idiomatic membership test;
    int() keeps the historical 0/1 return type.
    """
    return int(x in list_idx)
if __name__ == "__main__":
main(sys.argv[1],sys.argv[2])
|
985,254 | ea01162b16889ddd1fe22c71cfa72fff85ae30d1 | from jupyterdrive.clientsidenbmanager import ClientSideContentsManager
from jupyterdrive.mixednbmanager import MixedContentsManager
import inspect
def doesmatch(TheClass):
    """
    check whether all the methods of TheClass have the same signature
    as in the base parent class to track potential regression, or evolution upstream
    """
    import sys
    # inspect.signature does not exist on Python 2; skip the check there.
    if sys.version_info.major < 3:
        return None
    S = TheClass.__base__
    for meth_name in dir(TheClass):
        # Only compare attributes the parent class also defines.
        if not hasattr(S, meth_name):
            continue
        meth = getattr(TheClass, meth_name)
        if(callable(meth)):
            try:
                match = (inspect.signature(meth) == inspect.signature(getattr(S,meth_name)))
                #assert(match)
                if not match:
                    print(meth_name, ' : does not match parent signature', inspect.signature(meth) , inspect.signature(getattr(S,meth_name)))
            except ValueError:
                # Some callables expose no retrievable signature; ignore them.
                pass
def test_1():
    # Signature parity: client-side manager vs. its upstream base class.
    doesmatch(ClientSideContentsManager)
def test_2():
    # Signature parity: mixed manager vs. its upstream base class.
    doesmatch(MixedContentsManager)
|
985,255 | 4d0aff3c7cd5c9bd74a8791c2f34e00008003977 | import cv2
# Connect to the default camera device.
cap = cv2.VideoCapture(0)
while(True):
    # Read one frame from the camera.
    ret, frame = cap.read()
    # Display the captured frame.
    cv2.imshow('frame',frame)
    # Quit the preview loop when 'q' is pressed.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release the camera.
cap.release()
cv2.destroyAllWindows() |
985,256 | 5419d9457aeb135f2c9e7b64d24818340b8fae45 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 22 17:43:36 2020
sort csv fn
@author: minjie
"""
import pandas as pd
import numpy as np
#%%
# Per-model prediction CSVs to normalize in place (sorted + fixed formatting).
fns = ['../checkpoints/eval_resnet50_singleview-Loss-ce-tta-0.csv',
       '../checkpoints/eval_resnet50_singleview-Loss-ce-tta-1.csv',
       '../checkpoints/eval_resnet50_metasingleview-Loss-ce-tta-0.csv',
       '../checkpoints/eval_resnet50_metasingleview-Loss-ce-tta-1.csv',
       '../checkpoints/eval_effnetb4_singleview-Loss-ce-tta-0.csv',
       '../checkpoints/eval_effnetb4_singleview-Loss-ce-tta-1.csv',
       '../checkpoints/eval_effnetb4_metasingleview-Loss-ce-tta-0.csv',
       '../checkpoints/eval_effnetb4_metasingleview-Loss-ce-tta-1.csv',
       ]
for fn in fns:
    # Load and sort all rows by the first column (the record identifier).
    gts = pd.read_csv(fn).values
    idx = np.argsort(gts[:,0])
    gts = gts[idx,:]
    # Rebuild a frame: 7 class scores plus predicted and ground-truth labels.
    df = pd.DataFrame(data = gts[:,1:].astype('float32'),index =gts[:,0], columns = [ 'MEL', 'NV','BCC', 'AKIEC', 'BKL', 'DF','VASC','pred', 'GT'])
    # Scores to 4 decimal places; labels rendered as plain integers.
    for col in [ 'MEL', 'NV','BCC', 'AKIEC', 'BKL', 'DF','VASC']:
        df[col] = df[col].apply(lambda x: format(x,'.4f'))
    for col in ['pred', 'GT']:
        df[col] = df[col].apply(lambda x: format(x,'.0f'))
    # Overwrite the source file in place.
    df.to_csv(fn)
|
985,257 | f4180a3570a94586ed96dac50d2488373e3cd5fc |
# Returns a bool indicating whether the list is sorted (i.e. is non-decreasing,
# it is okay to have adjacent elements which are equal).
#
# Arguments:
# l (type: list of ints): list that may or may not be sorted.
#
# Example:
# list_is_sorted([1,2,2,8,9]) should return True.
# list_is_sorted([1,2,2,8,6]) should return False.
def list_is_sorted(l):
    """Return True when *l* is non-decreasing (adjacent equal elements OK).

    Examples:
        list_is_sorted([1, 2, 2, 8, 9]) -> True
        list_is_sorted([1, 2, 2, 8, 6]) -> False
    """
    # Compare each element with its successor; an empty or one-element
    # list has no adjacent pairs, so all() is vacuously True.
    return all(a <= b for a, b in zip(l, l[1:]))
# This should print True
print(list_is_sorted([1,2,2,8,9]))
# This should print False
print(list_is_sorted([1,2,2,8,6]))
|
985,258 | 99ee41a0932107037075500aea9226c67955d6aa | class MyFirstClass:
def __init__(self, name, old):
    # name: the person's first name; old: their age in years.
    self.name = name
    self.old = old
def __str__(self):
    # Fixed display string, regardless of instance state.
    return 'privet'
def return_class(self):
    """Return the instance's name with the surname 'Bodnar' appended."""
    return self.name + 'Bodnar'
a = MyFirstClass('John', 18)
b = MyFirstClass('Tom', 22)
print(a) |
985,259 | 048582e4dc805d2e899a1ce24397809fc87b6733 | for _ in range(int(input())):
a, b = divmod(int(input()), 2)
print(a - (1-b)) |
985,260 | 653cde5a17b5250fee6ee821a7d5053291356c03 | from django.db import models
from django.utils import timezone
class BlogTopics(models.Model):
    """A topic/category a blog post can belong to."""
    topic = models.CharField(max_length=120)
    def __str__(self):
        return self.topic
class NewPost(models.Model):
    """A blog post; publication is an explicit step via publish()."""
    author = models.ForeignKey('auth.User')
    title = models.CharField(max_length=90)
    topic = models.ForeignKey(BlogTopics)
    main_text = models.TextField(max_length=5000, blank=True, null=True)
    date_created = models.DateTimeField(default=timezone.now)
    # Remains NULL until publish() is called.
    posted_date = models.DateTimeField(blank=True, null=True)
    comments = models.TextField(max_length=366)
    def publish(self):
        # Stamp the publication time and persist it.
        self.posted_date = timezone.now()
        self.save()
    def __str__(self):
        return self.title
|
985,261 | 472467d3f46a9b4be269c2ead69bbaea98d08b1a | # -*- coding: utf-8 -*-
# (c) 2016 Bright Interactive Limited. All rights reserved.
# http://www.bright-interactive.com | info@bright-interactive.com
from functools import wraps
from django.conf import settings
from django.utils.decorators import available_attrs
from assetbankauth.utils import authenticate_token_in_request, assetbank_login_redirect, authenticated_user_in_session
def ensure_assetbank_authenticated_user_in_session():
    """Decorator factory: require an Asset Bank-authenticated session.

    The wrapped view runs normally when Asset Bank auth is disabled, when
    the session already holds an authenticated user, or when the request
    carries a valid auth token; otherwise the caller is redirected to the
    Asset Bank login.
    """
    def decorator(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request, *args, **kwargs):
            if not settings.ASSETBANK_AUTH_ENABLED or authenticated_user_in_session(request) or authenticate_token_in_request(request):
                return view_func(request, *args, **kwargs)
            else:
                return assetbank_login_redirect(request)
        return _wrapped_view
    return decorator
|
985,262 | 2eb7b63c0015abc72a01716806888910c5b57471 | from django.db.models.fields.files import ImageFieldFile, FileField, FieldFile
from django.db.models import Model
from datetime import datetime
def get_json_serializable(instance, *args):
    """Convert a model instance into a dict of JSON-serializable values.

    Args:
        instance: model object whose attributes are read via getattr.
        *args: extra attribute names to include beyond the model fields.

    Returns:
        dict mapping field name -> value; datetimes become strings,
        file/image fields become their URL (the field is skipped when no
        file is attached), and related model instances are serialized
        recursively.
    """
    data = {}
    fields = get_all_fields(instance)
    for field in args:
        if field in fields:
            continue
        fields.append(field)
    for field in fields:
        val = getattr(instance, field, None)
        if isinstance(val, datetime):
            val = str(val)
        # Single isinstance call instead of three chained checks.
        if isinstance(val, (ImageFieldFile, FileField, FieldFile)):
            try:
                val = val.url
            except Exception:
                # .url raises when no file is attached; skip the field.
                # (was a bare `except:`, which also swallowed SystemExit)
                continue
        if isinstance(val, Model):
            val = get_json_serializable(val)
        data[field] = val
    return data
def serialize(classmodel, *args, **kwargs):
    """Serialize every instance of *classmodel* matching the filter kwargs.

    *args* are extra attribute names passed through to
    get_json_serializable for each object.
    """
    queryset = classmodel.objects.filter(**kwargs)
    return [get_json_serializable(obj, *args) for obj in queryset]
def get_all_fields(classmodel):
    """Return the names of all concrete and many-to-many fields of a model."""
    meta = classmodel._meta
    return [field.name for field in meta.fields + meta.many_to_many]
|
985,263 | 78a458f8f03727f4a57b0a14ccd6bcdff7ec023b | import pandas as pd
from linora.metrics._utils import _sample_weight
__all__ = ['mapk', 'hit_ratio', 'mean_reciprocal_rank']
def mapk(y_true, y_pred, k, sample_weight=None):
    """Mean Average Precision k
    Args:
        y_true: pd.Series or array or list, ground truth (correct) labels.
        y_pred: pd.Series or array or list, predicted values, as returned by a rank.
        k: int, top k predict values.
        sample_weight: list or array of sample weight.
    Returns:
        Mean Average Precision k values.
    """
    def apk(actual, predict, weight, k):
        # Average precision for one sample, scaled by its sample weight.
        if len(predict)>k:
            predict = predict[:k]
        score = 0.0
        nums = 0.0
        for i,p in enumerate(predict):
            # Count each distinct predicted item at most once.
            if p in actual and p not in predict[:i]:
                nums += 1.0
                score += nums / (i+1.0)
        # Normalize by the best achievable hit count; empty truth -> 0.
        return score / min(len(actual), k)*weight if actual else 0.0
    sample_weight = _sample_weight(y_true, sample_weight)
    # One row per sample: (truth, prediction, weight); average per-row AP@k.
    return pd.DataFrame({'label1':y_true, 'label2':y_pred, 'weight':sample_weight}).apply(lambda x:apk(x[0], x[1], x[2], k=k), axis=1).mean()
def hit_ratio(y_true, y_pred, k, sample_weight=None):
    """Hit Ratio k
    Args:
        y_true: pd.Series or array or list, ground truth (correct) labels.
        y_pred: pd.Series or array or list, predicted values, as returned by a rank.
        k: int, top k predict values.
        sample_weight: list or array of sample weight.
    Returns:
        Hit Ratio k values.
    """
    sample_weight = _sample_weight(y_true, sample_weight)
    t = pd.DataFrame({'label1':y_true, 'label2':y_pred, 'weight':sample_weight})
    # Weighted count of true items recovered in the top-k predictions,
    # divided by the total number of distinct true items.
    return t.apply(lambda x:len(set(x[0]).intersection(set(x[1][:k])))*x[2], axis=1).sum()/t.label1.map(lambda x:len(set(x))).sum()
def mean_reciprocal_rank(y_true, y_pred, k, sample_weight=None):
    """Mean Reciprocal Rank
    Args:
        y_true: pd.Series or array or list, ground truth (correct) labels.
        y_pred: pd.Series or array or list, predicted values, as returned by a rank.
        k: int, top k predict values.
        sample_weight: list or array of sample weight.
    Returns:
        mean reciprocal rank k values.
    """
    def mrr(actual, predict, weight, k):
        # Reciprocal of the 1-based rank of the true item within the
        # top-k predictions, scaled by the sample weight.
        try:
            rank = 1./(predict[:k].index(actual)+1)*weight
        except (ValueError, AttributeError):
            # ValueError: item not in the top-k; AttributeError: predict
            # has no .index (e.g. ndarray). Both count as a miss.
            # (was a bare `except:`, which also caught SystemExit)
            rank = 0
        return rank
    sample_weight = _sample_weight(y_true, sample_weight)
    return pd.DataFrame({'label1':y_true, 'label2':y_pred, 'weight':sample_weight}).apply(lambda x: mrr(x[0], x[1], x[2], k=k), axis=1).mean()
|
985,264 | b3c8eec999eaff7e5fd889ebfb8e5649b5c9dde9 | # ========================================================================
# Copyright (C) 2019 The MITRE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import unittest
from projectq import MainEngine
from projectq.ops import *
import math
class SuperpositionTests(unittest.TestCase):
    """
    This class contains some basic tests to show how ProjectQ deals with qubits in superposition.
    Each of these tests will create a circuit that prepares a superposition, simulates the circuit
    multiple times, and measures the qubits after each iteration. The number of times that each
    qubit is in the |0〉 state is recorded, and these results are compared to the expected probabilities
    for each test.
    """

    def run_test(self, test_function, description, iterations, target_probabilities, margin):
        """
        Runs a given superposition preparation function as a unit test.

        Parameters:
            test_function (function): The function that implements the actual test, by
                converting the qubits into the target state.
            description (str): A description of the test, for logging.
            iterations (int): The number of times to run the circuit before calculating
                each qubit's |0〉 probability.
            target_probabilities (list[float]): The expected probabilities for each qubit
                of being in the |0〉 state.
            margin (float): The allowed error margin for each qubit's probability.

        Remarks:
            ProjectQ doesn't actually build a standalone circuit / program object like
            the other python frameworks; it actually runs the simulation instruction-by-
            instruction, line-by-line right here in the Python code. This is a lot closer
            to how Q# does things. Because of this, we don't actually build a circuit as
            an object and pass it around; instead, we just run the whole simulation in
            a for loop.
        """
        print(f"Running test: {description}")
        number_of_qubits = len(target_probabilities)
        zero_counts = [0] * number_of_qubits

        # Create the engine and the qubit register (it's more efficient to create it
        # and reuse it outside of the loop than rebuilding it every iteration).
        engine = MainEngine()
        qubits = engine.allocate_qureg(number_of_qubits)

        # Run the test N times.
        for _ in range(0, iterations):
            # Run the test function, which will put the qubits into the desired state
            test_function(qubits)

            # Measure the qubits
            for qubit in qubits:
                Measure | qubit

            # Flush the engine, ensuring all of the simulation is done
            engine.flush()

            # Increment the zero count for any qubit that was measured to be |0>
            for i in range(0, number_of_qubits):
                if int(qubits[i]) == 0:
                    zero_counts[i] += 1
                else:
                    # Reset the qubit to |0> since we're reusing the register. Note that
                    # ProjectQ doesn't have a Reset function, so we have to do it manually.
                    X | qubits[i]

        # Compare the probabilities with the targets
        target_string = "Target: [ "
        result_string = "Result: [ "
        for i in range(number_of_qubits):
            target_probability = target_probabilities[i]
            measured_probability = zero_counts[i] / iterations  # Python 3 automatically does float division
            target_string += "{:.4f}".format(target_probability) + " "
            result_string += "{:.4f}".format(measured_probability) + " "

            discrepancy = abs(target_probability - measured_probability)
            if discrepancy > margin:
                self.fail(f"Test {description} failed. Qubit {i} had a |0> probability of " +
                          f"{measured_probability}, but it should have been {target_probability} " +
                          f"(with a margin of {margin}).")

        # If the test passed, print the results.
        target_string += "]"
        result_string += "]"
        print(target_string)
        print(result_string)
        print("Passed!")
        print()

    def identity_function(self, qubits):
        """
        Applies the identity (I) gate to the qubits in the given register.

        Parameters:
            qubits (Qureg): The qubit register being tested
        """
        # Note: ProjectQ doesn't actually have an I gate, so this test does absolutely
        # nothing.

    def test_identity(self):
        """
        This tests the Identity gate, which does nothing. It's used to test that qubits
        are initialized to |0〉 in ProjectQ.
        """
        iterations = 10000
        target_probabilities = [1]
        # Pass the local `iterations` instead of repeating the literal, so the
        # count can be tuned in one place.
        self.run_test(self.identity_function, "Identity", iterations, target_probabilities, 0)

    def invert_function(self, qubits):
        """
        Applies the X gate to the register.

        Parameters:
            qubits (Qureg): The qubit register being tested
        """
        for qubit in qubits:
            X | qubit

    def test_invert(self):
        """
        This tests the X gate, which should flip qubits from |0〉 to |1〉. Each qubit should be |1〉
        with 100% probability after this test.
        """
        iterations = 10000
        target_probabilities = [0, 0]
        self.run_test(self.invert_function, "Invert", iterations, target_probabilities, 0)

    def hadamard_function(self, qubits):
        """
        Applies the H gate to the register.

        Parameters:
            qubits (Qureg): The qubit register being tested
        """
        for qubit in qubits:
            H | qubit

    def test_hadamard(self):
        """
        This tests the H gate, which should put the qubits in a uniform superposition of |0〉 to |1〉.
        Each qubit should have a 50% chance of being |0〉.
        """
        iterations = 10000
        target_probabilities = [0.5, 0.5, 0.5, 0.5]
        self.run_test(self.hadamard_function, "Hadamard", iterations, target_probabilities, 0.02)

    def arbitrary_rotation_function(self, qubits):
        """
        This function will perform rotations around the Bloch sphere so that each qubit has an evenly-
        incrementing chance of being in the |0〉 state. For example, for 3 qubits, it will be
        0% for the first, 50% for the second, and 100% for the third. For 4 qubits, it will be
        0%, 33%, 66%, and 100%.

        Parameters:
            qubits (Qureg): The qubit register being tested
        """
        # Calculate the probabilities for each qubit, and add the rotation gates
        interval = 1 / (len(qubits) - 1)
        for i in range(0, len(qubits)):
            target_probability = i * interval

            # To get this probability, we have to rotate around the Y axis
            # (AKA just moving around on the X and Z plane) by this angle.
            # The Bloch equation is |q> = cos(θ/2)|0> + e^iΦ*sin(θ/2)|1>,
            # where θ is the angle from the +Z axis on the Z-X plane, and Φ
            # is the angle from the +X axis on the X-Y plane. Since we aren't
            # going to bring imaginary numbers into the picture for this test,
            # we can leave Φ at 0 and ignore it entirely. We just want to rotate
            # along the unit circle defined by the Z-X plane, thus a rotation
            # around the Y axis.
            #
            # The amplitude of |0> is given by cos(θ/2) as shown above. The
            # probability of measuring |0> is the amplitude squared, so
            # P = cos²(θ/2). So to get the angle, it's:
            # √P = cos(θ/2)
            # cos⁻¹(√P) = θ/2
            # θ = 2cos⁻¹(√P)
            # Then we just rotate the qubit by that angle around the Y axis,
            # and we should be good.
            #
            # See https://en.wikipedia.org/wiki/Bloch_sphere for more info on
            # the Bloch sphere, and how rotations around it affect the qubit's
            # probabilities of measurement.
            angle = 2 * math.acos(math.sqrt(target_probability))
            Ry(angle) | qubits[i]

    def test_arbitrary_rotation(self):
        """
        This tests arbitrary rotations around the Y axis (so the X-Z plane) to make sure ProjectQ
        can deal with any given superposition.
        """
        # This test is run a bunch of times on various intervals, ranging from 50% to 1/6
        # (16.667%).
        for i in range(2, 7):
            interval = 1 / i  # The amount to increase each qubit's probability by, relative to the previous qubit
            step_string = "{:.4f}".format(100 / i)  # The decimal representation of the interval, as a percent
            target_probabilities = [0] * (i + 1)  # This will store the desired probabilities of each qubit
            for j in range(0, i + 1):
                target_probability = j * interval
                target_probabilities[j] = target_probability

            # Run the test
            self.run_test(self.arbitrary_rotation_function, f"Rotation with steps of 1/{i} ({step_string}%)", 2000, target_probabilities, 0.05)
# Run the superposition tests when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
985,265 | 15bddda5b51173794e11df0be3152cbd32770db1 | import cx_Oracle
import gpxpy.geo
import numpy as np
from time import time
# Wall-clock reference used for the cumulative runtime report printed per face.
totalExecutionTime = time()

# Column index of each field in the rows returned by the faces/images join
# below; keeps the positional tuple access readable.
fields = {
    "face_id": 0,
    "description": 1,
    "label": 2,
    "image": 3,
    "path": 4,
    "nbr_faces": 5,
    "mimetype": 6,
    "fileaccessdate": 7,
    "filemodifydate": 8,
    "filesize": 9,
    "filetype": 10,
    "filetypeextension": 11,
    "imageheight": 12,
    "imagewidth": 13,
    "datetimeoriginal": 14,
    "createdate": 15,
    "modifydate": 16,
    "exifimageheight": 17,
    "exifimagewidth": 18,
    "gpslongitude": 19,
    "gpslatitude": 20,
    "gpsaltitude": 21,
    "gpsimgdirection": 22,
    "gpslongituderef": 23,
    "gpslatituderef": 24,
    "gpsaltituderef": 25,
    "gpsimgdirectionref": 26,
    "gpstimestamp": 27,
    "orientation": 28,
    "flash": 29,
    "make": 30,
    "model": 31
}
# Connect to the local Oracle XE instance and fetch every labeled,
# not-yet-treated face joined with its image metadata.
connection = cx_Oracle.connect('python/python@127.0.0.1/xe')
cursor = connection.cursor()
cursor.execute("SELECT face_id, description, label, image, "
               "path, "
               "nbr_faces, "
               "mimetype, "
               "fileaccessdate, "
               "filemodifydate, "
               "filesize, "
               "filetype, "
               "filetypeextension, "
               "imageheight, "
               "imagewidth, "
               "datetimeoriginal, "
               "createdate, "
               "modifydate, "
               "exifimageheight, "
               "exifimagewidth, "
               "gpslongitude, "
               "gpslatitude, "
               "gpsaltitude, "
               "gpsimgdirection, "
               "gpslongituderef, "
               "gpslatituderef, "
               "gpsaltituderef, "
               "gpsimgdirectionref, "
               "gpstimestamp, "
               "orientation, "
               "flash, "
               "make, "
               "model FROM faces, images WHERE image = image_id AND label <> 'Others' AND treated = 0 ORDER BY face_id")
# Materialise the whole result set; each row is addressed positionally via
# the `fields` index map above.
faces = cursor.fetchall()
# For every untreated labeled face, compare it against all faces belonging to
# other images and write one INSERT statement per pair into a per-face file in
# ./queries/.  Each row encodes "same/diff" features of the pair's EXIF
# metadata plus the squared distance between the two face descriptors.
#
# NOTE(review): all SQL below is built by string concatenation from database
# values; if a face_id or image id can contain a quote this breaks (or allows
# injection) -- bind variables would be safer. Flagged only, not changed here.
counter = 1
for face in faces:
    #############################################################################################################
    f = open("./queries/" + face[fields["face_id"]] + ".txt", "w+")
    #############################################################################################################
    startTime = time()
    print("Comparing labeled face " + str(counter) + " '" + face[fields["face_id"]] + "'")
    # (A previous variant of this query additionally excluded faces already
    # present in faces_neighbors via "face_id NOT IN (SELECT face1_id FROM
    # faces_neighbors)"; the treated flag now serves that purpose.)
    cursor.execute("SELECT face_id, description, label, image, "
                   "path, "
                   "nbr_faces, "
                   "mimetype, "
                   "fileaccessdate, "
                   "filemodifydate, "
                   "filesize, "
                   "filetype, "
                   "filetypeextension, "
                   "imageheight, "
                   "imagewidth, "
                   "datetimeoriginal, "
                   "createdate, "
                   "modifydate, "
                   "exifimageheight, "
                   "exifimagewidth, "
                   "gpslongitude, "
                   "gpslatitude, "
                   "gpsaltitude, "
                   "gpsimgdirection, "
                   "gpslongituderef, "
                   "gpslatituderef, "
                   "gpsaltituderef, "
                   "gpsimgdirectionref, "
                   "gpstimestamp, "
                   "orientation, "
                   "flash, "
                   "make, "
                   "model FROM faces, images WHERE image = image_id AND image <> '" + face[fields['image']] + "' AND treated = 0")
    sub_faces = cursor.fetchall()
    print("Query time: " + str(time() - startTime) + " seconds")
    # counter2 = 1
    for sub_face in sub_faces:
        # print("Comparing labeled face " + str(counter) + " '" + face[fields["face_id"]] + "' to face " + str(counter2) + " '" + sub_face[fields["face_id"]] + "'")
        query = "INSERT INTO faces_neighbors(" \
                "face1_id, " \
                "face2_id, " \
                "same_path, " \
                "diff_nbr_faces, " \
                "same_mimetype, " \
                "diff_fileaccessdate, " \
                "diff_filemodifydate, " \
                "diff_filesize, " \
                "same_filetype, " \
                "same_filetypeextension, " \
                "same_imageheight, " \
                "same_imagewidth, " \
                "diff_datetimeoriginal, " \
                "diff_createdate, " \
                "diff_modifydate, " \
                "same_exifimageheight, " \
                "same_exifimagewidth, " \
                "distance, " \
                "diff_gpsaltitude, " \
                "diff_gpsimgdirection, " \
                "same_gpslongituderef, " \
                "same_gpslatituderef, " \
                "same_gpsaltituderef, " \
                "same_gpsimgdirectionref, " \
                "same_orientation, " \
                "same_flash, " \
                "same_make, " \
                "same_model, " \
                "fr, " \
                "neighbor)" \
                " VALUES ('" + face[fields["face_id"]] + "', '" + sub_face[fields["face_id"]] + "'"
        # Binary "same" features: 1 when the two images share the attribute.
        query += ", 1" if face[fields["path"]] == sub_face[fields["path"]] else ", 0"
        query += ", " + str(abs(face[fields["nbr_faces"]] - sub_face[fields["nbr_faces"]]))
        query += ", 1" if face[fields["mimetype"]] == sub_face[fields["mimetype"]] else ", 0"
        # Date deltas are stored as whole seconds.
        query += ", " + str(int(abs(face[fields["fileaccessdate"]] - sub_face[fields["fileaccessdate"]]).total_seconds()))
        query += ", " + str(int(abs(face[fields["filemodifydate"]] - sub_face[fields["filemodifydate"]]).total_seconds()))
        query += ", " + str(abs(face[fields["filesize"]] - sub_face[fields["filesize"]]))
        query += ", 1" if face[fields["filetype"]] == sub_face[fields["filetype"]] else ", 0"
        query += ", 1" if face[fields["filetypeextension"]] == sub_face[fields["filetypeextension"]] else ", 0"
        query += ", 1" if face[fields["imageheight"]] == sub_face[fields["imageheight"]] else ", 0"
        query += ", 1" if face[fields["imagewidth"]] == sub_face[fields["imagewidth"]] else ", 0"
        # Nullable EXIF attributes become SQL NULL when either side is missing.
        query += ", NULL" if face[fields["datetimeoriginal"]] == None or sub_face[fields["datetimeoriginal"]] == None else ", " + str(int(abs(face[fields["datetimeoriginal"]] - sub_face[fields["datetimeoriginal"]]).total_seconds()))
        query += ", NULL" if face[fields["createdate"]] == None or sub_face[fields["createdate"]] == None else ", " + str(int(abs(face[fields["createdate"]] - sub_face[fields["createdate"]]).total_seconds()))
        query += ", NULL" if face[fields["modifydate"]] == None or sub_face[fields["modifydate"]] == None else ", " + str(int(abs(face[fields["modifydate"]] - sub_face[fields["modifydate"]]).total_seconds()))
        query += ", 1" if face[fields["exifimageheight"]] == sub_face[fields["exifimageheight"]] else ", 0"
        query += ", 1" if face[fields["exifimagewidth"]] == sub_face[fields["exifimagewidth"]] else ", 0"
        # Geographic distance in metres between the two capture locations.
        query += ", NULL" if face[fields["gpslongitude"]] == None or face[fields["gpslatitude"]] == None or sub_face[fields["gpslongitude"]] == None or sub_face[fields["gpslatitude"]] == None else ", " + str(int(gpxpy.geo.haversine_distance(face[fields["gpslongitude"]], face[fields["gpslatitude"]], sub_face[fields["gpslongitude"]], sub_face[fields["gpslatitude"]])))
        query += ", NULL" if face[fields["gpsaltitude"]] == None or sub_face[fields["gpsaltitude"]] == None else ", " + str(int(abs(face[fields["gpsaltitude"]] - sub_face[fields["gpsaltitude"]])))
        # Compass-direction delta is wrapped so it never exceeds 180 degrees.
        query += ", NULL" if face[fields["gpsimgdirection"]] == None or sub_face[fields["gpsimgdirection"]] == None else ", " + str(int(abs(face[fields["gpsimgdirection"]] - sub_face[fields["gpsimgdirection"]]))) if int(abs(face[fields["gpsimgdirection"]] - sub_face[fields["gpsimgdirection"]])) < 180 else ", " + str(360 - int(abs(face[fields["gpsimgdirection"]] - sub_face[fields["gpsimgdirection"]])))
        query += ", NULL" if face[fields["gpslongituderef"]] == None or sub_face[fields["gpslongituderef"]] == None else ", 1" if face[fields["gpslongituderef"]] == sub_face[fields["gpslongituderef"]] else ", 0"
        query += ", NULL" if face[fields["gpslatituderef"]] == None or sub_face[fields["gpslatituderef"]] == None else ", 1" if face[fields["gpslatituderef"]] == sub_face[fields["gpslatituderef"]] else ", 0"
        query += ", NULL" if face[fields["gpsaltituderef"]] == None or sub_face[fields["gpsaltituderef"]] == None else ", 1" if face[fields["gpsaltituderef"]] == sub_face[fields["gpsaltituderef"]] else ", 0"
        query += ", NULL" if face[fields["gpsimgdirectionref"]] == None or sub_face[fields["gpsimgdirectionref"]] == None else ", 1" if face[fields["gpsimgdirectionref"]] == sub_face[fields["gpsimgdirectionref"]] else ", 0"
        query += ", NULL" if face[fields["orientation"]] == None or sub_face[fields["orientation"]] == None else ", 1" if face[fields["orientation"]] == sub_face[fields["orientation"]] else ", 0"
        query += ", NULL" if face[fields["flash"]] == None or sub_face[fields["flash"]] == None else ", 1" if face[fields["flash"]] == sub_face[fields["flash"]] else ", 0"
        query += ", NULL" if face[fields["make"]] == None or sub_face[fields["make"]] == None else ", 1" if face[fields["make"]] == sub_face[fields["make"]] else ", 0"
        query += ", NULL" if face[fields["model"]] == None or sub_face[fields["model"]] == None else ", 1" if face[fields["model"]] == sub_face[fields["model"]] else ", 0"
        # Squared Euclidean distance between the two face descriptors, which
        # are stored as space-separated floats wrapped in brackets.
        # NOTE(review): np.fromstring on text is deprecated in recent numpy --
        # TODO confirm the numpy version pinned for this script.
        face1 = np.fromstring(face[fields["description"]][1:-1], dtype=float, sep=" ")
        face2 = np.fromstring(sub_face[fields["description"]][1:-1], dtype=float, sep=" ")
        d = face1 - face2
        diff = np.dot(d, d)
        query += ", " + str(diff)
        # `neighbor` ground truth: 1 when both faces carry the same label.
        query += ", 1)" if face[fields["label"]] == sub_face[fields["label"]] else ", 0)"
        ################################################################################################
        f.write(query + ";\n")
        ################################################################################################
        # cursor.execute(query)
        # counter2 += 1
        # if counter2 == 5: break
    ########################################################################################################
    f.close()
    ########################################################################################################
    counter += 1
    # if counter == 5: break
    # Mark this face as processed so reruns skip it.
    cursor.execute("UPDATE faces SET treated = 1 WHERE face_id = '" + face[fields["face_id"]] + "'")
    connection.commit()
    print("Processed in " + str(time() - startTime) + " seconds")
    print("Cumulative execution time: " + str(time() - totalExecutionTime) + " seconds")
cursor.close()
connection.commit()
connection.close()
# CREATE TABLE faces_neighbors (
# face1_id VARCHAR2(25) NOT NULL,
# face2_id VARCHAR2(25) NOT NULL,
# same_path NUMBER(1) DEFAULT 0 NOT NULL,
# diff_nbr_faces NUMBER(2) DEFAULT 0 NOT NULL,
# same_mimetype NUMBER(1) DEFAULT 0 NOT NULL,
# diff_fileaccessdate NUMBER(9) DEFAULT 0 NOT NULL,
# diff_filemodifydate NUMBER(9) DEFAULT 0 NOT NULL,
# diff_filesize NUMBER(8) DEFAULT 0 NOT NULL,
# same_filetype NUMBER(1) DEFAULT 0 NOT NULL,
# same_filetypeextension NUMBER(1) DEFAULT 0 NOT NULL,
# same_imageheight NUMBER(1) DEFAULT 0 NOT NULL,
# same_imagewidth NUMBER(1) DEFAULT 0 NOT NULL,
# diff_datetimeoriginal NUMBER(9) DEFAULT NULL,
# diff_createdate NUMBER(9) DEFAULT NULL,
# diff_modifydate NUMBER(9) DEFAULT NULL,
# same_exifimageheight NUMBER(1) DEFAULT 0 NOT NULL,
# same_exifimagewidth NUMBER(1) DEFAULT 0 NOT NULL,
# distance NUMBER(8) DEFAULT NULL,
# diff_gpsaltitude NUMBER(8) DEFAULT NULL,
# diff_gpsimgdirection NUMBER(8) DEFAULT NULL,
# same_gpslongituderef NUMBER(1) DEFAULT NULL,
# same_gpslatituderef NUMBER(1) DEFAULT NULL,
# same_gpsaltituderef NUMBER(1) DEFAULT NULL,
# same_gpsimgdirectionref NUMBER(1) DEFAULT NULL,
# same_orientation NUMBER(1) DEFAULT NULL,
# same_flash NUMBER(1) DEFAULT NULL,
# same_make NUMBER(1) DEFAULT NULL,
# same_model NUMBER(1) DEFAULT NULL,
# fr FLOAT NOT NULL,
# neighbor NUMBER(1) DEFAULT 0 NOT NULL,
# CONSTRAINT faces_comp_pk PRIMARY KEY (face1_id, face2_id),
# CONSTRAINT face1_fk
# FOREIGN KEY (face1_id)
# REFERENCES faces(face_id)
# ON DELETE CASCADE,
# CONSTRAINT face2_fk
# FOREIGN KEY (face2_id)
# REFERENCES faces(face_id)
# ON DELETE CASCADE
# ); |
985,266 | 67c54437102ad42a97d9da0769cf49726ce89d5d | #!/usr/bin/python3
# -*-coding:utf-8 -*-
#Reference:**********************************************
# @Time : 2019/9/22 4:11 下午
# @Author : baozhiqiang
# @File : seepage_first.py
# @User : bao
# @Software: PyCharm
#Reference:**********************************************
import random
import math
import networkx as nx
import numpy
from munkres import print_matrix, Munkres
from collections import defaultdict
import commons
from random import sample
class FindSource:
    # Locates multiple rumor sources in an infected network: eccentricity
    # layering of the propagation subgraph, random coverage sampling with a
    # Jaya-style refinement, then a reversal algorithm per candidate source.
    def __init__(self):
        self.initG = None  # original (uninfected) graph
        self.findSource_list = None  # sources found so far (list form)
        self.findSource_set = None  # sources found so far (set form)
        self.infectG = None  # infected graph
        self.fix_number_source = 3  # fixed number of sources to locate
        self.source_list = None  # the finally chosen source nodes
        self.true_Source_list = None  # ground-truth source nodes
        self.netwrok_filename = None  # network file name
        self.infectG_list = None  # list of infected graph instances
        self.single_best_result = None
        self.tempGraph = None  # temporary propagation subgraph
        self.first_result_cost_list = None  # good distances found for the first graph
        self.all_result_cost_list = []
        # NOTE(review): findSource_list is assigned twice in __init__ (None
        # above, [] here); the list assignment is the effective one.
        self.findSource_list = []
        self.distance_error = None

    def cal_ecctity(self):
        # Build the propagation subgraph from the infected nodes (SI == 2).
        singleRegionList = []
        for node_index in list(self.infectG.nodes()):
            if self.infectG.node[node_index]['SI'] == 2:
                singleRegionList.append(node_index)
        tempGraph = nx.Graph()
        tempGraphNodelist = []
        for edge in self.infectG.edges:
            # if infectG.adj[edge[0]][edge[1]]['Infection']==2:  # kept as an alternative condition
            if edge[0] in singleRegionList and edge[1] in singleRegionList:
                tempGraph.add_edges_from([edge], weight=1)
                tempGraphNodelist.append(edge[0])
                tempGraphNodelist.append(edge[1])
        self.tempGraph = tempGraph  # propagation subgraph built
        print('这个传播子图的节点个数,也是我们用来做u的备选集合的' + str(len(set(tempGraphNodelist))))
        print('这个感染区域的传播图节点个数')
        # Eccentricity of every node of the propagation subgraph.
        eccentricity_dict = nx.eccentricity(tempGraph)
        # print(list(eccentricity_dict.items()))
        # eccentricity_list= sorted(list(eccentricity_dict.items()), key= lambda x:x[1])
        # print(eccentricity_list)
        # Group nodes by eccentricity value (layering).
        eccentri_dict = defaultdict(list)
        for node_id, eccentric in eccentricity_dict.items():
            eccentri_dict[eccentric].append(node_id)
        print(eccentri_dict)
        # Consider layers from the largest eccentricity down; remember the maximum first.
        sort_eccentricity_dict = sorted(eccentri_dict.items(), key=lambda x: x[0], reverse=True)
        max_eccentric = sort_eccentricity_dict[0][0]
        print('输出最大的就是那个偏心率' + str(max_eccentric))
        from random import sample
        best_h = 0
        M_dis = 0
        best_h_node = []
        min_cover = 100  # any layer's coverage will certainly be below this sentinel
        tempGraph = self.infectG  # switch to the full infected graph for coverage checks
        for eccentric, node_list in sort_eccentricity_dict:
            print('how to that')
            print(eccentric, node_list)
            M_dis = max_eccentric - eccentric  # candidate BFS-tree radius for this layer
            # Sample k nodes from this layer a fixed number of times.
            temp_all_cover = 0
            temp_cover = 0
            temp_ave_cover = 0
            if len(node_list) > self.fix_number_source * 2:  # only layers with enough nodes qualify
                itemNumber = int(len(node_list) / 10)  # larger layers get proportionally more samples
                for frequency in range(itemNumber):  # sampling count scaled to layer size for fairness
                    slice = random.sample(node_list, self.fix_number_source)
                    temp_cover = commons.getSimilir1(slice, M_dis, singleRegionList, tempGraph)
                    temp_all_cover += temp_cover
                if temp_all_cover != 0:
                    temp_ave_cover = temp_all_cover / itemNumber  # average coverage error for the layer
                    print('temp_ave_cover', temp_ave_cover)
                else:
                    temp_ave_cover = 0.1
                if temp_ave_cover <= min_cover:
                    # This layer performs well: record its radius h and all of its nodes.
                    print('每次平均的覆盖率是' + str(min_cover))
                    print('temp_ave_cover', temp_ave_cover)
                    min_cover = temp_ave_cover
                    best_h_node = node_list
                    best_h = M_dis
        print('输出表现优异同学,看看' + str(best_h_node), str(best_h))
        # With the best layer found, sample heavily and refine with a Jaya-style
        # search for the best node set at the fixed radius h.
        '''
        1 构建种群样本下
        2 在固定h下更新
        '''
        fix_number_sourcetemp = self.fix_number_source
        Sampleset = []
        for i in range(50):
            Sampleset.append(random.sample(best_h_node, self.fix_number_source))
        infectG = self.infectG
        min_cover = 1
        min = 1
        mincover = None
        bestsourceNews = None
        minCoverlist = []
        for iter_number in range(4):
            for sample_index in range(len(Sampleset)):
                mincover = commons.getSimilir1(Sampleset[sample_index], best_h, singleRegionList,
                                               tempGraph)
                # Random replacement: keep any mutation that improves coverage.
                for j in range(1, 4, 1):  # mutate a few times, accept only improvements
                    # lateelement = [random.choice(best_h_node), random.choice(best_h_node),
                    #                random.choice(best_h_node),random.choice(best_h_node)]
                    lateelement = [random.choice(best_h_node) for i in range(self.fix_number_source)]
                    # print('当前输入的后面list' + str(lateelement))
                    latemincover = commons.getSimilir1(lateelement, best_h, singleRegionList, tempGraph)
                    if mincover > latemincover:
                        mincover = latemincover  # a better candidate replaces the current one
                        # print("要进行替换了" + str(Sampleset[sample_index]) + '被替换成lateelement')
                        Sampleset[sample_index] = lateelement  # replace
                        # print(Sampleset[sample_index])
            # print('经过5次迭代之后的sample的list为多少呢?' + str(Sampleset))
            # Score the whole population and keep the best member.
            for sources in Sampleset:
                mincover = commons.getSimilir1(sources, best_h, singleRegionList, tempGraph)
                if mincover < min:
                    min = mincover  # best coverage error this round
                    bestsourceNews = sources  # the node set achieving it
            print('得到多源点情况最小的覆盖率为' + str(bestsourceNews) + str(min))
            minCoverlist.append([bestsourceNews, best_h, min])
        print(minCoverlist)
        result = sorted(minCoverlist, key=lambda x: (x[2]))
        self.single_best_result = result[0]

    def cal_reverse_algorithm(self, infectG):
        # Refine each candidate node of the best result with the reversal
        # algorithm to obtain the final predicted sources.
        resultSource = []
        source = None
        for index in range(len(self.single_best_result[0])):
            source = commons.revsitionAlgorithm(self.single_best_result[0][index], self.single_best_result[1], infectG,
                                                self.tempGraph)
            resultSource.append(source)
        print(resultSource)
        self.findSource_list = resultSource

    def main(self, dir):
        '''
        Start simple: get trees working first, then move on to general graphs.
        :return:
        '''
        pre = '../data/'
        last = '.txt'
        # filename = ''
        self.initG = commons.get_networkByFile(fileName=pre + dir + last)  # load the graph
        max_sub_graph = commons.judge_data(self.initG)
        source_list = commons.product_sourceList(max_sub_graph, self.fix_number_source)
        self.true_Source_list = source_list
        self.infectG = commons.propagation1(self.initG, self.true_Source_list)  # start the infection
        self.cal_ecctity()  # find the best coverage result
        self.cal_reverse_algorithm(self.infectG)  # run the reversal algorithm to produce answers
        self.distance_error = commons.cal_distance(self.infectG, self.true_Source_list, self.findSource_list)
        # return commons.cal_distance(self.infectG, self.true_Source_list,self.findSource_list)

    '''
    计算误差100次。
    '''

    def cal_distanceError(self, dir):
        # Average the distance error over 10 runs and append it (with a
        # timestamp) to a per-network result file.
        self.fix_number_source = 3
        distance = 0
        for i in range(10):
            self.main(dir)
            distance += self.distance_error
        result = distance / 10
        # import the time module
        import time
        # print a timestamp
        # print(time.time())
        pre = './result/'
        last = '.txt'
        with open(pre + dir + 'first' + last, 'a') as f:
            f.write(str(time.asctime(time.localtime(time.time()))) + '\n')
            f.write(str(10) + ' ' + str(result))
        print(distance / 10)
'''
2 计算图的所有点偏心率,The eccentricity of a node v is the maximum distance from v to
all other nodes in G.
3 按照什么来分级,从而针对外围边进行滲流理论。
4 进行滲流后,使用最好的那个中心性。进行多源定位。
5 可选的有先按照偏心率分级/先单源定位/按照中心性分级,进行多源定位。
'''
# Script entry: run the averaged-error experiment on the CA-GrQc network.
test = FindSource()
filename = 'CA-GrQc'
test.cal_distanceError(filename)
985,267 | ef2ca72dffb245f619546fe14f336e9517b96ff0 | #!/usr/bin/env python3
# Merge the 2 files created by the DAQ into one
import sys
import argparse
import time
import os.path;
import h5py;
import numpy as np;
import collections;
import re;
# change this to insert ref cols with value zero at the left if you want.
# Number of zero-filled reference columns prepended to each merged frame.
numrefcols = 32;
def splitGFC(pval):
    """Decode a packed pixel value into its (gain, fine, coarse) fields.

    Bit layout of *pval*: gain = bits 15..13, fine = bits 12..5,
    coarse = bits 4..0.  Works element-wise on numpy arrays as well.
    """
    gain = pval >> 13
    fine = (pval >> 5) & 0xff
    coarse = pval & 0x1f
    return gain, fine, coarse
def merge_files_and_save(filename1, filename2, outfile, delete=True, split=False):
    """Interleave the frames of two paired capture files into one output file.

    filename1 and filename2 do need to be in the right order: frames from
    filename1 land at even indices, frames from filename2 at odd indices.
    Each frame is widened by `numrefcols` zero-filled reference columns at
    the left.

    Parameters:
        filename1, filename2 (str): paths of the paired input h5 files.
        outfile (str): path of the merged output h5 file.
        delete (bool): remove the two input files after a successful merge.
        split (bool): also store separate gain/coarse/fine datasets.
    """
    # Use context managers so all three HDF5 handles are closed even when an
    # exception (or the shape bail-out) occurs mid-merge; the previous code
    # never closed any of them.
    with h5py.File(filename1, "r") as f1, h5py.File(filename2, "r") as f2, \
         h5py.File(outfile, "w") as of:
        for dset in ["data", "reset", "ecount"]:
            if dset not in f1:
                continue
            shape1 = f1[dset].shape
            if shape1[1] != 1484 or shape1[2] != 1408:
                print("oh dear wrong shape on input", shape1)
                sys.exit(1)
            # we are going to expand the frame to have ref cols at the left
            numfr1 = shape1[0]
            numfr2 = f2[dset].shape[0]
            outshape = (numfr1 + numfr2, 1484, 1408 + numrefcols)
            dtp = "float32" if dset == "ecount" else "uint16"
            output = np.zeros(outshape, dtype=dtp)
            for i in range(numfr1):
                output[i * 2, :, numrefcols:] = f1[dset][i, :, :]
            for i in range(numfr2):
                output[i * 2 + 1, :, numrefcols:] = f2[dset][i, :, :]
            # output[0::2,:,:] = f1[:,:,:]; would also work
            of.create_dataset(dset, data=output)
            if split and dset in ["data", "reset"]:
                gain, fine, crs = splitGFC(output)
                of.create_dataset(dset + "-gain", data=gain)
                of.create_dataset(dset + "-coarse", data=crs)
                of.create_dataset(dset + "-fine", data=fine)
    # should also do info field here.
    if delete:
        print("deleting {} and its pair".format(filename1))
        os.remove(filename1)
        os.remove(filename2)
def options():
    """Build the argument parser for the merge script and return the parsed
    command-line options."""
    desc = ("Script to combine files like blah_0001.h5 and blah_0002.h5 by "
            "interleaving frames. datasets data and reset are kept. "
            "Frame-numbering is preserved.")
    ap = argparse.ArgumentParser(description=desc)
    ap.add_argument("-l", "--label", default="VINSCAN", help="filename label to search for in the filename (default VINSCAN)")
    ap.add_argument("-i", "--indir", default="/dls/detectors/Percival/captures", help="folder for h5 files input\n (default /dls/detectors/Percival/captures)")
    ap.add_argument("--no-delete", action="store_false", help="delete original capture files after merge (default on)", default=True, dest="delete")
    ap.add_argument("-o", "--outdir", default="", help="folder for h5 files output (default same as indir)")
    ap.add_argument("-s", "--split", default=False, action="store_true", help="add datasets for g,f,c too")
    ap.add_argument("-v", "--verbose", help="verbose logging (default F)", action="store_true", default=False)
    ap.add_argument("-m", "--maxfiles", help="max number of files to create", type=int, default=100000)
    return ap.parse_args()
def main():
    """Find paired capture files in the input folder and merge each pair.

    Pairs are ...01.h5/...02.h5 (1-based numbering) or ...00.h5/...01.h5
    (0-based, odin-data >= 1.8); the lower-numbered file always goes first.
    """
    args = options()
    if not os.path.isdir(args.indir):
        print ("invalid directory ", args.indir)
        sys.exit(1)
    indir = args.indir
    # Output folder defaults to the input folder unless a valid one is given.
    outdir = args.indir
    if os.path.isdir(args.outdir):
        outdir = args.outdir
    elif args.outdir:
        print ("invalid directory ", args.outdir)
        sys.exit(1)
    # Ordered so pairs are discovered deterministically; already-merged
    # "combined" outputs are excluded.
    allfiles = collections.OrderedDict()
    for filename in sorted(os.listdir(indir)):
        if args.label in filename and not "combined" in filename:
            allfiles[filename] = 1
    # in the odin-data update to 1.8, the numbering started at zero instead of 1.
    count = 0
    for filename1 in allfiles.keys():
        if not filename1.endswith("01.h5"):
            continue
        filename_oth = filename1.replace("01.h5", "02.h5")
        if filename_oth not in allfiles:
            # zero-based numbering: the partner is ...00.h5 and comes first.
            filename_oth = filename1.replace("01.h5", "00.h5")
            filename1, filename_oth = filename_oth, filename1
        if os.path.exists(os.path.join(indir, filename1)) and os.path.exists(os.path.join(indir, filename_oth)) and count < args.maxfiles:
            # Raw string: "\d" in a plain literal is an invalid escape sequence.
            outname = re.sub(r"\d{6}\.h5", "combined.h5", filename1)
            print("Merging{} {} and {} into\n *** {}".format("+s" if args.split else "", filename1, filename_oth, outname))
            merge_files_and_save(os.path.join(indir, filename1), os.path.join(indir, filename_oth),
                                 os.path.join(outdir, outname), args.delete, args.split)
            count += 1
# Run the pair-merging scan when executed as a script.
if __name__ == '__main__':
    main()
|
985,268 | a7a2e8817aef277fad97b9f096fc3b761db4d23e | import string
def display_word(word, guesses):
    """Return the word with unguessed letters masked, e.g. "B _ L _ _ A".

    Comparison is case-insensitive; the result is upper-case with single
    spaces between characters.
    """
    guessed = {g.upper() for g in guesses}
    shown = [ch if ch in guessed else '_' for ch in word.upper()]
    return " ".join(shown)
def is_valid_letter(user_input):
    """Return True when `user_input` is exactly one ASCII letter (a-z, A-Z)."""
    if len(user_input) != 1:
        return False
    return user_input in string.ascii_letters
def has_been_guessed(letter, correct_guesses, incorrect_guesses):
    """Return True when `letter` already appears in either guess list."""
    return letter in correct_guesses or letter in incorrect_guesses
class Game:
    """State for one round of Mystery Word (hangman).

    Attributes:
        word: the secret word.
        correct_guesses / incorrect_guesses: lower-case letters guessed so far.
        max_incorrect_guesses: wrong guesses allowed before the game is lost.
    """

    def __init__(self, word):
        self.word = word
        self.correct_guesses = []
        self.incorrect_guesses = []
        self.max_incorrect_guesses = 8

    def is_game_over(self):
        """The game ends on a win (all letters found) or too many misses."""
        if self.have_all_letters_been_guessed():
            return True
        return len(self.incorrect_guesses) >= self.max_incorrect_guesses

    def have_all_letters_been_guessed(self):
        """Return True when every letter of the word has been guessed.

        Guesses are stored lower-case (see add_guess), so the word must be
        lower-cased before comparing -- the previous version compared the
        original-case letters and made any word containing an upper-case
        letter unwinnable.
        """
        return all(letter in self.correct_guesses for letter in self.word.lower())

    def add_guess(self, guess):
        """Given a guessed letter, add it to correct or incorrect guesses."""
        guess = guess.lower()
        if guess in self.word.lower():
            self.correct_guesses.append(guess)
        else:
            self.incorrect_guesses.append(guess)

    def get_user_guess(self):
        """Prompt until the user enters a single letter; return it."""
        while True:
            guess = input("What is your guess? ")
            if is_valid_letter(guess):
                return guess
            print("That's not a valid answer!")

    def run(self):
        """Interactive game loop: show state, read guesses, report the result."""
        print("Welcome to Mystery Word!")
        while not self.is_game_over():
            print(
                f"You have {self.max_incorrect_guesses - len(self.incorrect_guesses)} guesses left."
            )
            print(display_word(self.word, self.correct_guesses))
            guess = self.get_user_guess()
            self.add_guess(guess)
        if self.is_game_over():
            if self.have_all_letters_been_guessed():
                print("You won!")
            else:
                print("You are the loser!")

    def __repr__(self):
        return f"<Game word={self.word}>"
if __name__ == "__main__":
game = Game("beluga")
game.run()
|
985,269 | 0a3db9ecb245c9ae43be0bcff67e1f79a7fd5058 | config_platform = 'win32'
#config_platform = 'android'
#config_platform = 'linux'
|
985,270 | 34b7c03017a55583f31bbcdb70c5b0411d6725a0 | import json
import requests
import time
import urllib3
urllib3.disable_warnings()
api_url_base = 'https://api.mgmt.cloud.vmware.com/'
headers = {'Content-Type': 'application/json'}
refresh_token = ''
def extract_values(obj, key):
    """Pull all values of specified key from nested JSON.

    Walks dicts and lists depth-first; a value is collected only when it
    is a scalar stored under *key* (containers under *key* are descended
    into, not collected), matching insertion order.
    """
    found = []

    def walk(node):
        # Depth-first traversal collecting scalar values stored under *key*.
        if isinstance(node, dict):
            for k, v in node.items():
                if isinstance(v, (dict, list)):
                    walk(v)
                elif k == key:
                    found.append(v)
        elif isinstance(node, list):
            for item in node:
                walk(item)

    walk(obj)
    return found
def get_token():
    """Exchange the module-level CSP refresh token for a bearer token.

    POSTs to the VMware CSP authorize endpoint and returns the
    'access_token' field of the JSON reply, or None on any non-200 status.
    """
    url = ('https://console.cloud.vmware.com/csp/gateway/am/api/auth/'
           'api-tokens/authorize?refresh_token={0}').format(refresh_token)
    response = requests.post(url, headers=headers, verify=False)
    if response.status_code != 200:
        return None
    payload = json.loads(response.content.decode('utf-8'))
    return payload['access_token']
access_key = get_token()
headers1 = {'Content-Type': 'application/json',
'Authorization': 'Bearer {0}'.format(access_key)}
def get_blueprints():
    """Fetch the blueprint list and print the raw response body.

    Returns None on a non-200 status; on success prints 'OK' followed by
    the response text (implicitly returning None as well).
    """
    url = '{0}blueprint/api/blueprints'.format(api_url_base)
    response = requests.get(url, headers=headers1, verify=False)
    if response.status_code != 200:
        return None
    # Parse only to surface malformed JSON early; the result is unused.
    json.loads(response.content.decode('utf-8'))
    print('OK')
    print(response.text)
get_blueprints() |
985,271 | 5c91554fad9f3cc8beb771ceff18568b378ff7c9 | """
===============
author:Administrator
time:13:45
E-mail:1223607348@qq.com
===============
""" |
985,272 | 64a0b9118516625ec5b509c23f21e87e1f21e565 | #!/usr/bin/env python
# coding: utf-8
# # Software Development 1 Courserwork
# In[54]:
#EXERCISE I
#1. Asks the user to enter a number “x”
x = int (input ("10"))
# In[55]:
#2. Asks the user to enter a number "y"
y = int (input ("5"))
# In[56]:
print (x)
# In[57]:
print (y)
# In[58]:
#3a. The sum of x and y
print (x+y)
# In[59]:
#3b Asks the user to enter a number “y”
print (x-y)
# In[60]:
#3c. The product of x and y
print (x*y)
# In[61]:
#3d. The quotient when x is divided by y
print (x/y)
# In[62]:
#3e. The remainder when x is divided by y
print (x%y)
# In[63]:
#3f The result of log10x
import math #importin
print (math.log10(x))
# In[64]:
#3g The result of x**y
print (x**y)
# In[3]:
#EXERCISE II
#Write a program that asks the user to enter the width and length of a room in meters. Once these values have been
#read (as floating-point numbers), your program should compute and display the area of the room. Include units in your
#prompt and output message.
#Asking user to enter the width
width = float (input ('1.5'))
#Asking user to enter the length
length = float (input ("5.6"))
#To calcute the total of area, we will have to multiply the width by the length
#When the area is included in the print function then it multiplies and it's inlcuded in the result when we run
#the cell
area = width * length
print ("The total area of the room is", area, "meters")
# In[8]:
#EXERCISE III: CHINESE ZODIAC ASSIGNS
year = int(input("Enter birth year: ")) #When we run the program it's going ask to enter the number of the year
#which will result to showing us the respective chinese zodiac sign.
#The years are 2000 to 2011
#I will start the cycle with the year 1999 as it's when the previous cycle ended.
zodiacYear = year % 12
if (zodiacYear %12 == 0):
print ("Hare")
elif (zodiacYear %12 == 1):
print ("Dragon")
elif (zodiacYear %12 == 2):
print ("Snake")
elif (zodiacYear %12 == 3):
print ("Horse")
elif (zodiacYear %12 == 4):
print ("Sheep")
elif (zodiacYear %12 == 5):
print ("Monkey")
elif (zodiacYear %12 == 6):
print ("Rooster")
elif (zodiacYear %12 == 7):
print ("Pig")
elif (zodiacYear %12 == 8):
print ("Rat")
elif (zodiacYear %12 == 9):
print ("Ox")
elif (zodiacYear %12 == 10):
print ("Tiger")
elif (zodiacYear %12 ==11):
print ("Dog")
elif (zodiacYear %12 ==12):
print ("Hare")
#end
# In[5]:
#EXERCISE IV: MULTIPLICATION TABLE
#I will be using the def function, creating nested loops and asking the user to add a value and return it
#in order to display the multiplication table.
def nested_loop(start, end):
    """Print a tab-separated multiplication table for start..end inclusive."""
    # Header row: empty corner cell, then the column labels.
    print('', end='\t')
    for col in range(start, end + 1):
        print(col, end='\t')
    print('')
    # One row per multiplicand: row label, then the products.
    for row in range(start, end + 1):
        print(row, end='\t')
        for col in range(start, end + 1):
            print(row * col, end='\t')
        print('')
def get_value(label):
    """Prompt the user for the *label* bound and return it as an int.

    Raises ValueError when the input is not a valid integer, as before.
    """
    print("Enter " + label + " value:")
    return int(input())
def table():
    """Prompt for the table bounds, then print the multiplication table."""
    start = get_value("starting")  # lower bound (inclusive)
    end = get_value("ending")  # upper bound (inclusive)
    nested_loop(start, end)
table ()
#To diplay multiplication table below, please run the code, add "1" as starting value and "10" as the ending value
#end
# In[1]:
#EXERCISE V - House Purchase Calculationsδ
#Assigning values:
cost_of_home = 110000.0
down_payment = 110000.0 * 0.25 #110000.0 is the total cost of the future home times 25% (0.25)
#which I have turned into a float.
annual_salary = 38000.0
portion_of_the_salary_saved = 0.1 #10% turned in decimals (float)
print ("The portion of the down payment is","£", down_payment)
savings= 0
r = 0.04
monthly_salary = (annual_salary / 12.0)
print (monthly_salary)
monthly_savings = monthly_salary*0.1
print ("The monthly savings are", monthly_savings)
#Using the float function and the input function to ask the user for input of the following:
cost_of_home = float (input("110000.0"))#Asking the user for input on the cost of my future home
print ("The cost of the house is","£",cost_of_home ) #Printing a sentence that states the cost of the house
annual_salary = float (input (38000.0))#Asking the user for input of my starting annual salary.
print ("My starting annual salary is","£", annual_salary) #Printing a sentence that states my annual salary.
salary_saved = float (input (0.1)) #Asking the user for input of my saved salary
print ("The portion of my salary saved is", salary_saved) #Printing a sentence that states my annual salary.
months = 0
# Want to exit the loop when there is enough savings for a down payment
while savings < down_payment:
savings += savings * (0.4 / 12) # Monthly interest
savings += salary_saved # Monthly savings
months += 1
print("It will take {} months to save!".format(months))
|
985,273 | df334ed4d4c0ae915e1ad7c656222833d4e89047 | __all__ = ["setVelocity", "shortest_angular_distance",
"targestCollectedHandler", "modeHandler", "joyCmdHandler",
"publishStatusTimerEventHandler", "targetHandler", "odometryHandler",
"obstacleHandler", "mobilityStateMachine", "shutdown"]
|
985,274 | 96fe95def71c8334c1211e8c4bc654ae4aaf7cad | pilha = []
def empilhar(_pilha, numero):
    """Push: place *numero* on top of the stack (the list's tail)."""
    # In-place list extend is equivalent to append for a single element
    # and mutates the caller's list, as before.
    _pilha += [numero]
def desempilhar(_pilha):
    """Pop: remove and return the element on top of the stack.

    Raises IndexError on an empty stack, exactly like indexing [-1] did.
    """
    return _pilha.pop()
empilhar(pilha, 10)
empilhar(pilha, 17)
empilhar(pilha, 20)
print(pilha)
print(desempilhar(pilha))
print(pilha) |
985,275 | e090d7f0ea7c68eb961ad078550d469b4ce77394 | from math import sqrt as root
def get_fibonacci_at_pos(idx):
    """Return the idx-th Fibonacci number (fib(0)=0, fib(1)=fib(2)=1).

    PERF FIX: the original double recursion was O(phi**idx); this linear
    iteration returns the same values.  Values of idx below 3 (including
    negatives, as before) short-circuit to the original results.
    """
    if idx == 0:
        return 0
    if idx < 3:
        return 1
    prev, curr = 1, 1  # fib(1), fib(2)
    for _ in range(idx - 2):
        prev, curr = curr, prev + curr
    return curr
def is_prime(num):
    """Trial-division primality test; numbers below 2 are not prime."""
    if num < 2:
        return False
    # `root` is math.sqrt, aliased at the top of this module.
    return all(num % divisor != 0 for divisor in range(2, int(root(num) + 1)))
n = abs(int(input("Enter the value of 'n' > ")))
for i in range(1,n+1):
f = get_fibonacci_at_pos(i)
if is_prime(f) or f%5 == 0:
print(0,end=" ")
else:
print(f, end=" ")
print()
|
985,276 | ae8dfb27da334af995b20ee1738cfb336d668ceb | import random
def createCards():
    """Build the 52-card deck as suit+value strings, print it, and return it."""
    colors = ['红', '黑', '梅', '方']
    values = '23456789IJQKA'
    # Same order as the original nested loops: suits outer, values inner.
    cards = [color + value for color in colors for value in values]
    print(cards)
    print(len(cards))
    return cards
def shuffleCards(cards):
    """Shuffle by repeatedly drawing a random card.

    Draws remove from the caller's list (it is emptied in place) and are
    accumulated into a new list that is printed and returned.  One
    random.randint call is consumed per remaining card, as before.
    """
    remaining = cards  # alias: removals mutate the caller's list
    shuffled = []
    while remaining:
        pick = remaining[random.randint(0, len(remaining) - 1)]
        print('randomCard=', pick)
        remaining.remove(pick)
        shuffled.append(pick)
    print(shuffled)
    print(len(shuffled))
    return shuffled
def main():
    """Build a fresh 52-card deck and shuffle it (both steps print progress)."""
    cards = createCards()
    shuffleCards(cards)
if __name__ == "__main__":
main() |
985,277 | 7fe77273eb0d99073a8e07b45b42e645d011f9d1 | from address_book.controls.search_functions import *
from address_book.controls.validation_functions import validate_the_menu_action_loop
from address_book.controls.validation_functions import validate_required_fields_not_empty
from address_book.controls.global_var import answer_yes
from address_book.controls.global_var import answer_no
from address_book.controls.global_var import error_message_for_answer
from address_book.controls.global_var import value_for_yes_no_validation
def search_contact(address_book):
    """Interactively search *address_book* and display the matches.

    Asks the user for a search scope (all fields / name / last name), a
    query string, and whether to require a full match, then delegates to
    search_contact_by_attr_values() and shows the results.
    """
    menu = "1. SEARCH IN ALL FIELDS\n2. SEARCH BY NAME\n3. SEARCH BY LAST NAME"
    input_message = "Please choose the desired action by entering the number accordingly.\n>>> "
    action = validate_the_menu_action_loop(menu, input_message)
    search_key = validate_required_fields_not_empty("Please enter your search query.\n>>> ")
    print("Do you want to search for a full match with entered query?")
    # BUG FIX: the original condition was
    #   while value_for_yes_no_validation not in answer_yes + answer_no:
    # but value_for_yes_no_validation is a module constant that never
    # changes inside the loop.  If it happened to be a valid answer the
    # body never ran and `found_contacts` was unbound (NameError).  The
    # loop is only ever exited through its breaks, so loop unconditionally.
    while True:
        is_full_search = input("Please enter Y/Д/Т to search for a full match, and N/Н to use partial search.\n>>> ")
        if is_full_search in answer_yes:
            found_contacts = search_contact_by_attr_values(search_key, address_book, action, True)
            break
        elif is_full_search in answer_no:
            found_contacts = search_contact_by_attr_values(search_key, address_book, action, False)
            break
        print(error_message_for_answer)
    return show_search_results(found_contacts, search_key)
|
985,278 | 41a66bbdd2255ee2d0f5ffa0b5f544f12b84f9d2 | def timeConversion(s):
hour = s[:2]
if s[-2:] == 'AM' and s[:2] == '12':
hour = '00'
elif s[-2:] == 'PM' and s[:2] != '12':
hour = str(int(hour) + 12)
return hour + s[2:-2]
assert timeConversion('12:05:45AM') == '00:05:45'
assert timeConversion('01:05:45AM') == '01:05:45'
assert timeConversion('12:05:45PM') == '12:05:45'
assert timeConversion('01:05:45PM') == '13:05:45'
assert timeConversion('09:05:45PM') == '21:05:45' |
985,279 | 5c10f99da3c1cc54eb4deb42e6cbce5a2a4935b4 |
from basic_robot.irproximity_sensor import IRProximitySensor
class StayLeft():
    """Avoidance behaviour reacting to the second IR proximity reading."""
    def __init__(self):
        # NOTE(review): get_value() is invoked on the class itself, not an
        # instance — confirm IRProximitySensor exposes it that way.
        # Index [1] is presumably the left-hand sensor; verify against the API.
        self.Is_Left = IRProximitySensor.get_value()[1]
    def What_to_do(self):
        """Return "Right" when the sensor is triggered, otherwise "Left"."""
        if self.Is_Left == True:
            return "Right"
        else:
            return "Left"
class StayRight():
    """Avoidance behaviour reacting to the first IR proximity reading."""
    def __init__(self):
        # NOTE(review): get_value() is invoked on the class itself, not an
        # instance — confirm IRProximitySensor exposes it that way.
        # Index [0] is presumably the right-hand sensor; verify against the API.
        self.Is_Right = IRProximitySensor.get_value()[0]
    def What_to_do(self):
        """Return "Left" when the sensor is triggered, otherwise "Right"."""
        if self.Is_Right == True:
            return "Left"
        else:
            return "Right"
class Staybetween():
    """Steering decision based on both IR proximity readings at once."""
    def __init__(self):
        # Snapshot of both sensor values, taken once at construction.
        self.Between = IRProximitySensor.get_value()
    def What_to_do(self):
        """Map the (right, left) sensor pair to a steering command.

        NOTE(review): the both-False case is unhandled and implicitly
        returns None — callers must tolerate that, or this needs a decision.
        """
        if (self.Between[0] ==True) and (self.Between[1] == True):
            return "Straight"
        elif (self.Between[0] == False) and (self.Between[1] == True):
            return "Right"
        elif (self.Between[0] ==True) and (self.Between[1] == False):
            return "Left"
        else:
            #YEAH WHAT NOW??
            pass
|
985,280 | 292315dbb97d0b97fd91f2dfb2f1b15f92e7a004 | T = int(input())
dix=[-1,1,0,0]
diy=[0,0,-1,1]
for t in range(T):
n,m,k=map(int,input().split(' '))
board=[]
vis=[[0 for i in range(n)]for j in range(n)]
for i in range(k):
board.append([int(i) for i in input().split(' ')])
for time in range(m):
vis = [[0 for i in range(n)]for j in range(n)]
for boa in board:
flag= True
for dir in range(1,5):
if dir==boa[3]:
nx = boa[0]+dix[dir-1]
ny = boa[1]+diy[dir-1]
# print(board[i],nx,ny)
if nx>=n-1 or ny>=n-1 or nx<=0 or ny<=0:
if boa[3] == 1:
boa[3]= 2
elif boa[3] == 2:
boa[3] = 1
elif boa[3] == 3:
boa[3] = 4
elif boa[3] == 4:
boa[3] = 3
boa[2] = int(boa[2]/2)
if boa[2] == 0:
board.pop(board.index(boa))
flag = False
if flag == False:
continue
boa[0]=nx
boa[1]=ny
vis[nx][ny]+=1
for i in range(n):
for j in range(n):
if vis[i][j] >1:
temp=[]
for bo in board:
if bo[0]==i and bo[1]==j:
temp.append(board.index(bo))
num = -1
total=0
for l in temp:
total+=board[l][2]
num = max(board[l][2],num)
for l in temp:
if board[l][2] == num:
board[l][2] = total
temp.pop(temp.index(l))
# print(temp)
while len(temp) != 0:
board.pop(temp.pop())
# print()
# print()
# for i in range(len(board)):
# print(board[i])
# for i in range(len(board)):
# print(board[i])
ans=0
for i in board:
ans+=i[2]
print('#',t+1,' ',ans,sep='')
|
985,281 | 6db68786479ad24cb9aeb3b0bd090db06a9ccbe7 | import queue
import os
class Cooooooooooooooooooooore:
    """Feeds targets from a wordlist file through the `rad` crawler.

    Targets are queued by getHitKillList() and consumed one by one by
    getVuuuuuuuln(), which logs each scanned domain to ScannedDomain.txt
    before launching the scan.
    """

    def __init__(self, getFilePath):
        self.hitKillList = queue.Queue()  # pending scan targets
        self.getFilePath = getFilePath    # path of the target wordlist

    def getHitKillList(self):
        """Load one target per line from the wordlist into the queue.

        FIX: the original left the wordlist file handle open; use a
        context manager so it is always closed.
        """
        with open(self.getFilePath, 'r', encoding='utf-8') as wordlist:
            for line in wordlist.readlines():
                self.hitKillList.put(line.strip('\n'))
        return self.hitKillList

    def getVuuuuuuuln(self):
        """Scan every queued target with rad, logging each one first."""
        while True:
            if self.hitKillList.empty():
                print('[-]字典为空')
                return
            # BUG FIX: the original called self.hitKillList.get() three
            # times per iteration, so the domain logged, the domain
            # scanned and the domain reported done were three DIFFERENT
            # queue entries.  Dequeue exactly once per iteration.
            target = self.hitKillList.get()
            with open('./ScannedDomain.txt', 'a', encoding='utf-8') as log:
                log.writelines(target + '\n')
            Command = '{} -t {} -http-proxy 127.0.0.1:7777 '.format('rad_windows_amd64.exe', target)
            os.system(Command)
            print('[+]{}扫描完毕'.format(target))
if __name__ == '__main__':
start = Cooooooooooooooooooooore('./ForHitKill/edu.txt')
start.getHitKillList()
start.getVuuuuuuuln()
|
985,282 | 6dfa4171761f0205c67c268f7bcd00ce280d0e25 | import os
import subprocess
import shutil
from model.common.file_reader import FileReader
from model.common.file_writer import FileWriter
class ClientManager(object):
def __init__(self, name, context, others, logWnd):
self.name = name
self.context = context
self.script_path = others["simulator"]["script"]
self.simulator = others["simulator"]["path"]
self.run_dir = os.path.dirname(os.path.abspath(self.simulator))
self.account = context["account"]
self.password = context["password"]
self.config_file = os.path.join(self.run_dir, "windows.ini")
self.is_console_expected = context["is_console_expected"].lower() == "yes"
self.logWnd = logWnd
self.fileWriter = None
self.fileReader = None
self.logfile = os.path.join(others["logdir"], "{}.log".format(name))
self.proc = None
def __del__(self, *args):
self.close()
def close(self):
self.stop()
def _closeLogFileHandlers(self):
if self.fileWriter is not None:
self.fileWriter.close()
self.fileWriter = None
if self.fileReader is not None:
self.fileReader.close()
self.fileReader = None
def isRunning(self):
return self.proc is not None
def stop(self):
self._closeLogFileHandlers()
self.logWnd.info("停止进程 "+self.name)
if self.proc is not None:
try:
os.kill(self.proc.pid, 9)
except PermissionError:
self.logWnd.warn("未能杀死进程 {name} 或者该进程不存在".format(name=self.name))
self.proc = None
def syncLogToScreenFromFile(self):
if self.fileReader is None: return
try:
self.logWnd.writelines(self.fileReader.readlines())
except Exception as e:
self.logWnd.warn(str(e))
def _precheck(self):
if self.isRunning():
self.logWnd.error(self.name+" 还在运行中")
return False
if not os.path.isdir(self.run_dir):
self.logWnd.error("执行路径不存在: "+self.run_dir)
return False
if (not os.path.isfile(self.simulator)) or (not os.path.isfile(self.config_file)):
self.logWnd.error("模拟器 {} 或者配置文件 {} 不存在!".format(self.simulator, self.config_file))
return False
return True
def _startupLogEnvironment(self):
try:
self.fileWriter = FileWriter(self.logfile)
self.fileReader = FileReader(self.logfile)
return True
except IOError as e:
self.logWnd.error(str(e))
self.logWnd.error("进程 {} 无法启动".format(self.name))
return False
def _launchSubprocess(self, cmd):
import sys
if sys.version_info.major == 2:
self.logWnd.error("不再支持Python 2.x")
return None
if sys.version_info.minor <= 6:
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
return subprocess.Popen(cmd, stdout=self.fileWriter, stderr=self.fileWriter, startupinfo=startupinfo)
else: return subprocess.Popen(cmd, stdout=self.fileWriter, stderr=self.fileWriter, creationflags=subprocess.CREATE_NO_WINDOW)
def _write_client_information_into_config_file(self):
lines = []
with open(self.config_file, "r", encoding="utf-8") as fd:
lines = fd.readlines()
is_read_user_info = False
NECCESSARY_USER_INFO_ITEM_NUM = 2
modified_user_info_item_num = 0
for i in range(len(lines)):
if lines[i].rstrip() == "[user]":
is_read_user_info = True
continue
if is_read_user_info:
if lines[i].find("name=") == 0:
lines[i] = "name=" + self.account
modified_user_info_item_num += 1
elif lines[i].find("password=") == 0:
lines[i] = "password=" + self.password
modified_user_info_item_num += 1
if NECCESSARY_USER_INFO_ITEM_NUM == modified_user_info_item_num: break
FileWriter(self.config_file, encoding="utf-8").writelines(lines)
def run(self):
if not self._precheck(): return
if not self._startupLogEnvironment(): return
self._write_client_information_into_config_file()
cwd = os.getcwd()
os.chdir(self.run_dir)
self.proc = self._launchSubprocess("{} {} -workdir {}".format(self.simulator, "" if self.is_console_expected else "-console disable", self.script_path))
os.chdir(cwd)
if self.proc is None: return
self.logWnd.info("进程 {} 开始运行".format(self.name))
|
985,283 | f3bd54471f466fdb8106a77310a2a66bfb120d1d | #!/usr/bin/env python
# encoding:UTF-8
from time import sleep
import random
import logging
logging.basicConfig(level=logging.INFO,format="%(asctime)s.%(msecs)03d[%(levelname)-8s]:%(created).6f %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
while True:
sleep(15-(int(time.time())+2) % 15)
logging.info(f"{int(time.time()) % 10}")
print("hello~")
logging.error("出现了错误")
sleep(1)
logging.info("bs_amount_ratio is 1.834679120207966")
sleep(1)
logging.warning("警告信息")
sleep(1)
logging.critical ("critical")
|
985,284 | e37b80318ce8413d153ec126abc9494f0e260a77 | n=eval(input())
a,b=0,0
for i in range(0,len(n)-1):
if n[i]>n[i+1]:
b+=1
for j in range(i+1,len(n)):
if n[i]>n[j]:
a+=1
print(a==b) |
985,285 | d61ecef0630976a75751d8a8769392bcf2a95023 | #!/usr/bin/env python3
import sys
import numpy
import scipy.stats
f = open('exercise_01_output', 'w')
for i, line in enumerate(sys.stdin):
columns = line.rstrip("\n").split()
for column in columns[:3]:
if "DROME" in column:
if len(columns) < 4:
continue
#print(columns[3], columns[1], sep='\t')
f.write(columns[-1] + "\t" + columns[-2] + "\n")
# f.write(last, second to last)
|
985,286 | 1bc4dec1e5e8cff34a1e1ba92f98cc16f9a89788 | import time
import numpy as np
from struct import *
import csv
from typing import NamedTuple
import array
import re
import os
import sys
import platform
import inspect
import usb.core
import usb.util
import usb.backend.libusb1
from pathlib import *
from hardwarelibrary.physicaldevice import PhysicalDevice
from hardwarelibrary.spectrometers.viewer import *
class NoSpectrometerConnected(RuntimeError):
    """No (supported) spectrometer was found on the USB bus."""
    pass
class UnableToInitialize(RuntimeError):
    """Spectrometer initialization failed (see exception name)."""
    pass
class UnableToCommunicate(RuntimeError):
    """Communication with the spectrometer failed (see exception name)."""
    pass
class SpectrumRequestTimeoutError(RuntimeError):
    """A spectrum request did not complete in time."""
    pass
class Spectrometer(PhysicalDevice):
idVendor = None
idProduct = None
def __init__(self, serialNumber=None, idProduct:int = None, idVendor:int = None):
PhysicalDevice.__init__(self, serialNumber=serialNumber, idProduct=idProduct, idVendor=idVendor)
self.model = ""
self.wavelength = np.linspace(400,1000,1024)
self.integrationTime = 10
def getSerialNumber(self):
fctName = inspect.currentframe().f_code.co_name
raise NotImplementedError("Derived class must implement {0}".format(fctName))
def getSpectrum(self) -> np.array:
fctName = inspect.currentframe().f_code.co_name
raise NotImplementedError("Derived class must implement {0}".format(fctName))
def display(self):
""" Display the spectrum with the SpectraViewer class."""
viewer = SpectraViewer(spectrometer=self)
viewer.display()
def getIntegrationTime(self):
return self.integrationTime
def setIntegrationTime(self, value):
self.integrationTime = value
def saveSpectrum(self, filepath, spectrum=None, whiteReference=None, darkReference=None):
""" Save a spectrum to disk as a comma-separated variable file.
If no spectrum is provided, request one from the spectrometer withoout
changing the integration time.
Parameters
----------
filepath: str
The path and the filename where to save the data. If no path
is included, the file is saved in the current directory
with the python script was invoked.
spectrum: array_like
A spectrum previously acquired or None to request a new spectrum
whiteReference: array_like
A white reference to normalize the measurements
darkReference: array_like
A dark reference for baseline
"""
try:
if spectrum is None:
spectrum = self.getSpectrum()
if darkReference is None:
darkReference = [0]*len(spectrum)
if whiteReference is None:
whiteReference = [1]*len(spectrum)
with open(filepath, 'w', newline='\n') as csvfile:
fileWrite = csv.writer(csvfile, delimiter=',')
fileWrite.writerow(['Wavelength [nm]','Intensity [arb.u]','White reference','Dark reference'])
for x,y,w,d in list(zip(self.wavelength, spectrum, whiteReference, darkReference)):
fileWrite.writerow(["{0:.2f}".format(x),y,w,d])
except Exception as err:
print("Unable to save data: {0}".format(err))
@classmethod
def supportedClasses(cls):
supportedClasses = []
for c in getAllSubclasses(Spectrometer):
classSearch = re.search(r'\.(USB.*?)\W', "{0}".format(c), re.IGNORECASE)
if classSearch:
supportedClasses.append(c)
return supportedClasses
@classmethod
def supportedClassNames(cls):
supportedClasses = []
for c in getAllSubclasses(Spectrometer):
classSearch = re.search(r'\.(USB.*?)\W', "{0}".format(c), re.IGNORECASE)
if classSearch:
supportedClasses.append(classSearch.group(1))
return supportedClasses
@classmethod
def showHelp(cls, err=None):
print("""
There may be missing modules, missing spectrometer or anything else.
To use this `{0}` python script, you *must* have:
1. PyUSB module installed.
This can be done with `pip install pyusb`. On some platforms, you
also need to install libusb, a free package to access USB devices.
On Windows, you can leave the libusb.dll file directly in the same
directory as this script. If no spectrometers are detected, it is
possible the problem is due to libusb.dll not being in the directory
where `{0}` was called.
2. A backend for PyUSB.
PyUSB does not communicate by itself with the USB ports of your
computer. A 'backend' (or library) is needed. Typically, libusb is
used. You must install libusb (or another compatible library). On
macOS: type `brew install libusb` (if you have brew). If not, get
`brew`. On Windows/Linux, go read the PyUSB tutorial:
https://github.com/pyusb/pyusb/blob/master/docs/tutorial.rst
If you have libusb.dll on Windows, keep it in the same
directory as {0} and it should work.
3. matplotlib module installed
If you want to use the display function, you need matplotlib.
This can be installed with `pip install matplotlib`
4. Tkinter module installed.
If you click "Save" in the window, you may need the Tkinter module.
This comes standard with most python distributions.
5. Obviously, a connected Ocean Insight or StellarNet spectrometer. It really needs to be
a supported spectrometer ({1}). The details of all
the spectrometers are different (number of pixels, bits, wavelengths,
speed, etc...). More spectrometers will be supported in the future.
Look at the class USB2000 to see what you have to provide to support
a new spectrometer (it is not that much work, but you need one to test).
""".format(__file__, ', '.join(Spectrometer.supportedClassNames)))
# Well, how about that? This does not work in Windows
# https://stackoverflow.com/questions/2330245/python-change-text-color-in-shell
# if sys.stdout.isatty:
# err = '\x1b[{0}m{1}\x1b[0m'.format(';'.join(['33','1']), err)
print(""" There was an error when starting: '{0}'.
See above for help.""".format(err))
@classmethod
def displayAny(cls):
spectrometer = cls.any()
if spectrometer is not None:
SpectraViewer(spectrometer).display()
@classmethod
def any(cls) -> 'Spectrometer':
""" Return the first supported spectrometer found as a Python object
that can be used immediately.
Returns
-------
device: subclass of Spectrometer
An instance of a supported spectrometer that can be used immediately.
"""
devices = cls.connectedUSBDevices()
for device in devices:
for aClass in cls.supportedClasses():
if device.idProduct == aClass.classIdProduct:
return aClass(serialNumber="*", idProduct=device.idProduct, idVendor=device.idVendor)
if len(devices) == 0:
raise NoSpectrometerConnected('No spectrometer connected.')
else:
raise NoSpectrometerConnected('No supported spectrometer connected. The devices {0} are not supported.'.format(devices))
@classmethod
def connectedUSBDevices(cls, idProduct=None, serialNumber=None):
"""
Return a list of supported USB devices that are currently connected.
If idProduct is provided, match only these products. If a serial
number is provided, return the matching device otherwise return an
empty list. If no serial number is provided, return all devices.
Parameters
----------
idProduct: int Default: None
The USB idProduct to match
serialNumber: str Default: None
The serial number to match, when there are still more than one device after
filtering out the idProduct. If there is a single match, the serial number
is disregarded.
Returns
-------
devices: list of Device
A list of connected devices matching the criteria provided
"""
idVendors = set()
for aClass in cls.supportedClasses():
if aClass is not None:
idVendors.add(aClass.classIdVendor)
devices = []
if idProduct is None:
for idVendor in idVendors:
devices.extend(list(usb.core.find(find_all=True, idVendor=idVendor)))
else:
for idVendor in idVendors:
devices.extend(list(usb.core.find(find_all=True, idVendor=idVendor, idProduct=idProduct)))
if serialNumber is not None: # A serial number was provided, try to match
for device in devices:
deviceSerialNumber = usb.util.get_string(device, device.iSerialNumber )
if deviceSerialNumber == serialNumber:
return [device]
return [] # Nothing matched
return devices
@classmethod
def matchUniqueUSBDevice(cls, idProduct=None, serialNumber=None):
""" A class method to find a unique device that matches the criteria provided. If there
is a single device connected, then the default parameters will make it return
that single device. The idProduct is used to filter out unwanted products. If
there are still more than one of the same product type, then the serial number
is used to separate them. If we can't find a unique device, we raise an
exception to suggest what to do.
Parameters
----------
idProduct: int Default: None
The USB idProduct to match
serialNumber: str Default: None
The serial number to match, when there are still more than one after
filtering out the idProduct. if there is a single match, the serial number
is disregarded.
Returns
-------
device: Device
A single device matching the criteria
Raises
------
RuntimeError if a single device cannot be found.
"""
devices = cls.connectedUSBDevices(idProduct=idProduct,
serialNumber=serialNumber)
device = None
if len(devices) == 1:
device = devices[0]
elif len(devices) > 1:
if serialNumber is not None:
raise NoSpectrometerConnected('Device with the appropriate serial number ({0}) was not found in the list of devices {1}'.format(serialNumber, devices))
else:
# No serial number provided, just take the first one
device = devices[0]
else:
# No devices with criteria provided
anySpectroDevice = Spectrometer.connectedUSBDevices()
if len(anySpectroDevice) == 0:
raise NoSpectrometerConnected('Device not found because there are no spectrometer devices connected.')
else:
raise NoSpectrometerConnected('Device not found. There are spectrometer devices connected {0}, but they do not match either the model or the serial number requested.'.format(anySpectroDevice))
return device
def getAllSubclasses(aClass):
    """Return all leaf subclasses of *aClass*.

    A leaf is a class with no subclasses of its own; intermediate
    subclasses are descended into recursively and not included.
    """
    leaves = []
    for child in aClass.__subclasses__():
        if child.__subclasses__():
            leaves.extend(getAllSubclasses(child))
        else:
            leaves.append(child)
    return leaves
if __name__ == "__main__":
devices = getAllSubclasses(Spectrometer)
for dev in devices:
print("{0}".format(dev))
|
985,287 | 02b2d9eb530e78e426b62776a122120adc4fb209 | import os
import sys
import cv2
import json
import time
import socket
import datetime
from collections import defaultdict
from enum import Enum
from is_wire.core import Channel, Subscription, Message, Logger
from is_msgs.image_pb2 import ObjectAnnotations
from utils import load_options, make_pb_image, FrameVideoFetcher
from google.protobuf.json_format import MessageToDict
MIN_REQUESTS = 5
MAX_REQUESTS = 10
DEADLINE_SEC = 15.0
class State(Enum):
MAKE_REQUESTS = 1
RECV_REPLIES = 2
CHECK_END_OF_VIDEO_AND_SAVE = 3
CHECK_FOR_TIMEOUTED_REQUESTS = 4
EXIT = 5
log = Logger(name='Request2dSkeletons')
options = load_options(print_options=False)
if not os.path.exists(options.folder):
log.critical("Folder '{}' doesn't exist", options.folder)
sys.exit(-1)
files = next(os.walk(options.folder))[2] # only files from first folder level
video_files = list(filter(lambda x: x.endswith('.mp4'), files))
pending_videos = []
n_annotations = {}
for video_file in video_files:
base_name = video_file.split('.')[0]
annotation_file = '{}_2d.json'.format(base_name)
annotation_path = os.path.join(options.folder, annotation_file)
video_path = os.path.join(options.folder, video_file)
cap = cv2.VideoCapture(video_path)
n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
if os.path.exists(annotation_path):
# check if all annotations were done
with open(annotation_path, 'r') as f:
annotations_data = json.load(f)
n_annotations_on_file = len(annotations_data['annotations'])
if n_annotations_on_file == n_frames:
log.info(
"Video '{}' already annotated at '{}' with {} annotations",
video_file, annotations_data['created_at'],
n_annotations_on_file)
continue
pending_videos.append(video_file)
n_annotations[base_name] = n_frames
if len(pending_videos) == 0:
log.info("Exiting...")
sys.exit(-1)
channel = Channel(options.broker_uri)
subscription = Subscription(channel)
requests = {}
annotations_received = defaultdict(dict)
state = State.MAKE_REQUESTS
frame_fetcher = FrameVideoFetcher(
video_files=pending_videos, base_folder=options.folder)
while True:
if state == State.MAKE_REQUESTS:
state = State.RECV_REPLIES
if len(requests) < MIN_REQUESTS:
while len(requests) <= MAX_REQUESTS:
base_name, frame_id, frame = frame_fetcher.next()
if frame is None:
if len(requests) == 0:
state = State.EXIT
break
pb_image = make_pb_image(frame)
msg = Message(content=pb_image, reply_to=subscription)
msg.timeout = DEADLINE_SEC
channel.publish(msg, topic='SkeletonsDetector.Detect')
requests[msg.correlation_id] = {
'content': pb_image,
'base_name': base_name,
'frame_id': frame_id,
'requested_at': time.time()
}
continue
elif state == State.RECV_REPLIES:
try:
msg = channel.consume(timeout=1.0)
if msg.status.ok():
annotations = msg.unpack(ObjectAnnotations)
cid = msg.correlation_id
if cid in requests:
base_name = requests[cid]['base_name']
frame_id = requests[cid]['frame_id']
annotations_received[base_name][frame_id] = MessageToDict(
annotations,
preserving_proto_field_name=True,
including_default_value_fields=True)
del requests[cid]
state = State.CHECK_END_OF_VIDEO_AND_SAVE
except socket.timeout:
state = State.CHECK_FOR_TIMEOUTED_REQUESTS
continue
elif state == State.CHECK_END_OF_VIDEO_AND_SAVE:
for base_name in list(annotations_received.keys()):
annotations_dict = annotations_received[base_name]
if len(annotations_dict) == n_annotations[base_name]:
output_annotations = {
'annotations':
[x[1] for x in sorted(annotations_dict.items())],
'created_at':
datetime.datetime.now().isoformat()
}
filename = os.path.join(options.folder,
'{}_2d.json'.format(base_name))
with open(filename, 'w') as f:
json.dump(output_annotations, f, indent=2)
del annotations_received[base_name]
log.info('{} has been saved.', filename)
state = State.CHECK_FOR_TIMEOUTED_REQUESTS
continue
elif state == State.CHECK_FOR_TIMEOUTED_REQUESTS:
new_requests = {}
for cid in list(requests.keys()):
request = requests[cid]
if (request['requested_at'] + DEADLINE_SEC) > time.time():
continue
msg = Message(content=request['content'], reply_to=subscription)
msg.timeout = DEADLINE_SEC
channel.publish(msg, topic='SkeletonsDetector.Detect')
new_requests[msg.correlation_id] = {
'content': request['content'],
'base_name': request['base_name'],
'frame_id': request['frame_id'],
'requested_at': time.time()
}
del requests[cid]
log.warn("Message '{}' timeouted. Sending another request.", cid)
requests.update(new_requests)
state = State.MAKE_REQUESTS
continue
elif state == State.EXIT:
log.info("Exiting...")
sys.exit(-1)
else:
state = State.MAKE_REQUESTS
continue
|
985,288 | af62e2103734f256991e8e4efcb935e5f5ce6536 | import os
import pandas as pd
from collections import defaultdict
from sqlalchemy import *
from sqlalchemy.sql import select
from sqlalchemy.schema import *
from sqlalchemy.orm import sessionmaker
from invoke import task
@task
def sqlserver():
    # Connect to a local SQL Server instance and print the list of user tables.
    # NOTE(review): credentials are hardcoded in the URL — move to config/env.
    db = create_engine("mssql+pymssql://test:test@127.0.0.1/pengtao_test",
                       deprecate_large_types=True)
    # xtype='U' selects user tables; 'dtproperties' is a legacy system table.
    sql = "select id,name from sysobjects where xtype='U'and name<>'dtproperties' order by name"
    df = pd.read_sql_query(sql, db)
    # Python 2 print statement — this file targets Python 2.
    print df
@task
def oracle():
    """Connect to an Oracle instance and print a small sample of patient gender names.

    NOTE(review): credentials are hardcoded in the connection URL — move to config/env.
    """
    # Force the Oracle client to return UTF-8 encoded strings.
    os.environ['NLS_LANG'] = '.AL32UTF8'
    db_engine=create_engine('oracle://pengtao:B5u4*Wi2@172.18.1.17:1521/orcl', echo=False)
    # rownum < 10 limits the sample to the first 9 rows.
    sql = "select * from cdr_sc_2.patient where rownum < 10"
    df_patient = pd.read_sql_query(sql, db_engine)
    # Python 2 print statement — this file targets Python 2.
    print df_patient.gender_name
|
985,289 | 4c7d61a3b204ce9c29e09d83f2de23ffcec042ba | from sb3_contrib.common.wrappers.time_feature import TimeFeatureWrapper
|
985,290 | 339a76b63a416ece2a522ebf51f19e249bc92d1e | import numpy as np
from numpy.linalg import norm
from math import copysign
class HyperEdge:
    """An edge shared by one or more faces of a foldable structure.

    ``self.faces`` maps each attached face object to its fold angle in
    degrees; the sign of the angle encodes the fold direction.
    """

    @staticmethod
    def edge(allEdges, name, face, angle=0):
        """Find-or-create: return the edge called *name*.

        If an edge with that name exists in *allEdges*, *face* is joined to
        it; otherwise a new edge is created and, when *allEdges* supports
        ``append``, added to it.
        """
        if angle is None:
            angle = 0
        if allEdges is not None:
            for existing in allEdges:
                if existing.name == name:
                    existing.join(face, angle=angle)
                    return existing
        e = HyperEdge(name, face, angle)
        try:
            allEdges.append(e)
        except AttributeError:
            # allEdges is None or not list-like; the caller keeps the edge.
            pass
        return e

    def __init__(self, name, face=None, angle=0):
        self.name = name
        # faces: mapping of face -> signed fold angle (degrees)
        if face:
            self.faces = {face: angle}
        else:
            self.faces = {}

    def remove(self, face):
        """Detach *face* from this edge and disconnect the edge on the face."""
        if face in self.faces:
            self.faces.pop(face)
            try:
                e = face.edges.index(self)
                face.disconnect(e)
            except (ValueError, AttributeError):
                # face does not reference this edge, or lacks edges/disconnect
                pass

    def rename(self, name):
        """Change this edge's identifying name."""
        self.name = name

    def setAngle(self, face, angle):
        """Overwrite the fold angle of an already-attached face."""
        if face in self.faces:
            self.faces[face] = angle

    def join(self, face, fromface=None, angle=0, flipped=True):
        """Attach *face*, with *angle* expressed relative to *fromface*.

        The stored angle is (|base| + angle) mod 360; its sign is opposite
        to the base angle when *flipped*, same sign otherwise.
        """
        baseangle = 0
        if fromface in self.faces:
            baseangle = self.faces[fromface]
        newangle = (abs(baseangle) + angle) % 360
        if flipped:
            newangle = copysign(newangle, -baseangle)
        else:
            newangle = copysign(newangle, baseangle)
        self.faces[face] = newangle

    def mergeWith(self, other, angle=0, flip=False):
        """Absorb *other*: move all of its faces onto this edge, offsetting
        each fold angle by *angle*. Returns self.

        TODO (pre-existing): *flip* is accepted but not yet used to flip the
        edge orientation.
        """
        if other is None:
            return self
        # Iterate over a snapshot: popping from other.faces while iterating
        # its live keys() view raises RuntimeError on Python 3.
        for face in list(other.faces.keys()):
            da = other.faces.pop(face)
            face.replaceEdge(other, self)
            self.faces[face] = angle + da
        return self

    def __eq__(self, other):
        # Edges are identified purely by name.
        return self.name == other.name

    def __hash__(self):
        # Keep hashing consistent with name-based equality; defining __eq__
        # alone would make instances unhashable on Python 3.
        return hash(self.name)

    def __str__(self):
        return self.name + ": " + repr(self.faces)

    def __repr__(self):
        # Compact form: "<name>#<face count>", plus the angles when shared.
        ret = "%s#%d" % (self.name, len(self.faces))
        if len(self.faces) > 1:
            return ret + repr(self.faces.values())
        else:
            return ret
|
985,291 | ea7e2f5e1d80b1d3dbb2dd18cf0ec316bc45c5eb | import os
os.chdir('E:\Datacamp\Python\Introduction to database in Python')
from sqlalchemy import create_engine, select
engine = create_engine("sqlite:///:memory:") # In-memory database
connection = engine.connect()
# Import packages
from sqlalchemy import MetaData, Table
# Creaate metadata
metadata = MetaData()
##### CREATE TABLE
# Import Table, Column, String, Integer, Float, Boolean from sqlalchemy
from sqlalchemy import Table, Column, String, Integer, Float, Boolean
# Define a new table with a name, count, amount, and valid column: data
data = Table('data', metadata,
Column('name', String(255), unique=True),
Column('count', Integer(), default=1),
Column('amount', Float()),
Column('valid', Boolean(), default=False)
)
# Use the metadata to create the table
metadata.create_all(engine)
# Print the table details
print(repr(metadata.tables['data']))
data.constraints
#### INSERT 1 ROW
# Import insert and select from sqlalchemy
from sqlalchemy import insert, select
# Build an insert statement to insert a record into the data table: stmt
stmt = insert(data).values(name="Anna", count=1, amount=1000.00, valid=True)
# Execute the statement via the connection: results
results = connection.execute(stmt)
# Print result rowcount
print(results.rowcount)
# Build a select statement to validate the insert
stmt = select([data]).where(data.columns.name == "Anna")
# Print the result of executing the query.
print(connection.execute(stmt).first())
#### INSERT MULTIPLE ROWS
# Build a list of dictionaries: values_list
values_list = [
{'name': "Thanh", 'count': 3, 'amount': 1000.00, 'valid': True},
{"name": "Taylor", "count": 1, "amount": 750.00, "valid": False}
]
# Build an insert statement for the data table: stmt
stmt = insert(data)
# Execute stmt with the values_list: results
results = connection.execute(stmt, values_list)
# Print rowcount
print(results.rowcount)
connection.execute(select([data])).fetchall()
#### LOAD CSV INTO A TABLE
# Creaate metadata
metadata = MetaData()
# Define a new table
census = Table('census', metadata,
Column('state', String(30)),
Column('sex', String(1), default=1),
Column('age', Integer()),
Column('pop2000', Integer()),
Column('pop2008', Integer())
)
# Use the metadata to create the table
metadata.create_all(engine)
# Create a insert statement for census: stmt
stmt = insert(census)
# Create an empty list and zeroed row count: values_list, total_rowcount
values_list = []
total_rowcount = 0
# Define csv_reader object
import csv
# Enumerate the rows of csv_reader
with open('census.csv') as csvfile:
csv_reader = csv.reader(csvfile, delimiter = ",")
for idx, row in enumerate(csv_reader):
#create data and append to values_list
data = {'state': row[0], 'sex': row[1], 'age': row[2], 'pop2000': row[3],
'pop2008': row[4]}
values_list.append(data)
# Check to see if divisible by 51
if idx % 51 == 0:
results = connection.execute(stmt, values_list)
total_rowcount += results.rowcount
values_list = []
# Print total rowcount
print(total_rowcount)
connection.execute(select([census])).fetchall()
|
985,292 | a1c5896ccb9fa3c5ab7f2e858d06823ee8698bd9 | # -*- coding: utf-8 -*-
"""
subplots_matplot_test01
fun:
draw multi-pictures
env:
Linux ubuntu 4.4.0-31-generic x86_64 GNU;python 2.7;tensorflow1.10.1;Keras2.2.4
pip2,matplotlib2.2.3
"""
#from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
#创建一个画中画
ax1 = plt.axes()
ax2 = plt.axes([0.65, 0.65, 0.2, 0.2])
#plt.show()
#面向对象的画图接口中类似的命令由fig.add_axes(),下面用这个命令创建两个竖直排列的坐标轴
fig = plt.figure()
ax1 = fig.add_axes([0.1, 0.5, 0.8, 0.4],xticklabels=[], ylim=(-1.2, 1.2))
ax2 = fig.add_axes([0.1, 0.1, 0.8, 0.4], ylim=(-1.2, 1.2))
x = np.linspace(0, 10)
ax1.plot(np.sin(x))
ax2.plot(np.cos(x))
plt.show()
|
985,293 | 3d9792807235cd9190a094823a73a377ed9a9809 | bl_info = {
"name": "Render slots",
"author": "Chebhou",
"version": (1, 0),
"blender": (2, 74, 0),
"location": "UV/Image editor > Image >Save render slots",
"description": "Saves all render slots",
"warning": "",
"wiki_url": "",
"category": "Render"}
import bpy
from bpy.types import Operator
def save(self, context):
    """Save every non-empty render slot of each Render Result image to disk.

    Files are written using the scene's render output path and extension as
    ``<filepath><image name>_slot_<i><ext>``.
    """
    scene = context.scene
    path = scene.render.filepath
    ext = scene.render.file_extension
    for img in bpy.data.images:
        if img.type == 'RENDER_RESULT':
            print(img.name)
            # Iterate the actual slot collection rather than assuming the
            # historical fixed count of 8 slots.
            for i in range(len(img.render_slots)):
                img.render_slots.active_index = i
                try:
                    img.save_render(path+img.name+"_slot_%d"%i+ext, scene)
                    print("slot %d saved"%i)
                except Exception:
                    # save_render raises (RuntimeError in Blender — confirm)
                    # when the slot was never rendered; treat it as empty.
                    print("Slot %d is empty"%i)
class save_slots(Operator):
    """Save render slots"""
    # NOTE: the docstring above doubles as the operator tooltip in the UI.
    bl_idname = "image.save_all_slots"  # identifier used by scripts/keymaps
    bl_label = "save render slots"  # label shown in menus and search
    def execute(self, context):
        # Delegate to the module-level save() helper; always report success
        # (per-slot failures are only printed to the console).
        save(self, context)
        return {'FINISHED'}
def add_object_button(self, context):
    # Menu-draw callback: appends the operator entry to the Image menu.
    self.layout.operator(
        save_slots.bl_idname,
        text="Save render slots",
        icon='FILE_IMAGE')
def register():
    # Standard add-on entry point: register the operator and extend the menu.
    bpy.utils.register_class(save_slots)
    bpy.types.IMAGE_MT_image.append(add_object_button)
def unregister():
    # Mirror of register(): remove the menu entry and the operator class.
    bpy.utils.unregister_class(save_slots)
    bpy.types.IMAGE_MT_image.remove(add_object_button)
if __name__ == "__main__":
    # Allows running the file directly from Blender's text editor.
    register()
|
985,294 | 6f8dd7eb00835c986ac5be5c7f8e6a6a569ed953 | '''
类似95 + memo
'''
class Solution:
    def numTrees(self, n: int) -> int:
        """Count structurally unique BSTs storing values 1..n.

        Memoized divide-and-conquer: every value in a range is tried as the
        root, multiplying the shape counts of the left and right subranges.
        Returns 0 for n == 0, matching the original contract.
        """
        if n == 0:
            return 0
        memo = {}

        def count(lo: int, hi: int) -> int:
            # An empty or single-value range forms exactly one subtree shape.
            if lo >= hi:
                return 1
            key = (lo, hi)
            if key not in memo:
                memo[key] = sum(
                    count(lo, root - 1) * count(root + 1, hi)
                    for root in range(lo, hi + 1)
                )
            return memo[key]

        return count(1, n)
class Solution:
    def numTrees(self, n: int) -> int:
        """Count structurally unique BSTs on values 1..n.

        Bottom-up Catalan-number DP: counts[k] is the number of unique BSTs
        holding k nodes; a tree of size k splits into a left subtree of
        ``left`` nodes and a right subtree of ``k - 1 - left`` nodes.
        Returns 0 for n == 0, matching the original contract.
        """
        if n == 0:
            return 0
        # counts[0] = counts[1] = 1 (empty tree / single node)
        counts = [1, 1] + [0] * (n - 1)
        for size in range(2, n + 1):
            counts[size] = sum(
                counts[left] * counts[size - 1 - left] for left in range(size)
            )
        return counts[n]
985,295 | cf1383b1b5cbb5630b3c260fc6d40cb8b90f47d4 | first_line = input().split()
second_line = input().split()
third_line = input().split()
is_winner = False
def check_line(line):
    """Check one row for three identical marks; announce the winner and set the flag."""
    global is_winner
    outcomes = {'1': 'First player won', '2': 'Second player won'}
    # A row is won when all three cells hold the same player's mark.
    if line[0] == line[1] == line[2] and line[0] in outcomes:
        print(outcomes[line[0]])
        is_winner = True
def check_column(num):
    """Check whether column *num* is completed by either player."""
    global is_winner
    cells = (first_line[num], second_line[num], third_line[num])
    outcomes = {'1': 'First player won', '2': 'Second player won'}
    if cells[0] == cells[1] == cells[2] and cells[0] in outcomes:
        print(outcomes[cells[0]])
        is_winner = True
def check_first_diagonal():
    """Check the main (top-left to bottom-right) diagonal for a winner."""
    global is_winner
    cells = (first_line[0], second_line[1], third_line[2])
    outcomes = {'1': 'First player won', '2': 'Second player won'}
    if cells[0] == cells[1] == cells[2] and cells[0] in outcomes:
        print(outcomes[cells[0]])
        is_winner = True
def check_second_diagonal():
    """Check the anti-diagonal (top-right to bottom-left) for a winner."""
    global is_winner
    cells = (first_line[2], second_line[1], third_line[0])
    outcomes = {'1': 'First player won', '2': 'Second player won'}
    if cells[0] == cells[1] == cells[2] and cells[0] in outcomes:
        print(outcomes[cells[0]])
        is_winner = True
# Evaluate every possible winning line; each check prints the winner and
# sets is_winner as a side effect when it finds three-in-a-row.
check_line(first_line)
check_line(second_line)
check_line(third_line)
check_column(0)
check_column(1)
check_column(2)
check_first_diagonal()
check_second_diagonal()
# NOTE(review): if both players somehow have completed lines, multiple
# winner messages are printed — fine only for valid game states.
if not is_winner:
    print('Draw!')
|
985,296 | 48bd57aa1a7c6f10abd85b0828cbe899abe3e515 | import os, sys, re
import subprocess
import argparse
# CLI flags; all of them are forwarded verbatim to every submodule's
# build.py by build() below.
parser = argparse.ArgumentParser()
parser.add_argument('--maven', help="only maven build", action='store_true')
parser.add_argument('--docker', help="only docker build", action='store_true')
parser.add_argument('--version', help='version of build (MAJOR.MINOR.PATCH-TAG)')
parser.add_argument('--notests', help="deactivate integration tests", action='store_true')
parser.add_argument('--deploy', help="deploy docker container to remote", action='store_true')
# parse_known_args so unexpected flags only warn instead of aborting
args, unknown = parser.parse_known_args()
if unknown:
    print("Unknown arguments "+str(unknown))
# Wrapper to print out command
def call(command):
    """Echo *command*, run it through the shell, and return its exit status."""
    print("Executing: "+command)
    exit_status = subprocess.call(command, shell=True)
    return exit_status
# calls build scripts in every module with same flags
def build(module):
    """Invoke *module*'s local ``build.py`` with the same CLI flags this
    script received, aborting the whole build when the sub-build fails.
    """
    build_command = "python build.py"
    # forward each flag exactly as it was given to this script
    if args.maven:
        build_command += " --maven"
    if args.docker:
        build_command += " --docker"
    if args.version:
        build_command += " --version="+str(args.version)
    if args.deploy:
        build_command += " --deploy"
    if args.notests:
        build_command += " --notests"
    working_dir = os.path.dirname(os.path.realpath(__file__))
    full_command = "cd '"+module+"' && "+build_command+" && cd '"+working_dir+"'"
    print("Building "+module+" with: "+full_command)
    failed = call(full_command)
    if failed:
        print("Failed to build module "+module)
        # Exit non-zero so callers/CI see the failure; the original bare
        # sys.exit() reported success (exit code 0) on a failed build.
        sys.exit(1)
# Version Handling
if args.deploy and not args.version:
    print("Please provide a version for deployment (--version=MAJOR.MINOR.PATCH-TAG)")
    # usage error: exit non-zero so callers/CI notice the failure
    sys.exit(1)
elif args.deploy:
    # for deployment, use version as it is provided
    args.version = str(args.version)
elif not args.version:
    # for non-deployment builds with no version given, derive a SNAPSHOT
    # version from the project build version in the local pom.xml
    with open('pom.xml', 'r') as pom_file:
        data = pom_file.read().replace('\n', '')
    # NOTE(review): re.search(...) returns None when no <version> tag exists,
    # which would raise AttributeError here — confirm pom.xml always has one.
    current_version = re.search('<version>(.+?)</version>', data).group(1)
    if current_version:
        current_version = current_version.strip()
        if "SNAPSHOT" not in current_version:
            args.version = current_version+"-SNAPSHOT"
        else:
            args.version = str(current_version)
    else:
        print("Failed to detect the current version")
else:
    args.version = str(args.version)
    if "SNAPSHOT" not in args.version:
        # for non-deployment builds, force the snapshot tag
        args.version += "-SNAPSHOT"
# update version in all maven submodules
if args.version:
    failed = call("mvn versions:set -DnewVersion="+args.version)
    if failed:
        print("Failed to apply version "+args.version)
        call("mvn versions:revert")
        # propagate the failure as a non-zero exit status (bare sys.exit()
        # would exit 0 and hide the failure from CI)
        sys.exit(1)
    call("mvn versions:commit")
if args.maven or (not args.maven and not args.docker):
    # Check if all projects can be built, otherwise exit build script
    failed = call("mvn clean package")
    if failed:
        print("Failed to build project")
        # propagate the failure as a non-zero exit status
        sys.exit(1)
    # Do not deploy maven artifacts:
    # call("mvn -N clean deploy")
    call("mvn -N clean install")
else:
    print("Only docker build is selected. Nothing to build here.")
# call build.py of every sub-project with same flags
# libraries
build("environment-lib")
build("service-lib")
# services
build("lab-service")
|
985,297 | 8db04d4d3abdad11cc3d082d2832aa13a5f91586 | from tornado.websocket import WebSocketHandler
from com.TimeWheel import TimeWheel
import logging
from com import Jwt
class PushHandler(WebSocketHandler):
    """WebSocket handler tracking online users and answering heartbeats.

    Connections are authenticated via a JWT ``token`` cookie. Live handlers
    are kept in the class-level ``users`` set, which the shared ``TimeWheel``
    sweeps to drop clients silent for more than ``TIMEOUT_SECOND`` seconds.
    """
    TIMEOUT_SECOND = 45  # idle timeout enforced by the time wheel
    users = set()  # container for currently-online handler instances (shared class-wide)
    timeWheel = TimeWheel(users, TIMEOUT_SECOND)
    def open(self):
        """Validate the JWT cookie; register the connection or close it."""
        token = self.get_cookie('token')
        try:
            # Parsed claims are unused; the call only validates the token.
            Jwt.parseToken(token)
        except Exception:
            # Any parse failure (bad signature, expiry, malformed token)
            # means the client is unauthenticated — reject the socket.
            logging.error('[{}] token = {} is invalid, close connection'.format(self.request.remote_ip, token))
            self.close()
        else:
            logging.info('[{}] connection add to users'.format(self.request.remote_ip))
            self.users.add(self)  # register the connection once authenticated
            self.timeWheel.push(self)
    def on_message(self, message):
        """Refresh the client's liveness; echo heartbeat probes back."""
        logging.info('recv \'{}\' from [{}]'.format(message, self.request.remote_ip))
        self.timeWheel.push(self)
        if message == 'heartbeat':
            self.write_message(message)
            return
    def on_close(self):
        """Drop the connection from the online-user set."""
        logging.info('[{}] close'.format(self.request.remote_ip))
        # discard() is a no-op when the handler was never registered
        # (e.g. the connection was rejected in open()).
        self.users.discard(self)
985,298 | 22e3e0e2fe015f420d47a7831a645f503c49711d | from .vocabulary import Vocabulary
import pickle
import os
class GloveVocabulary(Vocabulary):
    """Vocabulary backed by pre-processed GloVe index files.

    Loads pickled ``word -> index`` / ``index -> word`` mappings generated
    for a given embedding dimension, then registers the special tokens.
    """
    def __init__(self, dim):
        # dim: GloVe embedding dimensionality (e.g. 50/100/200/300 —
        # TODO confirm); used as a filename suffix when locating the files.
        super(GloveVocabulary, self).__init__()
        self.dim = dim
    def load_vocab(self, file_name, data_dir=None):
        """Populate word2idx/idx2word from the processed GloVe pickles.

        Reads <data_dir>/glove/processed/<file_name>.<dim>_w2i.pkl and
        ..._i2w.pkl, then appends <unk>/<start>/<end>/<pad>.
        NOTE(review): data_dir=None makes os.path.join raise TypeError —
        presumably callers always supply it; confirm or make it required.
        """
        file_name = os.path.join(data_dir,
                                 'glove', 'processed',
                                 file_name + '.' + str(self.dim))
        with open(file_name+'_w2i.pkl', 'rb') as f:
            self.word2idx = pickle.load(f)
        with open(file_name+'_i2w.pkl', 'rb') as f:
            self.idx2word = pickle.load(f)
        # length is recorded before the special tokens are appended —
        # NOTE(review): confirm add_word (from the base class) updates it.
        self.length = len(self.word2idx)
        self.add_word('<unk>')
        self.add_word('<start>')
        self.add_word('<end>')
        self.add_word('<pad>')
    def save_vocab(self, file_name='vocab.pkl', data_dir=None):
        # Persistence is intentionally not implemented for the GloVe-backed
        # vocabulary (the source pickles are the canonical store).
        pass
985,299 | 89014040d6d07a706789eb2bfeb65f11c52cccae | import torch
def weighted_loss(
    bce_loss: torch.Tensor, label: torch.Tensor, weight: torch.Tensor
) -> torch.Tensor:
    """Scale an element-wise BCE loss with per-class positive weights.

    Positive positions (label == 1) are scaled by the class weight
    (broadcast from ``weight`` transposed); negative positions keep a
    scale of 1. Rows are averaged, then summed over the batch.
    """
    positive_scale = label * weight.transpose(0, 1)
    negative_scale = 1 - label
    scale = positive_scale + negative_scale
    return (bce_loss * scale).mean(dim=1).sum()
def masked_fill_for_qa(
    prediction: torch.Tensor, entity_mask: torch.Tensor = None
) -> torch.Tensor:
    """Set non-entity positions of *prediction* to -inf (e.g. before softmax).

    Positions where ``entity_mask`` is zero/False are filled with -inf;
    when no mask is given, *prediction* is returned unchanged.
    """
    if entity_mask is not None:
        # `entity_mask == 0` handles both integer and boolean masks; the
        # original `(1 - entity_mask).bool()` raises on bool tensors, where
        # subtraction is unsupported.
        return prediction.masked_fill(entity_mask == 0, float("-inf"))
    return prediction
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.