content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#!/usr/bin/env python
# encoding: utf-8
"""
Advent of Code 2020 - Day 8 - Challenge 1
https://adventofcode.com/2020/day/8
Solution: 1384
"""
__author__ = "Filippo Corradino"
__email__ = "filippo.corradino@gmail.com"
from aocmodule import Processor
if __name__ == "__main__":
    # NOTE(review): main() is not defined anywhere in this chunk of the file;
    # presumably the solver entry point was truncated. Confirm against the
    # full source before running as a script.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
37811,
198,
2782,
1151,
286,
6127,
12131,
532,
3596,
807,
532,
13879,
352,
198,
5450,
1378,
324,
1151,
1659,
8189,
13,
785,
14,
42334,
14,
820,
... | 2.59292 | 113 |
#!/usr/bin/env python
# coding:utf-8
# Based on GAppProxy 2.0.0 by Du XiaoGang <dugang.2008@gmail.com>
# Based on WallProxy 0.4.0 by Hust Moon <www.ehust@gmail.com>
# Contributor:
# Phus Lu <phus.lu@gmail.com>
# Hewig Xu <hewigovens@gmail.com>
# Ayanamist Yang <ayanamist@gmail.com>
# V.E.O <V.E.O@tom.com>
# Max Lv <max.c.lv@gmail.com>
# AlsoTang <alsotang@gmail.com>
# Christopher Meng <i@cicku.me>
# Yonsm Guo <YonsmGuo@gmail.com>
# Parkman <cseparkman@gmail.com>
# Ming Bai <mbbill@gmail.com>
# Bin Yu <yubinlove1991@gmail.com>
# lileixuan <lileixuan@gmail.com>
# Cong Ding <cong@cding.org>
# Zhang Youfu <zhangyoufu@gmail.com>
# Lu Wei <luwei@barfoo>
# Harmony Meow <harmony.meow@gmail.com>
# logostream <logostream@gmail.com>
# Rui Wang <isnowfy@gmail.com>
# Wang Wei Qiang <wwqgtxx@gmail.com>
# Felix Yan <felixonmars@gmail.com>
# Sui Feng <suifeng.me@qq.com>
# QXO <qxodream@gmail.com>
# Geek An <geekan@foxmail.com>
# Poly Rabbit <mcx_221@foxmail.com>
# oxnz <yunxinyi@gmail.com>
# Shusen Liu <liushusen.smart@gmail.com>
# Yad Smood <y.s.inside@gmail.com>
# Chen Shuang <cs0x7f@gmail.com>
# cnfuyu <cnfuyu@gmail.com>
# cuixin <steven.cuixin@gmail.com>
# s2marine0 <s2marine0@gmail.com>
# Toshio Xiang <snachx@gmail.com>
# Bo Tian <dxmtb@163.com>
# Virgil <variousvirgil@gmail.com>
# hub01 <miaojiabumiao@yeah.net>
# v3aqb <sgzz.cj@gmail.com>
# Oling Cat <olingcat@gmail.com>
# Meng Zhuo <mengzhuo1203@gmail.com>
# zwhfly <zwhfly@163.com>
# Hubertzhang <hubert.zyk@gmail.com>
# arrix <arrixzhou@gmail.com>
# gwjwin <gwjwin@sina.com>
# Jobin <1149225004@qq.com>
# Zhuhao Wang <zhuhaow@gmail.com>
# YFdyh000 <yfdyh000@gmail.com>
# zzq1015 <zzq1015@users.noreply.github.com>
# Zhengfa Dang <zfdang@users.noreply.github.com>
# haosdent <haosdent@gmail.com>
# xk liu <lxk1012@gmail.com>
__version__ = '3.2.3'
import os
import sys
import sysconfig
# Python 2 only: re-expose setdefaultencoding (deleted by site.py) and make
# UTF-8 the process-wide default encoding.
reload(sys).setdefaultencoding('UTF-8')
sys.dont_write_bytecode = True  # keep the deploy directory free of .pyc files
# Prefer the bundled noarch packages first, platform-specific eggs last.
sys.path = [(os.path.dirname(__file__) or '.') + '/packages.egg/noarch'] + sys.path + [(os.path.dirname(__file__) or '.') + '/packages.egg/' + sysconfig.get_platform().split('-')[0]]
try:
    # Monkey-patch the stdlib with gevent's cooperative versions as early as
    # possible, before any socket/thread use.
    __import__('gevent.monkey', fromlist=['.']).patch_all()
except (ImportError, SystemError):
    sys.exit(sys.stderr.write('please install python-gevent\n'))
import base64
import collections
import ConfigParser
import errno
import httplib
import io
import Queue
import random
import re
import socket
import ssl
import struct
import thread
import threading
import time
import traceback
import urllib2
import urlparse
import zlib
import select
import gevent
import gevent.server
import OpenSSL
# Catch-all tuple of "network went away" exception types used by handlers.
NetWorkIOError = (socket.error, ssl.SSLError, OpenSSL.SSL.Error, OSError)
# NOTE(review): ``Logging`` is not defined in this chunk — presumably supplied
# by truncated code above. This line also replaces the stdlib ``logging``
# module in sys.modules for every subsequent importer; confirm intent.
logging = sys.modules['logging'] = Logging('logging')
from proxylib import AuthFilter
from proxylib import AutoRangeFilter
from proxylib import BaseFetchPlugin
from proxylib import BaseProxyHandlerFilter
from proxylib import BlackholeFilter
from proxylib import CertUtil
from proxylib import CipherFileObject
from proxylib import deflate
from proxylib import DirectFetchPlugin
from proxylib import DirectRegionFilter
from proxylib import dnslib_record2iplist
from proxylib import dnslib_resolve_over_tcp
from proxylib import dnslib_resolve_over_udp
from proxylib import FakeHttpsFilter
from proxylib import ForceHttpsFilter
from proxylib import CRLFSitesFilter
from proxylib import get_dnsserver_list
from proxylib import get_process_list
from proxylib import get_uptime
from proxylib import inflate
from proxylib import LocalProxyServer
from proxylib import message_html
from proxylib import MockFetchPlugin
from proxylib import AdvancedNet2
from proxylib import Net2
from proxylib import ProxyNet2
from proxylib import ProxyUtil
from proxylib import RC4Cipher
from proxylib import SimpleProxyHandler
from proxylib import spawn_later
from proxylib import StaticFileFilter
from proxylib import StripPlugin
from proxylib import StripPluginEx
from proxylib import URLRewriteFilter
from proxylib import UserAgentFilter
from proxylib import XORCipher
from proxylib import forward_socket
class RangeFetch(object):
    """Range Fetch Class

    NOTE(review): only the tuning constants are visible in this chunk; the
    range-fetch implementation itself appears to be truncated.
    """
    # number of concurrent fetch worker threads
    threads = 2
    # upper bound per fetched chunk: 4 MiB
    maxsize = 1024*1024*4
    # socket read buffer size in bytes
    bufsize = 8192
    # low-water mark before more data is requested: 512 KiB
    waitsize = 1024*512
class GAEFetchPlugin(BaseFetchPlugin):
    """gae fetch plugin

    NOTE(review): only the retry constant is visible in this chunk; the
    constructor and fetch logic are truncated.
    """
    # retry budget per request against the GAE backend
    max_retry = 2
class PHPFetchPlugin(BaseFetchPlugin):
    """php fetch plugin

    NOTE(review): class body is not visible in this chunk (truncated).
    """
class VPSServer(gevent.server.StreamServer):
    """VPS server: relays raw TCP traffic between a local client socket and
    a remote peer socket."""

    net2 = Net2()

    def forward_socket(self, local, remote, timeout, bufsize):
        """Pump bytes in both directions until EOF, a socket error condition,
        or *timeout* seconds of continuous inactivity.

        Closes both sockets before returning in every terminating case.
        """
        tick = 1
        remaining = timeout
        while True:
            remaining -= tick
            if remaining <= 0:
                # idle budget exhausted: silently stop forwarding
                break
            readable, _, broken = select.select(
                [local, remote], [], [local, remote], tick)
            if broken:
                # either side reported an exceptional condition: tear down
                local.close()
                remote.close()
                return
            for src, dst in ((remote, local), (local, remote)):
                if src in readable:
                    payload = src.recv(bufsize)
                    if not payload:
                        # orderly EOF from one side ends the whole relay
                        remote.close()
                        local.close()
                        return
                    dst.sendall(payload)
            if readable:
                # any activity resets the inactivity budget
                remaining = timeout
class GAEFetchFilter(BaseProxyHandlerFilter):
    """gae fetch filter: pick the fetch plugin (direct/strip/gae/php) for
    each incoming request."""

    #https://github.com/AppScale/gae_sdk/blob/master/google/appengine/api/taskqueue/taskqueue.py#L241
    MAX_URL_LENGTH = 2083

    def filter(self, handler):
        """https://developers.google.com/appengine/docs/python/urlfetch/"""
        command = handler.command
        if command == 'CONNECT' or command in ('GET', 'POST', 'HEAD', 'PUT', 'DELETE', 'PATCH'):
            alias = handler.net2.getaliasbyname(handler.path)
            if alias:
                # Known iplist alias: bypass GAE and connect directly.
                return 'direct', {'cache_key': '%s:%d' % (alias, handler.port), 'headfirst': '.google' in handler.host}
            if command == 'CONNECT':
                do_ssl_handshake = 440 <= handler.port <= 450 or 1024 <= handler.port <= 65535
                return 'strip', {'do_ssl_handshake': do_ssl_handshake}
            # Plain fetch methods that GAE urlfetch supports go through GAE.
            return 'gae', {}
        # Anything else (e.g. WebDAV verbs) needs the PHP backend.
        if 'php' in handler.handler_plugins:
            return 'php', {}
        logging.warning('"%s %s" not supported by GAE, please enable PHP mode!', command, handler.path)
        return 'direct', {}
class WithGAEFilter(BaseProxyHandlerFilter):
    """withgae/withphp filter

    NOTE(review): class body is not visible in this chunk (truncated).
    """
class GAEProxyHandler(SimpleProxyHandler):
    """GAE Proxy Handler"""

    handler_filters = [GAEFetchFilter()]
    # shared per-class plugin registry; 'gae' is added lazily in first_run()
    handler_plugins = {'direct': DirectFetchPlugin(),
                       'mock': MockFetchPlugin(),
                       'strip': StripPlugin(),}

    def first_run(self):
        """GAEProxyHandler setup, init domain/iplist map"""
        if not common.PROXY_ENABLE:
            logging.info('resolve common.IPLIST_ALIAS names=%s to iplist', list(common.IPLIST_ALIAS))
            common.resolve_iplist()
        random.shuffle(common.GAE_APPIDS)
        # Build the GAE plugin from the parsed configuration and publish it
        # on the class so every handler instance shares it.
        self.__class__.handler_plugins['gae'] = GAEFetchPlugin(common.GAE_APPIDS, common.GAE_PASSWORD, common.GAE_PATH, common.GAE_MODE, common.GAE_CACHESOCK, common.GAE_KEEPALIVE, common.GAE_OBFUSCATE, common.GAE_PAGESPEED, common.GAE_VALIDATE, common.GAE_OPTIONS)
        if not common.PROXY_ENABLE:
            net2 = AdvancedNet2(window=common.GAE_WINDOW, ssl_version=common.GAE_SSLVERSION, dns_servers=common.DNS_SERVERS, dns_blacklist=common.DNS_BLACKLIST)
            for name, iplist in common.IPLIST_ALIAS.items():
                net2.add_iplist_alias(name, iplist)
                if name == 'google_hk':
                    # Periodically grow the google_hk pool in the background.
                    # NOTE(review): extend_iplist is not visible in this chunk.
                    for delay in (30, 60, 150, 240, 300, 450, 600, 900):
                        spawn_later(delay, self.extend_iplist, name)
            net2.add_fixed_iplist(common.IPLIST_PREDEFINED)
            for pattern, hosts in common.RULE_MAP.items():
                net2.add_rule(pattern, hosts)
            if common.GAE_CACHESOCK:
                net2.enable_connection_cache()
            if common.GAE_KEEPALIVE:
                net2.enable_connection_keepalive()
            net2.enable_openssl_session_cache()
            self.__class__.net2 = net2
class PHPFetchFilter(BaseProxyHandlerFilter):
    """php fetch filter

    NOTE(review): class body is not visible in this chunk (truncated).
    """
class PHPProxyHandler(SimpleProxyHandler):
    """PHP Proxy Handler"""

    handler_filters = [PHPFetchFilter()]
    handler_plugins = {'direct': DirectFetchPlugin(),
                       'mock': MockFetchPlugin(),
                       'strip': StripPlugin(),}

    def first_run(self):
        """PHPProxyHandler setup, init domain/iplist map"""
        if not common.PROXY_ENABLE:
            hostname = urlparse.urlsplit(common.PHP_FETCHSERVER).hostname
            net2 = AdvancedNet2(window=4, ssl_version='TLSv1', dns_servers=common.DNS_SERVERS, dns_blacklist=common.DNS_BLACKLIST)
            if not common.PHP_HOSTS:
                # No pinned IPs configured: resolve the fetch server once.
                common.PHP_HOSTS = net2.gethostsbyname(hostname)
            # Pin the fetch server hostname to its resolved/configured IPs.
            net2.add_iplist_alias('php_fetchserver', common.PHP_HOSTS)
            net2.add_fixed_iplist(common.PHP_HOSTS)
            net2.add_rule(hostname, 'php_fetchserver')
            net2.enable_connection_cache()
            if common.PHP_KEEPALIVE:
                net2.enable_connection_keepalive()
            net2.enable_openssl_session_cache()
            self.__class__.net2 = net2
class PacUtil(object):
    """GoAgent Pac Util: pure-string converters from popular blocklist
    formats (AutoProxy/gfwlist, urlfilter.ini, Adblock Plus) into PAC
    ``FindProxyForURL``-style JavaScript functions.

    All methods are static and have no side effects beyond logging.
    """

    # FIX(review): the original stacked three ``@staticmethod`` decorators on
    # this method, which breaks calling it on Python < 3.10; one is enough.
    @staticmethod
    def autoproxy2pac(content, func_name='FindProxyForURLByAutoProxy', proxy='127.0.0.1:8087', default='DIRECT', indent=4):
        """Autoproxy to Pac, based on https://github.com/iamamac/autoproxy2pac

        :param content: raw AutoProxy/gfwlist text (first line is a header)
        :returns: JavaScript source of a PAC function named *func_name*
        """
        jsLines = []
        for line in content.splitlines()[1:]:
            if line and not line.startswith("!"):
                use_proxy = True
                if line.startswith("@@"):
                    # '@@' prefix marks a whitelist (direct) rule.
                    line = line[2:]
                    use_proxy = False
                return_proxy = 'PROXY %s' % proxy if use_proxy else default
                if line.startswith('/') and line.endswith('/'):
                    jsLine = 'if (/%s/i.test(url)) return "%s";' % (line[1:-1], return_proxy)
                elif line.startswith('||'):
                    domain = line[2:].lstrip('.')
                    # An earlier plain-keyword rule for the same domain is
                    # subsumed by this domain-anchored one; drop it.
                    if len(jsLines) > 0 and ('host.indexOf(".%s") >= 0' % domain in jsLines[-1] or 'host.indexOf("%s") >= 0' % domain in jsLines[-1]):
                        jsLines.pop()
                    jsLine = 'if (dnsDomainIs(host, ".%s") || host == "%s") return "%s";' % (domain, domain, return_proxy)
                elif line.startswith('|'):
                    jsLine = 'if (url.indexOf("%s") == 0) return "%s";' % (line[1:], return_proxy)
                elif '*' in line:
                    jsLine = 'if (shExpMatch(url, "*%s*")) return "%s";' % (line.strip('*'), return_proxy)
                elif '/' not in line:
                    jsLine = 'if (host.indexOf("%s") >= 0) return "%s";' % (line, return_proxy)
                else:
                    jsLine = 'if (url.indexOf("%s") >= 0) return "%s";' % (line, return_proxy)
                jsLine = ' ' * indent + jsLine
                if use_proxy:
                    jsLines.append(jsLine)
                else:
                    # Whitelist rules must be evaluated before proxy rules.
                    jsLines.insert(0, jsLine)
        function = 'function %s(url, host) {\r\n%s\r\n%sreturn "%s";\r\n}' % (func_name, '\n'.join(jsLines), ' '*indent, default)
        return function

    @staticmethod
    def autoproxy2pac_lite(content, func_name='FindProxyForURLByAutoProxy', proxy='127.0.0.1:8087', default='DIRECT', indent=4):
        """Autoproxy to Pac, based on https://github.com/iamamac/autoproxy2pac

        Lite variant: reduces every rule to a bare domain and emits a
        dictionary lookup walked up the domain hierarchy.
        """
        direct_domain_set = set([])
        proxy_domain_set = set([])
        for line in content.splitlines()[1:]:
            if line and not line.startswith(('!', '|!', '||!')):
                use_proxy = True
                if line.startswith("@@"):
                    line = line[2:]
                    use_proxy = False
                domain = ''
                if line.startswith('/') and line.endswith('/'):
                    line = line[1:-1]
                    # Only the common '^https?:\/\/[^\/]+<escaped-domain>'
                    # regex shape is supported.
                    if line.startswith('^https?:\\/\\/[^\\/]+') and re.match(r'^(\w|\\\-|\\\.)+$', line[18:]):
                        domain = line[18:].replace(r'\.', '.')
                    else:
                        logging.warning('unsupport gfwlist regex: %r', line)
                elif line.startswith('||'):
                    domain = line[2:].lstrip('*').rstrip('/')
                elif line.startswith('|'):
                    domain = urlparse.urlsplit(line[1:]).hostname.lstrip('*')
                elif line.startswith(('http://', 'https://')):
                    domain = urlparse.urlsplit(line).hostname.lstrip('*')
                elif re.search(r'^([\w\-\_\.]+)([\*\/]|$)', line):
                    domain = re.split(r'[\*\/]', line)[0]
                else:
                    pass
                if '*' in domain:
                    domain = domain.split('*')[-1]
                # A dotless token is a keyword rule, not a domain: skip it.
                if not domain or re.match(r'^\w+$', domain):
                    logging.debug('unsupport gfwlist rule: %r', line)
                    continue
                if use_proxy:
                    proxy_domain_set.add(domain)
                else:
                    direct_domain_set.add(domain)
        proxy_domain_list = sorted(set(x.lstrip('.') for x in proxy_domain_set))
        autoproxy_host = ',\r\n'.join('%s"%s": 1' % (' '*indent, x) for x in proxy_domain_list)
        # FIX(review): template written flush-left and the fragile
        # min-indent re.sub dedent removed — it raised AttributeError on any
        # template line containing no spaces.
        template = '''var autoproxy_host = {
%(autoproxy_host)s
};
function %(func_name)s(url, host) {
    var lastPos;
    do {
        if (autoproxy_host.hasOwnProperty(host)) {
            return 'PROXY %(proxy)s';
        }
        lastPos = host.indexOf('.') + 1;
        host = host.slice(lastPos);
    } while (lastPos >= 1);
    return '%(default)s';
}'''
        template_args = {'autoproxy_host': autoproxy_host,
                         'func_name': func_name,
                         'proxy': proxy,
                         'default': default}
        return template % template_args

    @staticmethod
    def urlfilter2pac(content, func_name='FindProxyForURLByUrlfilter', proxy='127.0.0.1:8086', default='DIRECT', indent=4):
        """urlfilter.ini to Pac, based on https://github.com/iamamac/autoproxy2pac"""
        jsLines = []
        # Only the [exclude] section carries rules; skip its header line.
        for line in content[content.index('[exclude]'):].splitlines()[1:]:
            if line and not line.startswith(';'):
                use_proxy = True
                if line.startswith("@@"):
                    line = line[2:]
                    use_proxy = False
                return_proxy = 'PROXY %s' % proxy if use_proxy else default
                if '*' in line:
                    jsLine = 'if (shExpMatch(url, "%s")) return "%s";' % (line, return_proxy)
                else:
                    jsLine = 'if (url == "%s") return "%s";' % (line, return_proxy)
                jsLine = ' ' * indent + jsLine
                if use_proxy:
                    jsLines.append(jsLine)
                else:
                    # Whitelist rules must be evaluated first.
                    jsLines.insert(0, jsLine)
        function = 'function %s(url, host) {\r\n%s\r\n%sreturn "%s";\r\n}' % (func_name, '\n'.join(jsLines), ' '*indent, default)
        return function

    @staticmethod
    def adblock2pac(content, func_name='FindProxyForURLByAdblock', proxy='127.0.0.1:8086', default='DIRECT', admode=1, indent=4):
        """adblock list to Pac, based on https://github.com/iamamac/autoproxy2pac

        :param admode: 0..3, selects how aggressive the generated PAC is
            (0 = everything direct, 3 = host + url + shExpMatch checks).
        """
        white_conditions = {'host': [], 'url.indexOf': [], 'shExpMatch': []}
        black_conditions = {'host': [], 'url.indexOf': [], 'shExpMatch': []}
        for line in content.splitlines()[1:]:
            # Skip comments and element-hiding rules (##, #@#).
            if not line or line.startswith('!') or '##' in line or '#@#' in line:
                continue
            use_proxy = True
            use_start = False
            use_end = False
            use_domain = False
            use_postfix = []
            if '$' in line:
                # Option suffix: only image/script filters are representable.
                posfixs = line.split('$')[-1].split(',')
                if any('domain' in x for x in posfixs):
                    continue
                if 'image' in posfixs:
                    use_postfix += ['.jpg', '.gif']
                elif 'script' in posfixs:
                    use_postfix += ['.js']
                else:
                    continue
                line = line.split('$')[0]
            if line.startswith("@@"):
                line = line[2:]
                use_proxy = False
            if '||' == line[:2]:
                line = line[2:]
                if '/' not in line:
                    use_domain = True
                else:
                    use_start = True
            elif '|' == line[0]:
                line = line[1:]
                use_start = True
            if line[-1] in ('^', '|'):
                line = line[:-1]
                if not use_postfix:
                    use_end = True
            line = line.replace('^', '*').strip('*')
            # NOTE: white_conditions are collected but deliberately unused —
            # the whitelist is disabled in the emitted PAC (see templates).
            conditions = black_conditions if use_proxy else white_conditions
            if use_start and use_end:
                conditions['shExpMatch'] += ['*%s*' % line]
            elif use_start:
                if '*' in line:
                    if use_postfix:
                        conditions['shExpMatch'] += ['*%s*%s' % (line, x) for x in use_postfix]
                    else:
                        conditions['shExpMatch'] += ['*%s*' % line]
                else:
                    conditions['url.indexOf'] += [line]
            elif use_domain and use_end:
                if '*' in line:
                    conditions['shExpMatch'] += ['%s*' % line]
                else:
                    conditions['host'] += [line]
            elif use_domain:
                if line.split('/')[0].count('.') <= 1:
                    if use_postfix:
                        conditions['shExpMatch'] += ['*.%s*%s' % (line, x) for x in use_postfix]
                    else:
                        conditions['shExpMatch'] += ['*.%s*' % line]
                else:
                    if '*' in line:
                        if use_postfix:
                            conditions['shExpMatch'] += ['*%s*%s' % (line, x) for x in use_postfix]
                        else:
                            conditions['shExpMatch'] += ['*%s*' % line]
                    else:
                        if use_postfix:
                            conditions['shExpMatch'] += ['*%s*%s' % (line, x) for x in use_postfix]
                        else:
                            conditions['url.indexOf'] += ['http://%s' % line]
            else:
                if use_postfix:
                    conditions['shExpMatch'] += ['*%s*%s' % (line, x) for x in use_postfix]
                else:
                    conditions['shExpMatch'] += ['*%s*' % line]
        # FIX(review): templates written flush-left (fragile dedent removed)
        # and the admode=2 template's bare 'function %s' corrected to
        # 'function %(func_name)s' — with a mapping argument the bare %s
        # formatted the whole kwargs dict into the output.
        templates = ['''function %(func_name)s(url, host) {
    return '%(default)s';
}''',
                     '''var blackhole_host = {
%(blackhole_host)s
};
function %(func_name)s(url, host) {
    // untrusted ablock plus list, disable whitelist until chinalist come back.
    if (blackhole_host.hasOwnProperty(host)) {
        return 'PROXY %(proxy)s';
    }
    return '%(default)s';
}''',
                     '''var blackhole_host = {
%(blackhole_host)s
};
var blackhole_url_indexOf = [
%(blackhole_url_indexOf)s
];
function %(func_name)s(url, host) {
    // untrusted ablock plus list, disable whitelist until chinalist come back.
    if (blackhole_host.hasOwnProperty(host)) {
        return 'PROXY %(proxy)s';
    }
    for (i = 0; i < blackhole_url_indexOf.length; i++) {
        if (url.indexOf(blackhole_url_indexOf[i]) >= 0) {
            return 'PROXY %(proxy)s';
        }
    }
    return '%(default)s';
}''',
                     '''var blackhole_host = {
%(blackhole_host)s
};
var blackhole_url_indexOf = [
%(blackhole_url_indexOf)s
];
var blackhole_shExpMatch = [
%(blackhole_shExpMatch)s
];
function %(func_name)s(url, host) {
    // untrusted ablock plus list, disable whitelist until chinalist come back.
    if (blackhole_host.hasOwnProperty(host)) {
        return 'PROXY %(proxy)s';
    }
    for (i = 0; i < blackhole_url_indexOf.length; i++) {
        if (url.indexOf(blackhole_url_indexOf[i]) >= 0) {
            return 'PROXY %(proxy)s';
        }
    }
    for (i = 0; i < blackhole_shExpMatch.length; i++) {
        if (shExpMatch(url, blackhole_shExpMatch[i])) {
            return 'PROXY %(proxy)s';
        }
    }
    return '%(default)s';
}''']
        template = templates[admode]
        template_kwargs = {'blackhole_host': ',\r\n'.join("%s'%s': 1" % (' '*indent, x) for x in sorted(black_conditions['host'])),
                           'blackhole_url_indexOf': ',\r\n'.join("%s'%s'" % (' '*indent, x) for x in sorted(black_conditions['url.indexOf'])),
                           'blackhole_shExpMatch': ',\r\n'.join("%s'%s'" % (' '*indent, x) for x in sorted(black_conditions['shExpMatch'])),
                           'func_name': func_name,
                           'proxy': proxy,
                           'default': default}
        return template % template_kwargs
class PacFileFilter(BaseProxyHandlerFilter):
    """pac file filter

    NOTE(review): class body is not visible in this chunk (truncated).
    """
class PACProxyHandler(SimpleProxyHandler):
    """pac proxy handler"""
    # Serve the generated PAC file, then static assets, then blackhole
    # anything that matched neither.
    handler_filters = [PacFileFilter(), StaticFileFilter(), BlackholeFilter()]
class Common(object):
    """Global Config Object

    Parses proxy.ini / proxy.user.ini (plus GOAGENT_* environment-variable
    overrides) into flat UPPER_CASE attributes consumed by the handlers.
    """

    # env vars shaped like GOAGENT_<SECTION>_<OPTION> override the ini files
    ENV_CONFIG_PREFIX = 'GOAGENT_'

    def __init__(self):
        """load config from proxy.ini"""
        # Relax the option regex so "key = value" with arbitrary whitespace
        # around '=' parses.
        ConfigParser.RawConfigParser.OPTCRE = re.compile(r'(?P<option>\S+)\s+(?P<vi>[=])\s+(?P<value>.*)$')
        self.CONFIG = ConfigParser.ConfigParser()
        self.CONFIG_FILENAME = os.path.splitext(os.path.abspath(__file__))[0]+'.ini'
        # user overrides live next to the main ini, read last so they win
        self.CONFIG_USER_FILENAME = re.sub(r'\.ini$', '.user.ini', self.CONFIG_FILENAME)
        self.CONFIG.read([self.CONFIG_FILENAME, self.CONFIG_USER_FILENAME])

        # Environment overrides, e.g. GOAGENT_GAE_PASSWORD=... -> [gae] password
        for key, value in os.environ.items():
            m = re.match(r'^%s([A-Z]+)_([A-Z\_\-]+)$' % self.ENV_CONFIG_PREFIX, key)
            if m:
                self.CONFIG.set(m.group(1).lower(), m.group(2).lower(), value)

        # [listen]
        self.LISTEN_IP = self.CONFIG.get('listen', 'ip')
        self.LISTEN_PORT = self.CONFIG.getint('listen', 'port')
        self.LISTEN_USERNAME = self.CONFIG.get('listen', 'username') if self.CONFIG.has_option('listen', 'username') else ''
        self.LISTEN_PASSWORD = self.CONFIG.get('listen', 'password') if self.CONFIG.has_option('listen', 'password') else ''
        self.LISTEN_VISIBLE = self.CONFIG.getint('listen', 'visible')
        self.LISTEN_DEBUGINFO = self.CONFIG.getint('listen', 'debuginfo')

        # [gae]
        self.GAE_ENABLE = self.CONFIG.getint('gae', 'enable')
        self.GAE_APPIDS = re.findall(r'[\w\-\.]+', self.CONFIG.get('gae', 'appid').replace('.appspot.com', ''))
        self.GAE_PASSWORD = self.CONFIG.get('gae', 'password').strip()
        self.GAE_PATH = self.CONFIG.get('gae', 'path')
        self.GAE_MODE = self.CONFIG.get('gae', 'mode')
        self.GAE_IPV6 = self.CONFIG.getint('gae', 'ipv6')
        self.GAE_WINDOW = self.CONFIG.getint('gae', 'window')
        self.GAE_KEEPALIVE = self.CONFIG.getint('gae', 'keepalive')
        self.GAE_CACHESOCK = self.CONFIG.getint('gae', 'cachesock')
        self.GAE_HEADFIRST = self.CONFIG.getint('gae', 'headfirst')
        self.GAE_OBFUSCATE = self.CONFIG.getint('gae', 'obfuscate')
        self.GAE_VALIDATE = self.CONFIG.getint('gae', 'validate')
        self.GAE_TRANSPORT = self.CONFIG.getint('gae', 'transport') if self.CONFIG.has_option('gae', 'transport') else 0
        self.GAE_OPTIONS = self.CONFIG.get('gae', 'options')
        self.GAE_REGIONS = set(x.upper() for x in self.CONFIG.get('gae', 'regions').split('|') if x.strip())
        self.GAE_SSLVERSION = self.CONFIG.get('gae', 'sslversion')
        self.GAE_PAGESPEED = self.CONFIG.getint('gae', 'pagespeed') if self.CONFIG.has_option('gae', 'pagespeed') else 0

        # Probe for a usable IPv6 route (UDP connect does not send packets);
        # fall back to IPv4 on any failure.
        if self.GAE_IPV6:
            sock = None
            try:
                sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
                sock.connect(('2001:4860:4860::8888', 53))
                logging.info('use ipv6 interface %s for gae', sock.getsockname()[0])
            except Exception as e:
                logging.info('Fail try use ipv6 %r, fallback ipv4', e)
                self.GAE_IPV6 = 0
            finally:
                if sock:
                    sock.close()

        # Route the Windows corporate DNS suffix direct, if sane.
        if 'USERDNSDOMAIN' in os.environ and re.match(r'^\w+\.\w+$', os.environ['USERDNSDOMAIN']):
            self.CONFIG.set('profile', '.' + os.environ['USERDNSDOMAIN'], 'direct')

        # [profile]: each entry is either a URL-rewrite target or a
        # comma/pipe-separated list of behavior flags for a site pattern.
        withgae_sites = []
        withphp_sites = []
        crlf_sites = []
        nocrlf_sites = []
        forcehttps_sites = []
        noforcehttps_sites = []
        fakehttps_sites = []
        nofakehttps_sites = []
        # NOTE(review): dns_servers is never populated in this chunk, so
        # HTTP_DNS below always ends up empty — confirm against the full file.
        dns_servers = []
        urlrewrite_map = collections.OrderedDict()
        rule_map = collections.OrderedDict()
        for pattern, rule in self.CONFIG.items('profile'):
            rules = [x.strip() for x in re.split(r'[,\|]', rule) if x.strip()]
            if rule.startswith(('file://', 'http://', 'https://')) or '$1' in rule:
                urlrewrite_map[pattern] = rule
                continue
            for rule, sites in [('withgae', withgae_sites),
                                ('withphp', withphp_sites),
                                ('crlf', crlf_sites),
                                ('nocrlf', nocrlf_sites),
                                ('forcehttps', forcehttps_sites),
                                ('noforcehttps', noforcehttps_sites),
                                ('fakehttps', fakehttps_sites),
                                ('nofakehttps', nofakehttps_sites)]:
                if rule in rules:
                    sites.append(pattern)
                    rules.remove(rule)
            # whatever flag is left over becomes the routing rule
            if rules:
                rule_map[pattern] = rules[0]
        self.HTTP_DNS = dns_servers
        self.WITHGAE_SITES = tuple(withgae_sites)
        self.WITHPHP_SITES = tuple(withphp_sites)
        self.CRLF_SITES = tuple(crlf_sites)
        self.NOCRLF_SITES = set(nocrlf_sites)
        self.FORCEHTTPS_SITES = tuple(forcehttps_sites)
        self.NOFORCEHTTPS_SITES = set(noforcehttps_sites)
        self.FAKEHTTPS_SITES = tuple(fakehttps_sites)
        self.NOFAKEHTTPS_SITES = set(nofakehttps_sites)
        self.URLREWRITE_MAP = urlrewrite_map
        self.RULE_MAP = rule_map

        # [iplist]: alias -> '|'-separated hosts/IPs
        self.IPLIST_ALIAS = collections.OrderedDict((k, v.split('|') if v else []) for k, v in self.CONFIG.items('iplist'))
        # literal IPv4/IPv6 entries never need resolving
        self.IPLIST_PREDEFINED = [x for x in sum(self.IPLIST_ALIAS.values(), []) if re.match(r'^\d+\.\d+\.\d+\.\d+$', x) or ':' in x]
        if self.GAE_IPV6 and 'google_ipv6' in self.IPLIST_ALIAS:
            # In IPv6 mode, point every google_* alias at the IPv6 list.
            for name in self.IPLIST_ALIAS.keys():
                if name.startswith('google') and name not in ('google_ipv6', 'google_talk'):
                    self.IPLIST_ALIAS[name] = self.IPLIST_ALIAS['google_ipv6']

        # [pac]
        self.PAC_ENABLE = self.CONFIG.getint('pac', 'enable')
        self.PAC_IP = self.CONFIG.get('pac', 'ip')
        self.PAC_PORT = self.CONFIG.getint('pac', 'port')
        self.PAC_FILE = self.CONFIG.get('pac', 'file').lstrip('/')
        self.PAC_GFWLIST = self.CONFIG.get('pac', 'gfwlist')
        self.PAC_ADBLOCK = self.CONFIG.get('pac', 'adblock')
        self.PAC_ADMODE = self.CONFIG.getint('pac', 'admode')
        self.PAC_EXPIRED = self.CONFIG.getint('pac', 'expired')

        # [php]
        self.PHP_ENABLE = self.CONFIG.getint('php', 'enable')
        self.PHP_LISTEN = self.CONFIG.get('php', 'listen')
        self.PHP_PASSWORD = self.CONFIG.get('php', 'password') if self.CONFIG.has_option('php', 'password') else ''
        self.PHP_CRLF = self.CONFIG.getint('php', 'crlf') if self.CONFIG.has_option('php', 'crlf') else 1
        self.PHP_VALIDATE = self.CONFIG.getint('php', 'validate') if self.CONFIG.has_option('php', 'validate') else 0
        self.PHP_KEEPALIVE = self.CONFIG.getint('php', 'keepalive')
        self.PHP_FETCHSERVER = self.CONFIG.get('php', 'fetchserver')
        self.PHP_HOSTS = self.CONFIG.get('php', 'hosts').split('|') if self.CONFIG.get('php', 'hosts') else []

        # [vps]
        self.VPS_ENABLE = self.CONFIG.getint('vps', 'enable')
        self.VPS_LISTEN = self.CONFIG.get('vps', 'listen')
        self.VPS_FETCHSERVER = self.CONFIG.get('vps', 'fetchserver')

        # [proxy]: optional upstream proxy, possibly autodetected
        self.PROXY_ENABLE = self.CONFIG.getint('proxy', 'enable')
        self.PROXY_AUTODETECT = self.CONFIG.getint('proxy', 'autodetect') if self.CONFIG.has_option('proxy', 'autodetect') else 0
        self.PROXY_HOST = self.CONFIG.get('proxy', 'host')
        self.PROXY_PORT = self.CONFIG.getint('proxy', 'port')
        self.PROXY_USERNAME = self.CONFIG.get('proxy', 'username')
        # NOTE(review): attribute name is misspelled ("PASSWROD") but other
        # code reads the same spelling — renaming would break callers.
        self.PROXY_PASSWROD = self.CONFIG.get('proxy', 'password')
        if not self.PROXY_ENABLE and self.PROXY_AUTODETECT:
            system_proxy = ProxyUtil.get_system_proxy()
            # ignore the system proxy if it points back at us
            if system_proxy and self.LISTEN_IP not in system_proxy:
                _, username, password, address = ProxyUtil.parse_proxy(system_proxy)
                proxyhost, _, proxyport = address.rpartition(':')
                self.PROXY_ENABLE = 1
                self.PROXY_USERNAME = username
                self.PROXY_PASSWROD = password
                self.PROXY_HOST = proxyhost
                self.PROXY_PORT = int(proxyport)
        if self.PROXY_ENABLE:
            # behind an upstream proxy, GAE traffic must tunnel over https
            self.GAE_MODE = 'https'

        # [autorange]
        self.AUTORANGE_HOSTS = self.CONFIG.get('autorange', 'hosts').split('|')
        self.AUTORANGE_ENDSWITH = tuple(self.CONFIG.get('autorange', 'endswith').split('|'))
        self.AUTORANGE_NOENDSWITH = tuple(self.CONFIG.get('autorange', 'noendswith').split('|'))
        self.AUTORANGE_MAXSIZE = self.CONFIG.getint('autorange', 'maxsize')
        self.AUTORANGE_WAITSIZE = self.CONFIG.getint('autorange', 'waitsize')
        self.AUTORANGE_BUFSIZE = self.CONFIG.getint('autorange', 'bufsize')
        self.AUTORANGE_THREADS = self.CONFIG.getint('autorange', 'threads')

        # [fetchmax]
        self.FETCHMAX_LOCAL = self.CONFIG.getint('fetchmax', 'local') if self.CONFIG.get('fetchmax', 'local') else 3
        self.FETCHMAX_SERVER = self.CONFIG.get('fetchmax', 'server')

        # [dns]
        self.DNS_ENABLE = self.CONFIG.getint('dns', 'enable')
        self.DNS_LISTEN = self.CONFIG.get('dns', 'listen')
        self.DNS_SERVERS = self.HTTP_DNS or self.CONFIG.get('dns', 'servers').split('|')
        self.DNS_BLACKLIST = set(self.CONFIG.get('dns', 'blacklist').split('|'))
        self.DNS_TCPOVER = tuple(self.CONFIG.get('dns', 'tcpover').split('|')) if self.CONFIG.get('dns', 'tcpover').strip() else tuple()
        # keep only resolvers whose address family matches the chosen stack
        if self.GAE_IPV6:
            self.DNS_SERVERS = [x for x in self.DNS_SERVERS if ':' in x]
        else:
            self.DNS_SERVERS = [x for x in self.DNS_SERVERS if ':' not in x]

        # [useragent] / [love]
        self.USERAGENT_ENABLE = self.CONFIG.getint('useragent', 'enable')
        self.USERAGENT_STRING = self.CONFIG.get('useragent', 'string')
        self.LOVE_ENABLE = self.CONFIG.getint('love', 'enable')
        # tip strings are stored \u-escaped in the ini
        self.LOVE_TIP = self.CONFIG.get('love', 'tip').encode('utf8').decode('unicode-escape').split('|')
# Module-level configuration singleton shared by all handler classes.
common = Common()
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this chunk — confirm it exists
    # in the full file before relying on script execution.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
40477,
12,
23,
198,
2,
13403,
319,
402,
4677,
44148,
362,
13,
15,
13,
15,
416,
10343,
28249,
38,
648,
1279,
646,
28284,
13,
11528,
31,
14816,
13,
785,
29,
198,
2,
1... | 1.904051 | 17,947 |
# -*- coding: utf-8 -*-
from __future__ import division
__copyright__ = "Copyright (C) 2014 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import django.forms as forms
from django import http
from course.validation import validate_struct, ValidationError
from course.constants import MAX_EXTRA_CREDIT_FACTOR
from relate.utils import StyledForm, Struct, string_concat
from django.forms import ValidationError as FormValidationError
from django.utils.safestring import mark_safe
from django.utils.functional import lazy
from django.utils.translation import (
gettext_lazy as _,
gettext_noop,
)
from django.conf import settings
# {{{ mypy
from typing import (Text, Optional, Any, Tuple, Dict, Callable, FrozenSet, Union,
TYPE_CHECKING)
if TYPE_CHECKING:
# FIXME There seem to be some cyclic imports that prevent importing these
# outright.
from course.models import ( # noqa
Course,
FlowSession
)
from relate.utils import Repo_ish
# }}}
__doc__ = """
Stub Docs of Internals
======================
.. class:: Repo_ish
See ``relate.utils.Repo_ish``.
.. class:: Course
See ``course.models.Course``.
.. class:: FlowSession
See ``course.models.FlowSession``.
Page Interface
==============
.. autoclass:: PageContext
.. autoclass:: PageBehavior
.. autoclass:: AnswerFeedback
.. exception:: InvalidPageData
.. autoclass:: PageBase
"""
# Lazily-evaluated mark_safe: defers building the safe string until rendered.
mark_safe_lazy = lazy(mark_safe, str)
class PageContext(object):
    """
    Read-only bundle of contextual objects handed to page methods.

    .. attribute:: course
    .. attribute:: repo
    .. attribute:: commit_sha
    .. attribute:: flow_session

        May be None.

    .. attribute:: page_uri

    Note that this is different from :class:`course.utils.FlowPageContext`,
    which is used internally by the flow views.

    NOTE(review): the constructor is not visible in this chunk (truncated).
    """
class PageBehavior(object):
    """
    Flags describing how a page should currently behave for a given viewer.

    .. attribute:: show_correctness
    .. attribute:: show_answer
    .. attribute:: may_change_answer
    """

# NOTE(review): this assignment sits at module level and references an
# undefined name ``__bool__``; in the upstream file it presumably lives
# inside PageBehavior as the Python 2 truthiness alias. Extraction artifact —
# confirm against the original source.
__nonzero__ = __bool__
# {{{ answer feedback type
def round_point_count_to_quarters(value, atol=1e-5):
    # type: (float, float) -> Union[float, int]
    """Snap *value* onto the nearest whole, half, or quarter point.

    If *value* lies within *atol* of an integer, that integer is returned;
    if it lies within a proportional tolerance of a quarter multiple, the
    snapped quarter (rounded to two decimals) is returned. Otherwise the
    original value comes back unchanged.
    """
    whole = int(value)
    if abs(value - whole) < atol:
        return whole

    import math
    # Work in units of quarter points; scale the tolerance accordingly.
    quarters = value * 4
    tolerance = atol * 4
    # Check floor first to mirror the snap preference for the lower quarter.
    for candidate in (math.floor(quarters), math.ceil(quarters)):
        if abs(quarters - candidate) < tolerance:
            return round(candidate / 4, 2)
    return value
class AnswerFeedback(object):
    """
    Grading outcome reported back for one submitted answer.

    .. attribute:: correctness

        A :class:`float` between 0 and 1 (inclusive),
        indicating the degree of correctness of the
        answer. May be *None*.

    .. attribute:: feedback

        Text (at least as a full sentence, or even multi-paragraph HTML)
        providing feedback to the student about the provided answer. Should not
        reveal the correct answer.

        May be None, in which case generic feedback
        is generated from :attr:`correctness`.

    .. attribute:: bulk_feedback

    NOTE(review): the class body (constructor/methods) is not visible in
    this chunk — it appears to be truncated.
    """
@staticmethod
# NOTE(review): this stray ``@staticmethod`` is left over from a truncated
# AnswerFeedback method above and currently decorates the class below —
# almost certainly an extraction artifact; confirm against upstream.
# }}}
# {{{ abstract page base class
class PageBase(object):
    """The abstract interface of a flow page.

    .. attribute:: location

        A string 'location' for reporting errors.

    .. attribute:: id

        The page identifier.

    .. automethod:: required_attrs
    .. automethod:: allowed_attrs

    .. automethod:: get_modified_permissions_for_page
    .. automethod:: initialize_page_data
    .. automethod:: title
    .. automethod:: body
    .. automethod:: expects_answer
    .. automethod:: is_answer_gradable
    .. automethod:: max_points

    .. rubric:: Student Input

    .. automethod:: answer_data
    .. automethod:: make_form
    .. automethod:: process_form_post
    .. automethod:: form_to_html

    .. rubric:: Grader Input

    .. automethod:: make_grading_form
    .. automethod:: post_grading_form
    .. automethod:: update_grade_data_from_grading_form_v2
    .. automethod:: grading_form_to_html

    .. rubric:: Grading/Feedback

    .. automethod:: grade
    .. automethod:: correct_answer
    .. automethod:: analytic_view_body
    .. automethod:: normalized_answer
    .. automethod:: normalized_bytes_answer
    """
    def __init__(self, vctx, location, page_desc):
        """
        :arg vctx: a :class:`course.validation.ValidationContext`, or None
            if no validation is desired
        :arg location: string used as a prefix in validation error messages
        :arg page_desc: the page's YAML description as a
            :class:`relate.utils.Struct` (modern convention), or — deprecated —
            a bare page id
        """
        self.location = location

        if isinstance(page_desc, Struct):
            if vctx is not None:
                validate_struct(
                        vctx,
                        location,
                        page_desc,
                        required_attrs=self.required_attrs(),
                        allowed_attrs=self.allowed_attrs())

                # {{{ validate access_rules

                if hasattr(page_desc, "access_rules"):
                    ar_loc = "%s: access rules" % location
                    validate_struct(
                            vctx,
                            ar_loc,
                            page_desc.access_rules,
                            required_attrs=(),
                            allowed_attrs=(
                                ("add_permissions", list),
                                ("remove_permissions", list),
                                ))

                    from course.validation import validate_flow_permission
                    for attr in ["add_permissions", "remove_permissions"]:
                        if hasattr(page_desc.access_rules, attr):
                            for perm in getattr(page_desc.access_rules, attr):
                                validate_flow_permission(
                                        vctx,
                                        "%s: %s" % (ar_loc, attr),
                                        perm)

                # }}}

            self.page_desc = page_desc

            self.is_optional_page = getattr(page_desc, "is_optional_page", False)

        else:
            # Deprecated calling convention: page_desc is just the page id.
            from warnings import warn
            warn(_("Not passing page_desc to PageBase.__init__ is deprecated"),
                    DeprecationWarning)

            id = page_desc
            # rebind/delete to avoid accidental reuse of the raw argument
            del page_desc

            self.id = id
def required_attrs(self):
"""Required attributes, as accepted by
:func:`course.validation.validate_struct`.
Subclasses should only add to, not remove entries from this.
"""
return (
("id", str),
("type", str),
)
def allowed_attrs(self):
"""Allowed attributes, as accepted by
:func:`course.validation.validate_struct`.
Subclasses should only add to, not remove entries from this.
"""
return (
("access_rules", Struct),
("is_optional_page", bool),
)
def initialize_page_data(self, page_context):
# type: (PageContext) -> Dict
"""Return (possibly randomly generated) data that is used to generate
the content on this page. This is passed to methods below as the *page_data*
argument. One possible use for this argument would be a random permutation
of choices that is generated once (at flow setup) and then used whenever
this page is shown.
"""
data = self.make_page_data()
if data:
from warnings import warn
warn(_("%s is using the make_page_data compatiblity hook, which "
"is deprecated.") % type(self).__name__,
DeprecationWarning)
return data
def title(self, page_context, page_data):
# type: (PageContext, Dict) -> str
"""Return the (non-HTML) title of this page."""
raise NotImplementedError()
def analytic_view_body(self, page_context, page_data):
# type: (PageContext, Dict) -> str
"""
Return the (HTML) body of the page, which is shown in page analytic
view."""
return self.body(page_context, page_data)
def body(self, page_context, page_data):
# type: (PageContext, Dict) -> str
"""Return the (HTML) body of the page."""
raise NotImplementedError()
def expects_answer(self):
# type: () -> bool
"""
:return: a :class:`bool` indicating whether this page lets the
user provide an answer of some type.
"""
raise NotImplementedError()
def is_answer_gradable(self):
# type: () -> bool
"""
:return: a :class:`bool` indicating whether answers on this can
have :meth:`grade` called on them.
True by default.
"""
return True
def max_points(self, page_data):
# type: (Any) -> float
"""
:return: a :class:`int` or :class:`float` indicating how many points
are achievable on this page.
"""
raise NotImplementedError()
# {{{ student input
def answer_data(
self,
page_context: PageContext,
page_data: Any,
form: forms.Form,
files_data: Any,
) -> Any:
"""Return a JSON-persistable object reflecting the user's answer on the
form. This will be passed to methods below as *answer_data*.
"""
raise NotImplementedError()
def make_form(
self,
page_context, # type: PageContext
page_data, # type: Any
answer_data, # type: Any
page_behavior, # type: Any
):
"""
:arg answer_data: value returned by :meth:`answer_data`.
May be *None*.
:arg page_behavior: an instance of :class:`PageBehavior`
:return:
a :class:`django.forms.Form` instance with *answer_data* prepopulated.
If ``page_behavior.may_change_answer`` is *False*, the form should
be read-only.
"""
raise NotImplementedError()
def process_form_post(
self,
page_context, # type: PageContext
page_data, # type: Any
post_data, # type: Any
files_data, # type: Any
page_behavior, # type: PageBehavior
) -> forms.Form:
"""Return a form with the POST response from *post_data* and *files_data*
filled in.
:arg page_behavior: an instance of :class:`PageBehavior`
:return: a
:class:`django.forms.Form` instance with *answer_data* prepopulated.
If ``page_behavior.may_change_answer`` is *False*, the form should
be read-only.
"""
from warnings import warn
warn(_("%s is using the post_form compatiblity hook, which "
"is deprecated.") % type(self).__name__,
DeprecationWarning)
return self.post_form(page_context, page_data, post_data, files_data)
def form_to_html(
self,
request, # type: http.HttpRequest
page_context, # type: PageContext
form, # type: StyledForm
answer_data, # type: Any
):
"""Returns an HTML rendering of *form*."""
from django.template import loader
return loader.render_to_string(
"course/crispy-form.html",
context={"form": form},
request=request)
# }}}
# {{{ grader input
def make_grading_form(
self,
page_context: PageContext,
page_data: Any,
grade_data: Any,
) -> forms.Form:
"""
:arg grade_data: value returned by
:meth:`update_grade_data_from_grading_form_v2`. May be *None*.
:return:
a :class:`django.forms.Form` instance with *grade_data* prepopulated.
"""
return None
def post_grading_form(
self,
page_context: PageContext,
page_data: Any,
grade_data: Any,
post_data: Any,
files_data: Any,
) -> forms.Form:
"""Return a form with the POST response from *post_data* and *files_data*
filled in.
:return: a
:class:`django.forms.Form` instance with *grade_data* prepopulated.
"""
raise NotImplementedError()
def update_grade_data_from_grading_form_v2(
self,
request, # type: http.HttpRequest
page_context, # type: PageContext
page_data, # type: Any
grade_data, # type: Any
grading_form, # type: Any
files_data # type: Any
):
"""Return an updated version of *grade_data*, which is a
JSON-persistable object reflecting data on grading of this response.
This will be passed to other methods as *grade_data*.
"""
from warnings import warn
warn(_("%s is using the update_grade_data_from_grading_form "
"compatiblity hook, which "
"is deprecated.") % type(self).__name__,
DeprecationWarning)
return self.update_grade_data_from_grading_form(
page_context, page_data, grade_data, grading_form, files_data)
def grading_form_to_html(
self,
request, # type: http.HttpRequest
page_context, # type: PageContext
grading_form, # type: Any
grade_data # type: Any
):
# type: (...) -> Text
"""Returns an HTML rendering of *grading_form*."""
# http://bit.ly/2GxzWr1
from crispy_forms.utils import render_crispy_form
from django.template.context_processors import csrf
ctx = {} # type: Dict
ctx.update(csrf(request))
return render_crispy_form(grading_form, context=ctx)
# }}}
# {{{ grading/feedback
def grade(
self,
page_context, # type: PageContext
page_data, # type: Any
answer_data, # type: Any
grade_data, # type: Any
):
# type: (...) -> Optional[AnswerFeedback]
"""Grade the answer contained in *answer_data*.
:arg answer_data: value returned by :meth:`answer_data`,
or *None*, which means that no answer was supplied.
:arg grade_data: value updated by
:meth:`update_grade_data_from_grading_form_v2`
:return: a :class:`AnswerFeedback` instanstance, or *None* if the
grade is not yet available.
"""
raise NotImplementedError()
def correct_answer(
self,
page_context, # type: PageContext
page_data, # type: Any
answer_data, # type: Any
grade_data, # type: Any
):
# type: (...) -> Optional[Text]
"""The correct answer to this page's interaction, formatted as HTML,
or *None*.
"""
return None
def normalized_answer(
self,
page_context, # type: PageContext
page_data, # type: Any
answer_data # type: Any
):
# type: (...) -> Optional[Text]
"""An HTML-formatted answer to be used for summarization and
display in analytics.
"""
return None
def normalized_bytes_answer(
self,
page_context, # type: PageContext
page_data, # type: Any
answer_data, # type: Any
):
# type: (...) -> Optional[Tuple[Text, bytes]]
"""An answer to be used for batch download, given as a batch of bytes
to be stuffed in a zip file.
:returns: a tuple of ``(file_ext, data)`` where *file_ext* is a suggested
file extension (inlcuding the leading period, if applicable).
May also return *None*.
One use case of this function is to work as input for a plagiarism
checker.
"""
return None
# }}}
# }}}
# {{{ utility base classes
# }}}
# {{{ human text feedback page base
def create_default_point_scale(total_points):
    """
    Return a scale that has sensible intervals for assigning points.

    :arg total_points: the maximum number of points; the returned scale
        runs from 0 up to and including this value.
    :returns: a list of numbers (ints where the value is whole, floats
        otherwise) usable as a point scale.
    """
    # Coarser increments for larger totals keep the scale reasonably short.
    if total_points <= 5:
        incr = 0.25
    elif total_points <= 10:
        incr = 0.5
    elif total_points <= 20:
        incr = 1
    else:
        incr = 5

    def as_int(x):
        # Render whole-valued floats as ints for cleaner display.
        # (FIX: this helper was referenced but missing, causing a NameError.)
        return int(x) if int(x) == x else x

    points = [as_int(idx*incr) for idx in range(int(total_points/incr))]
    points.append(as_int(total_points))
    return points
class PageBaseWithHumanTextFeedback(PageBase):
    """
    .. automethod:: human_feedback_point_value
    """

    # Keys of the grade_data dict that this page type persists.
    grade_data_attrs = ["released", "grade_percent", "feedback_text", "notes"]

    def human_feedback_point_value(self, page_context, page_data):
        """Subclasses can override this to make the point value of the human
        feedback known, which will enable grade entry in points.
        """
        return None

    def grade(
            self,
            page_context,  # type: PageContext
            page_data,  # type: Any
            answer_data,  # type: Any
            grade_data,  # type: Any
            ):
        # type: (...) -> Optional[AnswerFeedback]
        """This method is appropriate if the grade consists *only* of the
        feedback provided by humans. If more complicated/combined feedback
        is desired, a subclass would likely override this.
        """
        if answer_data is None:
            return AnswerFeedback(correctness=0,
                    feedback=gettext_noop("No answer provided."))

        if grade_data is None:
            return None

        # Grades are only visible once the grader has released them.
        if not grade_data["released"]:
            return None

        if (grade_data["grade_percent"] is not None
                or grade_data["feedback_text"]):
            if grade_data["grade_percent"] is not None:
                correctness = grade_data["grade_percent"]/100
                feedback_text = "<p>%s</p>" % get_auto_feedback(correctness)
            else:
                correctness = None
                feedback_text = ""

            if grade_data["feedback_text"]:
                feedback_text += (
                        string_concat(
                            "<p>",
                            _("The following feedback was provided"),
                            # FIX: was ":<p>", which left two unbalanced
                            # <p> tags in the emitted HTML.
                            ":</p>")
                        + markup_to_html(
                            page_context, grade_data["feedback_text"],
                            use_jinja=False))

            return AnswerFeedback(
                    correctness=correctness,
                    feedback=feedback_text)
        else:
            return None
# }}}
# vim: foldmethod=marker
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
198,
834,
22163,
4766,
834,
796,
366,
15269,
357,
34,
8,
1946,
33728,
14770,
2577,
694,
1008,
1,
198,
198,
834,
43085,
8... | 2.234105 | 8,902 |
# Data
import sys
import pandas as pd
import numpy as np
import re
from sqlalchemy import create_engine
from collections import defaultdict
import pickle
from warnings import filterwarnings
#Sklearn
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.multioutput import MultiOutputClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import f1_score, accuracy_score, precision_score, recall_score
# NLP
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
nltk.download(['punkt','stopwords','wordnet'])
filterwarnings('ignore')
def load_data(database_filepath):
    """
    Load the disaster-message table from a SQLite database.

    Parameters
    ----------
    database_filepath : str
        Path to the SQLite database file.

    Returns
    -------
    X : pandas Series
        The message texts.
    Y : pandas DataFrame
        Binary (uint8) target indicator columns.
    category_names :
        The names of the target categories (Y's columns).
    """
    # Read the 'disaster_data' table from the database.
    engine = create_engine('sqlite:///' + database_filepath)
    df = pd.read_sql('disaster_data', engine)

    # Messages are the features; everything except the bookkeeping
    # columns forms the binary target matrix.
    X = df['message']
    Y = df.drop(['id', 'message', 'genre'], axis=1).astype(np.uint8)
    return X, Y, Y.columns
def tokenize(text):
    """
    Break text into clean, lemmatized tokens.

    1. The case is normalized and punctuation is removed.
    2. The text is broken into words.
    3. Stop words are dropped and the remaining words are lemmatized
       with WordNetLemmatizer().

    Parameters
    ----------
    text : str
        Message to tokenize

    Returns
    -------
    tokens : list
        List of tokens
    """
    # FIX: use a set for O(1) stop-word membership; the previous list
    # made the filter below O(len(stopwords)) per token.
    stop_words = set(stopwords.words('english'))
    lemmatizer = WordNetLemmatizer()

    # Normalize case and remove punctuation
    text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())

    # Tokenize text
    tokens = word_tokenize(text)

    # Lemmatize and remove stop words
    return [lemmatizer.lemmatize(word) for word in tokens
            if word not in stop_words]
def build_model():
    """
    Create the model pipeline, wrapped in a grid search.

    Returns
    -------
    model : GridSearchCV over a Pipeline containing:
        1. CountVectorizer using :func:`tokenize`
        2. TfidfTransformer
        3. MultiOutput RandomForest classifier
    """
    # FIX (idiom): Pipeline expects an ordered list of (name, step) pairs;
    # the defaultdict(list) previously used here was never used as a
    # dict-of-lists and only obscured the step order.
    ranforest = RandomForestClassifier(n_estimators=100,
                                       criterion='gini')
    steps = [
        ("count", CountVectorizer(tokenizer=tokenize)),
        ("tfidf", TfidfTransformer(norm='l1')),
        ("Classifier", MultiOutputClassifier(ranforest, n_jobs=-1)),
    ]
    pipeline = Pipeline(steps)

    # Hyper-parameter search. Note: fitting this grid takes a long time.
    params = {'Classifier__estimator__n_estimators': [100, 200],
              'Classifier__estimator__criterion': ['gini', 'entropy'],
              'tfidf__norm': ['l1', 'l2']}
    pipeline = GridSearchCV(estimator=pipeline, param_grid=params,
                            cv=3, refit=True)
    return pipeline
def evaluate_model(model, X_test, Y_test, category_names):
    """
    Evaluate the model's accuracy, precision, recall and F1-score.

    Writes a file named 'evaluation_results.csv' with one row per target
    category and one column per metric.

    Parameters
    ----------
    model : scikit-learn Pipeline
        Fitted MultiOutput model to be evaluated.
    X_test : pandas DataFrame
        Test values.
    Y_test : pandas DataFrame
        Test targets.
    category_names : list
        Categories' names (used as the row index of the output file).

    Returns
    -------
    None.
    """
    Y_pred = model.predict(X_test)

    # One row of [accuracy, precision, recall, f1] per target column.
    rows = []
    for ind, col in enumerate(Y_test.columns):
        y_true = Y_test[col]
        y_pred = Y_pred[:, ind]
        rows.append([accuracy_score(y_true, y_pred),
                     precision_score(y_true, y_pred),
                     recall_score(y_true, y_pred),
                     f1_score(y_true, y_pred)])

    # Persist the evaluation results.
    report = pd.DataFrame(rows, index=category_names,
                          columns=['Accuracy', 'Precision', 'Recall', "F1-Score"])
    report.to_csv('evaluation_results.csv')
def save_model(model, model_filepath):
    """
    Serialize the model into a .pkl file.

    Parameters
    ----------
    model : scikit-learn Pipeline
        Fitted model (any picklable object) to store.
    model_filepath : str
        Destination path; conventionally ends with .pkl.

    Returns
    -------
    None.
    """
    with open(model_filepath, 'wb') as handle:
        pickle.dump(model, handle)
if __name__ == '__main__':
main() | [
2,
6060,
198,
11748,
25064,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
302,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
198,
6738,
17268,
1330,
4277,
11600,
198,
11748,
2298,
293,
198,... | 2.459435 | 2,231 |
import os
import collections
if __name__ == '__main__':
main()
| [
11748,
28686,
198,
11748,
17268,
628,
628,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.777778 | 27 |
# Generated by Django 3.2.7 on 2021-09-15 21:27
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
22,
319,
33448,
12,
2931,
12,
1314,
2310,
25,
1983,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
#SCH1006.py --> JB_WORK_INVOICE.py
#**************************************************************************************************************
#
# Programmer : bibin
# Version : 1.0
#
# Description :
#
#
#
# Initial Creation:
#
# Date (YYYY-MM-DD) Change Description
# ----------------- ------------------
# 2018-10-24 Initial creation
#
#
#**************************************************************************************************************
# Importing required Lib
import logging
import sys
from time import gmtime, strftime
import cx_Oracle
import py4j
import pyspark
from dependencies.spark import start_spark
from dependencies.EbiReadWrite import EbiReadWrite
# Spark logging
logger = logging.getLogger(__name__)

# Date Formats
# start_date is pre-wrapped in single quotes so it can be spliced
# directly into SQL text.
start_date = "'"+strftime("%Y-%m-%d %H:%M:%S", gmtime())+"'"
log_date = strftime("%Y%m%d", gmtime())

# Job Naming Details
# NOTE(review): the file header names this job SCH1006, but script_name
# says "SCH1100.sh" -- confirm which identifier is current.
script_name = "SCH1100.sh"
app_name = 'JB_WORK_INVOICE'
log_filename = app_name + '_' + log_date + '.log'
# Query for Extract data
# Main method
# Entry point for script
if __name__ == "__main__":
# Calling main() method
main()
| [
2,
50,
3398,
3064,
21,
13,
9078,
220,
14610,
449,
33,
62,
33249,
62,
1268,
29516,
8476,
13,
9078,
198,
198,
2,
17174,
17174,
17174,
46068,
1174,
198,
2,
198,
2,
6118,
647,
220,
220,
1058,
275,
571,
259,
198,
2,
10628,
220,
220,
... | 2.95202 | 396 |
#!/usr/bin/env python
from collections import defaultdict
from framework.dependency_management.dependency_resolver import BaseComponent
from framework.lib.general import *
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
17268,
1330,
4277,
11600,
198,
198,
6738,
9355,
13,
45841,
1387,
62,
27604,
13,
45841,
1387,
62,
411,
14375,
1330,
7308,
21950,
198,
6738,
9355,
13,
8019,
13,
24622,
1330,
1... | 4.069767 | 43 |
"""
Parses MiRDeath database
:argument: DATA_FILE: http://www.rna-world.org/mirdeathdb/data/miRDeathDB_all_data.txt
:argument: DB_DESTINATION: saving location of database
"""
# Imports
from SLKlib.SQLiteDBApi.sqlite_db_api import PsimiSQL
# Defining constants
SQL_SEED = '../../../../../SLKlib/SQLiteDBApi/network-db-seed.sql'
DATA_FILE = 'files/miRDeathDB_all_data.txt'
DB_DESTINATION = '../../output/miRDeathDB'
'''
COLUMN HEADER EXAMPLE
0 miRNA_symbol miR-106b
1 miRBase_mature_ID "hsa-miR-106b-5p,hsa-miR-106b-3p"
2 miRBase_ID "MIMAT0000680,MIMAT0004672"
3 Gene_Symbol CDKN1A
4 Pathway apoptosis
5 Action_Mode down
6 Organism human
7 Tissue gastric cancer
8 PMID 18328430
9 tax_id 9606
10 geneid 1026
11 Synonyms CAP20|CDKN1|CIP1|MDA-6|P21|SDI1|WAF1|p21CIP1
12 Links HGNC:1784|MIM:116899|Ensembl:ENSG00000124762|HPRD:00298|Vega:OTTHUMG00000014603|M2D:hsa-miR-106b
13 chromosome 6
14 map_location 6p21.2
15 Description "cyclin-dependent kinase inhibitor 1A (p21, Cip1)"
16 type_of_gene protein-coding
17 Full_name_from_nomenclature_authority "cyclin-dependent kinase inhibitor 1A (p21, Cip1)"
18 Other_designations CDK-interacting protein 1|CDK-interaction protein 1|DNA synthesis inhibitor|cyclin-dependent kinase inhibitor 1|melanoma differentiation associated protein 6|wild-type p53-activated fragment 1
'''
if __name__ == '__main__':
print("Parsing database...")
main(logger = None)
print("Parsing database is completed. SQLite database is saved to: " + DB_DESTINATION)
| [
37811,
198,
47,
945,
274,
13756,
49,
20148,
6831,
198,
1058,
49140,
25,
42865,
62,
25664,
25,
2638,
1378,
2503,
13,
81,
2616,
12,
6894,
13,
2398,
14,
76,
1447,
68,
776,
9945,
14,
7890,
14,
11632,
49,
20148,
11012,
62,
439,
62,
789... | 1.702473 | 1,294 |
#!/usr/bin/env python3
from typing import Tuple
import sys
import torch
import torch.nn as nn
import numpy as np
from tqdm import tqdm, trange
from nn_utils import freeze
from recipe_dataset import Dataset
from eval_utils import scoring, measure_time
from architecture import Processor, AbstractToGoal, AbstractToAbstract
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
# architecture
vocab_size = 4096
abstraction_dim = int(sys.argv[1])
predictor = AbstractToGoal(vocab_size=vocab_size, abstraction_dim=abstraction_dim).to(device)
abstract_to_abstract = AbstractToAbstract(abstraction_dim).to(device)
# Data
domains = list(range(7))
experiment_name = "%i domains [dim=%i] P" % (len(domains), abstraction_dim)
train_data1, train_data2, test_data = Dataset(sys.argv[2:]).split(0.75, 0.2, 0.05)
# find good abstractions on a random domain
processor = Processor(vocab_size=vocab_size, abstraction_dim=abstraction_dim).to(device)
loss_func = nn.CrossEntropyLoss()
system = nn.ModuleList([processor, predictor, abstract_to_abstract])
optimizer = torch.optim.Adam(system.parameters(), lr=0.001)
with measure_time("abstraction building"):
running_loss = 1
progress_bar = trange(2)
for epoch in progress_bar:
train_data1.reset()
while not train_data1.empty():
sentences = train_data1.get(64)
batch, y_true, _ = io(sentences, domain=1907)
abstraction = processor(batch)
task_pred = predictor(abstraction)
abstract_pred = abstract_to_abstract(abstraction)
dist = (abstract_pred[:, :, :-1] - abstraction[:, :, 1:]).pow(2).mean()
amplitude = torch.tanh(abstraction).pow(2).mean()
loss = loss_func(task_pred, y_true) + dist + 1 - amplitude
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss = 0.99 * running_loss + (1 - 0.99) * loss.item()
progress_bar.set_description("loss: %.5f" % running_loss)
for net in (processor, predictor):
freeze(net)
# fine-tune abstract_to_abstract
optimizer = torch.optim.Adam(abstract_to_abstract.parameters(), lr=0.001)
with measure_time("fine tuning"):
running_loss = 1
train_data2.reset()
with trange(1) as progress_bar:
while not train_data2.empty():
sentences = train_data2.get(64)
batch, y_true, _ = io(sentences, domain=1907)
with torch.no_grad():
abstraction = processor(batch)
abstract_pred = abstract_to_abstract(abstraction)
loss = (abstract_pred[:, :, :-1] - abstraction[:, :, 1:]).pow(2).mean()
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss = 0.99 * running_loss + (1 - 0.99) * loss.item()
progress_bar.set_description("loss: %.5f" % running_loss)
freeze(abstract_to_abstract)
# main training
processors = []
loss_func = nn.CrossEntropyLoss()
progress_bar = tqdm(domains, total=len(domains))
for domain in progress_bar:
with measure_time("neural network training"):
best_loss = np.inf
for attempt in range(3):
processor = Processor(
vocab_size=vocab_size,
abstraction_dim=abstraction_dim).to(device)
optimizer = torch.optim.Adam(processor.parameters(), lr=0.005)
running_loss = 1
train_data1.reset()
while not train_data1.empty():
sentences = train_data1.get(64)
batch, y_true, _ = io(sentences, domain)
abstraction = processor(batch)
task_pred = predictor(abstraction)
abstract_pred = abstract_to_abstract(abstraction)
dist = (abstract_pred[:, :, :-1] - abstraction[:, :, 1:]).pow(2).mean()
loss = loss_func(task_pred, y_true) + dist
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss = 0.99 * running_loss + (1 - 0.99) * loss.item()
progress_bar.set_description("loss: %.5f" % running_loss)
if running_loss < best_loss:
best_loss = running_loss
best_processor = processor
if running_loss < 0.95:
break # good enough
processors.append(best_processor.eval())
# domain identification evaluation
with scoring(experiment_name, "domain_accuracy.txt") as accuracy:
for correct_domain in trange(len(processors)):
processor = processors[correct_domain]
test_data.reset(shuffle=False)
while not test_data.empty():
sentences = test_data.get(1024)
batch, y_true, _ = io(sentences, correct_domain)
# reference abstraction of shape
# n_sentences x abstraction_dim x sentence_max_length
correct_abstraction = processor(batch)
# predicted abstraction
pred = abstract_to_abstract(correct_abstraction)
# align predictions with sentences
pred = pred[:, :, :-1]
batch = batch[:, 1:]
# run all processors
processed = torch.cat([
processor(batch).unsqueeze(3) for processor in processors
], dim=3)
# dist
dist = (pred.unsqueeze(3) - processed).pow(2).sum(dim=1)
min_dist = dist.min(dim=2)[1].cpu().numpy()
# measure domain selection accuracy
for j, sentence in enumerate(sentences):
for k in range(len(sentence)-1):
if min_dist[j, k] == correct_domain:
accuracy.register(100)
else:
accuracy.register(0)
# LM evaluation
with scoring(experiment_name, "LM_accuracy.txt") as accuracy:
with scoring(experiment_name, "LM_target_proba.txt") as proba:
for domain, processor in enumerate(tqdm(processors, total=len(processors))):
test_data.reset(shuffle=False)
while not test_data.empty():
sentences = test_data.get(1024)
batch, y_true, np_y_true = io(sentences, domain)
y_pred = predictor(processor(batch))
best = y_pred.max(dim=1)[1].cpu().numpy()
# measure accuracy
match = best == np_y_true
for j, sentence in enumerate(sentences):
for k in range(len(sentence)):
if match[j, k]:
accuracy.register(100)
else:
accuracy.register(0)
# measure probability of target
probas = torch.softmax(y_pred, dim=1).cpu().numpy()
for j, sentence in enumerate(sentences):
for k in range(len(sentence)):
p = probas[j, np_y_true[j, k], k]
proba.register(100*p)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
19720,
1330,
309,
29291,
198,
11748,
25064,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
256,
80,
36020,
... | 2.120638 | 3,324 |
import numpy as np
import pandas as pd
from staircase.constants import inf
from staircase.core.arrays import docstrings
from staircase.core.stairs import Stairs
from staircase.core.stats.statistic import corr as _corr
from staircase.core.stats.statistic import cov as _cov
from staircase.util._decorators import Appender
@Appender(docstrings.sample_example, join="\n", indents=1)
def sample(collection, x):
    """
    Takes a dict-like collection of Stairs instances and evaluates their
    values across a common set of points.

    Parameters
    ----------
    collection : array-like, dictionary or pandas.Series
        The Stairs instances at which to evaluate
    x : scalar or vector data
        The points at which to sample the Stairs instances.  Must belong
        to the step function domain.

    Returns
    -------
    :class:`pandas.DataFrame`
        A dataframe, where rows correspond to the Stairs instances in
        *collection*, and column correspond to the points in *x*. If
        *collection* is a dictionary then the resulting dataframe will be
        indexed by the dictionary keys. If *collection* is a
        :class:`pandas.Series` then the dataframe will have the same index
        as the series.

    See Also
    --------
    Stairs.sample
    """
    # NOTE(review): the previous docstring described one-sided limits and a
    # "how" parameter copied from limit(); this function takes no such
    # parameter and simply delegates to Stairs.sample for each instance.
    array = pd.Series(collection)
    return array.apply(Stairs.sample, x=x, include_index=True)
@Appender(docstrings.limit_example, join="\n", indents=1)
def limit(collection, x, side="right"):
    """
    Takes a dict-like collection of Stairs instances and evaluates their
    one-sided limits across a common set of points.

    Technically the results of this function should be considered as
    :math:`\\lim_{x \\to z^{-}} f(x)` or :math:`\\lim_{x \\to z^{+}} f(x)`,
    when ``side='left'`` or ``side='right'`` respectively. See
    :ref:`A note on interval endpoints<getting_started.interval_endpoints>`
    for an explanation.

    Parameters
    ----------
    collection : array-like, dictionary or pandas.Series
        The Stairs instances at which to evaluate
    x : scalar or vector data
        The points at which to sample the Stairs instances.  Must belong
        to the step function domain.
    side : {'left', 'right'}, default 'right'
        If points where step changes occur do not coincide with x then this
        parameter has no effect. Where a step change occurs at a point given
        by x, this parameter determines if the step function is evaluated at
        the interval to the left, or the right.

    Returns
    -------
    :class:`pandas.DataFrame`
        A dataframe, where rows correspond to the Stairs instances in
        *collection*, and column correspond to the points in *x*. If
        *collection* is a dictionary then the resulting dataframe will be
        indexed by the dictionary keys. If *collection* is a
        :class:`pandas.Series` then the dataframe will have the same index
        as the series.

    See Also
    --------
    Stairs.limit
    """
    array = pd.Series(collection)
    return array.apply(Stairs.limit, x=x, side=side, include_index=True)
corr = _make_corr_cov_func(docstrings.corr_docstring, _corr, assume_ones_diagonal=True)
cov = _make_corr_cov_func(docstrings.cov_docstring, _cov, assume_ones_diagonal=False)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
27656,
13,
9979,
1187,
1330,
1167,
198,
6738,
27656,
13,
7295,
13,
3258,
592,
1330,
2205,
37336,
198,
6738,
27656,
13,
7295,
13,
17617,
1330,
520,
34... | 3.053043 | 1,150 |
from cone.app import compat
from cone.app import testing
from cone.app.browser.ajax import AjaxEvent
from cone.app.browser.ajax import AjaxMessage
from cone.app.browser.ajax import AjaxPath
from cone.app.browser.authoring import _FormRenderingTile
from cone.app.browser.authoring import add
from cone.app.browser.authoring import AddFormHeading
from cone.app.browser.authoring import CameFromNext
from cone.app.browser.authoring import ContentAddForm
from cone.app.browser.authoring import ContentEditForm
from cone.app.browser.authoring import ContentForm
from cone.app.browser.authoring import edit
from cone.app.browser.authoring import EditFormHeading
from cone.app.browser.authoring import FormHeading
from cone.app.browser.authoring import is_ajax
from cone.app.browser.authoring import overlayadd
from cone.app.browser.authoring import OverlayAddForm
from cone.app.browser.authoring import overlayedit
from cone.app.browser.authoring import OverlayEditForm
from cone.app.browser.authoring import OverlayForm
from cone.app.browser.authoring import overlayform
from cone.app.browser.authoring import render_form
from cone.app.browser.form import Form
from cone.app.model import AdapterNode
from cone.app.model import BaseNode
from cone.app.model import get_node_info
from cone.app.model import node_info
from cone.app.model import NodeInfo
from cone.app.model import register_node_info
from cone.tile import render_tile
from cone.tile import tile
from cone.tile.tests import TileTestCase
from plumber import plumbing
from webob.exc import HTTPFound
from yafowil.base import factory
from zope.interface import implementer
from zope.interface import Interface
| [
6738,
27763,
13,
1324,
1330,
8330,
198,
6738,
27763,
13,
1324,
1330,
4856,
198,
6738,
27763,
13,
1324,
13,
40259,
13,
1228,
897,
1330,
46362,
9237,
198,
6738,
27763,
13,
1324,
13,
40259,
13,
1228,
897,
1330,
46362,
12837,
198,
6738,
2... | 3.681416 | 452 |
import sys
import terminal
import ftp_server
import ftp_client
if __name__ == '__main__':
main()
| [
11748,
25064,
198,
11748,
12094,
198,
11748,
10117,
79,
62,
15388,
198,
11748,
10117,
79,
62,
16366,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.810811 | 37 |
import json
import os
import re
import base64
import io
import aiohttp
import discord
from redbot.core import checks, data_manager, commands
from redbot.core.utils.chat_formatting import inline, box, pagify
R_MESSAGE_LINK = r"https://discordapp.com/channels/(\d+)/(\d+)/(\d+)"
R_ATTATCH_LINK = r"https://cdn.discordapp.com/attachments/\d+/\d+/.+"
class DataTransfer(commands.Cog):
"""Transfer cog data."""
async def red_get_data_for_user(self, *, user_id):
"""Get a user's personal data."""
data = "No data is stored for user with ID {}.\n".format(user_id)
return {"user_data.txt": BytesIO(data.encode())}
async def red_delete_data_for_user(self, *, requester, user_id):
"""Delete a user's personal data.
No personal data is stored in this cog.
"""
return
    @commands.group(name="export")
    @commands.guild_only()
    @checks.mod_or_permissions(administrator=True)
    async def _export(self, ctx):
        """Get a .enc file to load data on another bot via the [p]import command."""
        # Command-group container: intentionally empty body. The per-cog
        # exporters are registered as subcommands via @_export.command(...)
        # further down in this file.
    @commands.group(name="import")
    @commands.guild_only()
    @checks.mod_or_permissions(administrator=True)
    async def _import(self, ctx):
        """Load data from another bot via an attatched .enc file. (Obtain this with [p]export)"""
        # Command-group container: intentionally empty body. The per-cog
        # importers are registered as subcommands via @_import.command(...)
        # further down in this file.
@_import.command(name="alias")
@_export.command(name="alias")
@_import.command(name="customcom", aliases=["cc", "customcommands", "customcommand"])
@_export.command(name="customcom", aliases=["cc", "customcommands", "customcommand"])
@_import.command(name="memes", aliases=["meme"])
@_export.command(name="memes", aliases=["meme"])
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
2779,
2414,
198,
11748,
33245,
198,
11748,
257,
952,
4023,
198,
198,
11748,
36446,
198,
6738,
2266,
13645,
13,
7295,
1330,
8794,
11,
1366,
62,
37153,
11,
9729,
198,
6738,
226... | 2.606864 | 641 |
# pylint: disable=protected-access,missing-function-docstring, missing-class-docstring, missing-module-docstring, missing-class-docstring # noqa
# -*- coding: utf-8 -*-
import unittest
import tempfile
from pathlib import Path
from mlspeclib.helpers import convert_yaml_to_dict
import marshmallow.class_registry
from mlspeclib.mlschema import MLSchema
from mlspeclib.mlobject import MLObject
class liveSchemaTestSuite(unittest.TestCase): # pylint: disable=invalid-name
"""live schema test cases."""
default_registry = None
if __name__ == "__main__":
unittest.main()
| [
2,
279,
2645,
600,
25,
15560,
28,
24326,
12,
15526,
11,
45688,
12,
8818,
12,
15390,
8841,
11,
4814,
12,
4871,
12,
15390,
8841,
11,
4814,
12,
21412,
12,
15390,
8841,
11,
4814,
12,
4871,
12,
15390,
8841,
1303,
645,
20402,
198,
2,
53... | 2.944724 | 199 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This experiment was created using PsychoPy3 Experiment Builder (v2020.2.9),
on Mon Feb 1 17:31:07 2021
If you publish work using this script the most relevant publication is:
Peirce J, Gray JR, Simpson S, MacAskill M, Höchenberger R, Sogo H, Kastman E, Lindeløv JK. (2019)
PsychoPy2: Experiments in behavior made easy Behav Res 51: 195.
https://doi.org/10.3758/s13428-018-01193-y
"""
from __future__ import absolute_import, division
import psychopy
psychopy.useVersion('2020.2')
from psychopy import locale_setup
from psychopy import prefs
prefs.hardware['audioLib'] = 'sounddevice'
prefs.hardware['audioLatencyMode'] = '3'
from psychopy import sound, gui, visual, core, data, event, logging, clock
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,
STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import (sin, cos, tan, log, log10, pi, average,
sqrt, std, deg2rad, rad2deg, linspace, asarray)
from numpy.random import random, randint, normal, shuffle
import os # handy system and path functions
import sys # to get file system encoding
from psychopy.hardware import keyboard
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__))
os.chdir(_thisDir)
# Store info about the experiment session
psychopyVersion = '2020.2.9'
expName = 'sequence_LD' # from the Builder filename that created this script
expInfo = {'Subject ID': 'ECXXX', 'TDT Block': 'BXX', 'block_type': '2'}
dlg = gui.DlgFromDict(dictionary=expInfo, sortKeys=False, title=expName)
if dlg.OK == False:
core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
expInfo['psychopyVersion'] = psychopyVersion
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + u'data/%s_%s_%s' % (expInfo['Subject ID'], expName, expInfo['TDT Block'])
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
extraInfo=expInfo, runtimeInfo=None,
originPath='/Users/lauragwilliams/Dropbox (UCSF Department of Neurological Surgery)/lg/lexical_access/new_task/sequence-lexical_decision/sequence-2_LD_lastrun.py',
savePickle=True, saveWideText=True,
dataFileName=filename)
# save a log file for detail verbose info
logFile = logging.LogFile(filename+'.log', level=logging.DEBUG)
logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file
endExpNow = False # flag for 'escape' or other condition => quit the exp
frameTolerance = 0.001 # how close to onset before 'same' frame
# Start Code - component code to be run after the window creation
# Setup the Window
win = visual.Window(
size=[1440, 900], fullscr=True, screen=0,
winType='pyglet', allowGUI=False, allowStencil=False,
monitor='testMonitor', color=[0,0,0], colorSpace='rgb',
blendMode='avg', useFBO=True,
units='height')
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] != None:
frameDur = 1.0 / round(expInfo['frameRate'])
else:
frameDur = 1.0 / 60.0 # could not measure, so guess
# create a default keyboard (e.g. to check for escape)
defaultKeyboard = keyboard.Keyboard()
# Initialize components for Routine "trial"
trialClock = core.Clock()
text = visual.TextStim(win=win, name='text',
text='In this experiment, you will hear spoken sounds:\n\ne.g. “tenticles”\n\ne.g. “senticles”\n\nAfter each sound, decide whether what you hear is an existing word of English.\n\nPress the space bar to see an example.',
font='Arial',
pos=(0, 0), height=0.03, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
key_resp = keyboard.Keyboard()
# Initialize components for Routine "example_response"
example_responseClock = core.Clock()
text_7 = visual.TextStim(win=win, name='text_7',
text='After hearing each word, you will see this on the screen.\n\nPress “Z” when it IS a word.\n\nPress “M” when it is NOT a word.',
font='Arial',
pos=(0, 0.3), height=0.02, wrapWidth=None, ori=0,
color='black', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
text_3 = visual.TextStim(win=win, name='text_3',
text='WORD',
font='Arial',
pos=(-0.3, 0), height=0.05, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=-1.0);
text_5 = visual.TextStim(win=win, name='text_5',
text='NON-WORD',
font='Arial',
pos=(0.3, 0), height=0.05, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=-2.0);
key_resp_5 = keyboard.Keyboard()
# Initialize components for Routine "begin"
beginClock = core.Clock()
text_6 = visual.TextStim(win=win, name='text_6',
text='Press SPACE to begin!',
font='Arial',
pos=(0, 0), height=0.05, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
key_resp_2 = keyboard.Keyboard()
# Initialize components for Routine "audio"
audioClock = core.Clock()
audio_word = sound.Sound('A', secs=-1, stereo=True, hamming=True,
name='audio_word')
audio_word.setVolume(1)
# Initialize components for Routine "response"
responseClock = core.Clock()
option_left = visual.TextStim(win=win, name='option_left',
text='WORD',
font='Arial',
pos=(-0.3, 0), height=0.05, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
option_right = visual.TextStim(win=win, name='option_right',
text='NON-WORD',
font='Arial',
pos=(0.3, 0), height=0.05, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=-1.0);
key_resp_4 = keyboard.Keyboard()
# Initialize components for Routine "mturk_code"
mturk_codeClock = core.Clock()
text_4 = visual.TextStim(win=win, name='text_4',
text='default text',
font='Arial',
pos=(0, 0), height=0.05, wrapWidth=None, ori=0,
color='Black', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
key_resp_3 = keyboard.Keyboard()
# Create some handy timers
globalClock = core.Clock() # to track the time since experiment started
routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine
# ------Prepare to start Routine "trial"-------
continueRoutine = True
# update component parameters for each repeat
key_resp.keys = []
key_resp.rt = []
_key_resp_allKeys = []
# keep track of which components have finished
trialComponents = [text, key_resp]
for thisComponent in trialComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
trialClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "trial"-------
while continueRoutine:
# get current time
t = trialClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=trialClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text* updates
if text.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
text.frameNStart = frameN # exact frame index
text.tStart = t # local t and not account for scr refresh
text.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text, 'tStartRefresh') # time at next scr refresh
text.setAutoDraw(True)
# *key_resp* updates
waitOnFlip = False
if key_resp.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
key_resp.frameNStart = frameN # exact frame index
key_resp.tStart = t # local t and not account for scr refresh
key_resp.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(key_resp, 'tStartRefresh') # time at next scr refresh
key_resp.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(key_resp.clock.reset) # t=0 on next screen flip
win.callOnFlip(key_resp.clearEvents, eventType='keyboard') # clear events on next screen flip
if key_resp.status == STARTED and not waitOnFlip:
theseKeys = key_resp.getKeys(keyList=['y', 'n', 'left', 'right', 'space', 'return'], waitRelease=False)
_key_resp_allKeys.extend(theseKeys)
if len(_key_resp_allKeys):
key_resp.keys = _key_resp_allKeys[-1].name # just the last key pressed
key_resp.rt = _key_resp_allKeys[-1].rt
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in trialComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "trial"-------
for thisComponent in trialComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('text.started', text.tStartRefresh)
thisExp.addData('text.stopped', text.tStopRefresh)
# check responses
if key_resp.keys in ['', [], None]: # No response was made
key_resp.keys = None
thisExp.addData('key_resp.keys',key_resp.keys)
if key_resp.keys != None: # we had a response
thisExp.addData('key_resp.rt', key_resp.rt)
thisExp.addData('key_resp.started', key_resp.tStartRefresh)
thisExp.addData('key_resp.stopped', key_resp.tStopRefresh)
thisExp.nextEntry()
# the Routine "trial" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "example_response"-------
continueRoutine = True
# update component parameters for each repeat
key_resp_5.keys = []
key_resp_5.rt = []
_key_resp_5_allKeys = []
# keep track of which components have finished
example_responseComponents = [text_7, text_3, text_5, key_resp_5]
for thisComponent in example_responseComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
example_responseClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "example_response"-------
while continueRoutine:
# get current time
t = example_responseClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=example_responseClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_7* updates
if text_7.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
text_7.frameNStart = frameN # exact frame index
text_7.tStart = t # local t and not account for scr refresh
text_7.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text_7, 'tStartRefresh') # time at next scr refresh
text_7.setAutoDraw(True)
# *text_3* updates
if text_3.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
text_3.frameNStart = frameN # exact frame index
text_3.tStart = t # local t and not account for scr refresh
text_3.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text_3, 'tStartRefresh') # time at next scr refresh
text_3.setAutoDraw(True)
# *text_5* updates
if text_5.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
text_5.frameNStart = frameN # exact frame index
text_5.tStart = t # local t and not account for scr refresh
text_5.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text_5, 'tStartRefresh') # time at next scr refresh
text_5.setAutoDraw(True)
# *key_resp_5* updates
waitOnFlip = False
if key_resp_5.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
key_resp_5.frameNStart = frameN # exact frame index
key_resp_5.tStart = t # local t and not account for scr refresh
key_resp_5.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(key_resp_5, 'tStartRefresh') # time at next scr refresh
key_resp_5.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(key_resp_5.clock.reset) # t=0 on next screen flip
win.callOnFlip(key_resp_5.clearEvents, eventType='keyboard') # clear events on next screen flip
if key_resp_5.status == STARTED and not waitOnFlip:
theseKeys = key_resp_5.getKeys(keyList=['z', 'm', 'left', 'right', 'space'], waitRelease=False)
_key_resp_5_allKeys.extend(theseKeys)
if len(_key_resp_5_allKeys):
key_resp_5.keys = _key_resp_5_allKeys[-1].name # just the last key pressed
key_resp_5.rt = _key_resp_5_allKeys[-1].rt
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in example_responseComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "example_response"-------
for thisComponent in example_responseComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('text_7.started', text_7.tStartRefresh)
thisExp.addData('text_7.stopped', text_7.tStopRefresh)
thisExp.addData('text_3.started', text_3.tStartRefresh)
thisExp.addData('text_3.stopped', text_3.tStopRefresh)
thisExp.addData('text_5.started', text_5.tStartRefresh)
thisExp.addData('text_5.stopped', text_5.tStopRefresh)
# check responses
if key_resp_5.keys in ['', [], None]: # No response was made
key_resp_5.keys = None
thisExp.addData('key_resp_5.keys',key_resp_5.keys)
if key_resp_5.keys != None: # we had a response
thisExp.addData('key_resp_5.rt', key_resp_5.rt)
thisExp.addData('key_resp_5.started', key_resp_5.tStartRefresh)
thisExp.addData('key_resp_5.stopped', key_resp_5.tStopRefresh)
thisExp.nextEntry()
# the Routine "example_response" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "begin"-------
continueRoutine = True
# update component parameters for each repeat
key_resp_2.keys = []
key_resp_2.rt = []
_key_resp_2_allKeys = []
# keep track of which components have finished
beginComponents = [text_6, key_resp_2]
for thisComponent in beginComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
beginClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "begin"-------
while continueRoutine:
# get current time
t = beginClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=beginClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_6* updates
if text_6.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
text_6.frameNStart = frameN # exact frame index
text_6.tStart = t # local t and not account for scr refresh
text_6.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text_6, 'tStartRefresh') # time at next scr refresh
text_6.setAutoDraw(True)
# *key_resp_2* updates
waitOnFlip = False
if key_resp_2.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
key_resp_2.frameNStart = frameN # exact frame index
key_resp_2.tStart = t # local t and not account for scr refresh
key_resp_2.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(key_resp_2, 'tStartRefresh') # time at next scr refresh
key_resp_2.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(key_resp_2.clock.reset) # t=0 on next screen flip
win.callOnFlip(key_resp_2.clearEvents, eventType='keyboard') # clear events on next screen flip
if key_resp_2.status == STARTED and not waitOnFlip:
theseKeys = key_resp_2.getKeys(keyList=['y', 'n', 'left', 'right', 'space'], waitRelease=False)
_key_resp_2_allKeys.extend(theseKeys)
if len(_key_resp_2_allKeys):
key_resp_2.keys = _key_resp_2_allKeys[-1].name # just the last key pressed
key_resp_2.rt = _key_resp_2_allKeys[-1].rt
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in beginComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "begin"-------
for thisComponent in beginComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('text_6.started', text_6.tStartRefresh)
thisExp.addData('text_6.stopped', text_6.tStopRefresh)
# check responses
if key_resp_2.keys in ['', [], None]: # No response was made
key_resp_2.keys = None
thisExp.addData('key_resp_2.keys',key_resp_2.keys)
if key_resp_2.keys != None: # we had a response
thisExp.addData('key_resp_2.rt', key_resp_2.rt)
thisExp.addData('key_resp_2.started', key_resp_2.tStartRefresh)
thisExp.addData('key_resp_2.stopped', key_resp_2.tStopRefresh)
thisExp.nextEntry()
# the Routine "begin" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# set up handler to look after randomisation of conditions etc
trials = data.TrialHandler(nReps=2, method='random',
extraInfo=expInfo, originPath=-1,
trialList=data.importConditions('barakeet_trial_info-2.xlsx'),
seed=None, name='trials')
thisExp.addLoop(trials) # add the loop to the experiment
thisTrial = trials.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial:
exec('{} = thisTrial[paramName]'.format(paramName))
for thisTrial in trials:
currentLoop = trials
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial:
exec('{} = thisTrial[paramName]'.format(paramName))
# ------Prepare to start Routine "audio"-------
continueRoutine = True
# update component parameters for each repeat
audio_word.setSound(wav_file, hamming=True)
audio_word.setVolume(1, log=False)
# keep track of which components have finished
audioComponents = [audio_word]
for thisComponent in audioComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
audioClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "audio"-------
while continueRoutine:
# get current time
t = audioClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=audioClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# start/stop audio_word
if audio_word.status == NOT_STARTED and tThisFlip >= 0-frameTolerance:
# keep track of start time/frame for later
audio_word.frameNStart = frameN # exact frame index
audio_word.tStart = t # local t and not account for scr refresh
audio_word.tStartRefresh = tThisFlipGlobal # on global time
audio_word.play(when=win) # sync with win flip
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in audioComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "audio"-------
for thisComponent in audioComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
audio_word.stop() # ensure sound has stopped at end of routine
trials.addData('audio_word.started', audio_word.tStartRefresh)
trials.addData('audio_word.stopped', audio_word.tStopRefresh)
# the Routine "audio" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "response"-------
continueRoutine = True
# update component parameters for each repeat
key_resp_4.keys = []
key_resp_4.rt = []
_key_resp_4_allKeys = []
# keep track of which components have finished
responseComponents = [option_left, option_right, key_resp_4]
for thisComponent in responseComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
responseClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "response"-------
while continueRoutine:
# get current time
t = responseClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=responseClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *option_left* updates
if option_left.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
option_left.frameNStart = frameN # exact frame index
option_left.tStart = t # local t and not account for scr refresh
option_left.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(option_left, 'tStartRefresh') # time at next scr refresh
option_left.setAutoDraw(True)
# *option_right* updates
if option_right.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
option_right.frameNStart = frameN # exact frame index
option_right.tStart = t # local t and not account for scr refresh
option_right.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(option_right, 'tStartRefresh') # time at next scr refresh
option_right.setAutoDraw(True)
# *key_resp_4* updates
waitOnFlip = False
if key_resp_4.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
key_resp_4.frameNStart = frameN # exact frame index
key_resp_4.tStart = t # local t and not account for scr refresh
key_resp_4.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(key_resp_4, 'tStartRefresh') # time at next scr refresh
key_resp_4.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(key_resp_4.clock.reset) # t=0 on next screen flip
win.callOnFlip(key_resp_4.clearEvents, eventType='keyboard') # clear events on next screen flip
if key_resp_4.status == STARTED and not waitOnFlip:
theseKeys = key_resp_4.getKeys(keyList=['z', 'm', 'left', 'right', 'space'], waitRelease=False)
_key_resp_4_allKeys.extend(theseKeys)
if len(_key_resp_4_allKeys):
key_resp_4.keys = _key_resp_4_allKeys[-1].name # just the last key pressed
key_resp_4.rt = _key_resp_4_allKeys[-1].rt
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in responseComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "response"-------
for thisComponent in responseComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
trials.addData('option_left.started', option_left.tStartRefresh)
trials.addData('option_left.stopped', option_left.tStopRefresh)
trials.addData('option_right.started', option_right.tStartRefresh)
trials.addData('option_right.stopped', option_right.tStopRefresh)
# check responses
if key_resp_4.keys in ['', [], None]: # No response was made
key_resp_4.keys = None
trials.addData('key_resp_4.keys',key_resp_4.keys)
if key_resp_4.keys != None: # we had a response
trials.addData('key_resp_4.rt', key_resp_4.rt)
trials.addData('key_resp_4.started', key_resp_4.tStartRefresh)
trials.addData('key_resp_4.stopped', key_resp_4.tStopRefresh)
# the Routine "response" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
thisExp.nextEntry()
# completed 2 repeats of 'trials'
# ------Prepare to start Routine "mturk_code"-------
continueRoutine = True
# update component parameters for each repeat
text_4.setText('Thank you for taking part!\n\nPress SPACE to finish.')
key_resp_3.keys = []
key_resp_3.rt = []
_key_resp_3_allKeys = []
# keep track of which components have finished
mturk_codeComponents = [text_4, key_resp_3]
for thisComponent in mturk_codeComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
mturk_codeClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "mturk_code"-------
while continueRoutine:
# get current time
t = mturk_codeClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=mturk_codeClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_4* updates
if text_4.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
text_4.frameNStart = frameN # exact frame index
text_4.tStart = t # local t and not account for scr refresh
text_4.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text_4, 'tStartRefresh') # time at next scr refresh
text_4.setAutoDraw(True)
# *key_resp_3* updates
waitOnFlip = False
if key_resp_3.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
key_resp_3.frameNStart = frameN # exact frame index
key_resp_3.tStart = t # local t and not account for scr refresh
key_resp_3.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(key_resp_3, 'tStartRefresh') # time at next scr refresh
key_resp_3.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(key_resp_3.clock.reset) # t=0 on next screen flip
win.callOnFlip(key_resp_3.clearEvents, eventType='keyboard') # clear events on next screen flip
if key_resp_3.status == STARTED and not waitOnFlip:
theseKeys = key_resp_3.getKeys(keyList=['y', 'n', 'left', 'right', 'space', 'return'], waitRelease=False)
_key_resp_3_allKeys.extend(theseKeys)
if len(_key_resp_3_allKeys):
key_resp_3.keys = _key_resp_3_allKeys[-1].name # just the last key pressed
key_resp_3.rt = _key_resp_3_allKeys[-1].rt
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in mturk_codeComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "mturk_code"-------
for thisComponent in mturk_codeComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('text_4.started', text_4.tStartRefresh)
thisExp.addData('text_4.stopped', text_4.tStopRefresh)
# check responses
if key_resp_3.keys in ['', [], None]: # No response was made
key_resp_3.keys = None
thisExp.addData('key_resp_3.keys',key_resp_3.keys)
if key_resp_3.keys != None: # we had a response
thisExp.addData('key_resp_3.rt', key_resp_3.rt)
thisExp.addData('key_resp_3.started', key_resp_3.tStartRefresh)
thisExp.addData('key_resp_3.stopped', key_resp_3.tStopRefresh)
thisExp.nextEntry()
# the Routine "mturk_code" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# Flip one final time so any remaining win.callOnFlip()
# and win.timeOnFlip() tasks get executed before quitting
win.flip()
# these shouldn't be strictly necessary (should auto-save)
thisExp.saveAsWideText(filename+'.csv', delim='auto')
thisExp.saveAsPickle(filename)
logging.flush()
# make sure everything is closed down
thisExp.abort() # or data files will save again on exit
win.close()
core.quit()
| [
171,
119,
123,
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
1212,
6306,
373,
2727,
1262,
38955,
20519,
18,
29544,
35869,
357,
85,
42334,
13,
17,
13,
2... | 2.594254 | 13,262 |
# -*- coding: utf-8 -*-
from knitter import datadriver, log
from demoprj.page import KnitterDemo
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
638,
1967,
1330,
4818,
324,
38291,
11,
2604,
198,
6738,
1357,
404,
81,
73,
13,
7700,
1330,
6102,
1967,
11522,
78,
628,
628,
628,
628,
628,
628,
628,
198
] | 2.488889 | 45 |
import unittest
from prob_distrs import Exponential
# Entry point: run all unittest.TestCase classes defined in this module
# (tests for the Exponential distribution) when executed as a script.
if __name__ == '__main__':
    unittest.main()
| [
11748,
555,
715,
395,
198,
198,
6738,
1861,
62,
17080,
3808,
1330,
5518,
35470,
198,
220,
220,
220,
220,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
198
] | 2.560976 | 41 |
import json
from http import HTTPStatus
from typing import Any, Dict
from aws_lambda_powertools.logging.logger import Logger
from aws_lambda_powertools.utilities.typing import LambdaContext
# Module-level structured logger, shared by every Lambda handler invocation.
logger: Logger = Logger(service='my_service')  # JSON output format, service name can be set by environment variable "POWERTOOLS_SERVICE_NAME"
| [
11748,
33918,
198,
6738,
2638,
1330,
14626,
19580,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
198,
198,
6738,
3253,
82,
62,
50033,
62,
6477,
31391,
13,
6404,
2667,
13,
6404,
1362,
1330,
5972,
1362,
198,
6738,
3253,
82,
62,
50033,
62,... | 3.428571 | 98 |
from pathlib import Path
from advent.solutions import day07
| [
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
19980,
13,
82,
14191,
1330,
1110,
2998,
628,
198
] | 3.705882 | 17 |
import json
import logging.config
import yaml
| [
11748,
33918,
198,
11748,
18931,
13,
11250,
198,
11748,
331,
43695,
628,
198
] | 3.692308 | 13 |
import requests
from bs4 import BeautifulSoup
from openpyxl import Workbook
from openpyxl import load_workbook
import winreg

# Extra task: read the current user's Desktop path from the Windows registry
# so the scraped Excel sheet can be saved straight to the desktop.
key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders')
mydesktop = winreg.QueryValueEx(key,"Desktop")[0]
# Double every backslash in the path before it is concatenated below.
mydesktop = mydesktop.replace('\\', '\\\\')
# Ask the user for the MTM (Machine Type Model) number.
mtm = input('请输入MTM Number: ')
url = 'https://psref.lenovo.com/Search?kw='+mtm
# Check whether the MTM exists on Lenovo PSREF.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)'
                         'Chrome/51.0.2704.63 Safari/537.36'}
res = requests.get(url,headers=headers)
html = res.text
soup = BeautifulSoup( html,'html.parser')
item = soup.find('p',class_='filtered_title')
try:
    if item.text != '':
        print('找到了此MTM')
        # The MTM was found: start scraping its configuration.
        # First locate the MTM detail-page address.
        item = soup.find('ul',class_='modets_list')
        # Build the MTM detail URL from the link template.
        mtmurl = 'https://psref.lenovo.com/'+item.attrs['modeldetaillinkpart'].replace('{ModelCode}',mtm)
        # Open the MTM page and fetch the configuration table.
        res = requests.get(mtmurl,headers=headers)
        html = res.text
        soup = BeautifulSoup( html,'html.parser')
        table = soup.find('table',class_='SpecValueTable')
        # Write the spec table into an Excel workbook, one row per <tr>.
        wb = Workbook()
        ws = wb.active
        lists = []
        for tr in table.find_all('tr'):
            for td in tr.find_all('td'):
                lists.append(td.text)
            ws.append(lists)
            lists.clear()
        wb.save(mydesktop+'\\\\MTM_Config.xlsx')
except AttributeError:
    # soup.find() returned None (no .text attribute): the MTM was not found.
    print('未发现此MTM, 请检查输入的MTM是否正确')
| [
11748,
7007,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
6738,
220,
1280,
9078,
87,
75,
1330,
220,
5521,
2070,
220,
198,
6738,
1280,
9078,
87,
75,
1330,
3440,
62,
1818,
2070,
198,
11748,
1592,
2301,
198,
198,
2,
165,
95,
... | 1.799766 | 854 |
"""RLBotChoreography
Usage:
choreograph [--min-bots=<min>] [--bot-folder=<folder>]
Options:
--min-bots=<min> The minimum number of bots to spawn [default: 10].
--bot-folder=<folder> Searches this folder for bot configs to use for names and appearances [default: .].
"""
import traceback
import time
try:
import copy
import os
import sys
from docopt import docopt
from rlbot.matchconfig.conversions import parse_match_config
from rlbot.parsing.agent_config_parser import load_bot_appearance
from rlbot.parsing.directory_scanner import scan_directory_for_bot_configs
from rlbot.parsing.rlbot_config_parser import create_bot_config_layout
from rlbot.setup_manager import SetupManager
from rlbot.utils.structures.start_match_structures import MAX_PLAYERS
from hivemind import Hivemind
if __name__ == '__main__':
arguments = docopt(__doc__)
min_bots = 16 # min(int(arguments['--min-bots']), MAX_PLAYERS)
bot_directory = arguments['--bot-folder']
bundles = scan_directory_for_bot_configs(bot_directory)
# Set up RLBot.cfg
framework_config = create_bot_config_layout()
config_location = os.path.join(os.path.dirname(__file__), 'rlbot.cfg')
framework_config.parse_file(config_location, max_index=MAX_PLAYERS)
match_config = parse_match_config(framework_config, config_location, {}, {})
looks_configs = {idx: bundle.get_looks_config() for idx, bundle in enumerate(bundles)}
names = [bundle.name for bundle in bundles]
mc_id = 0
mc_2_id = 0
player_id = 0
# Figures out which config is which
for i, cfg in enumerate(match_config.player_configs):
loadout = cfg.loadout_config
if loadout.hat_id == 1332:
if loadout.car_id == 23:
mc_id = i + 1
else:
mc_2_id = i + 1
else:
player_id = i + 1
if mc_2_id and mc_id and player_id:
print("Detected got all ids")
break
mc_id -= 1
mc_2_id -= 1
player_id -= 1
if mc_id < 0:
mc_config = match_config.player_configs[0]
else:
mc_config = match_config.player_configs[mc_id]
mc_config_2 = match_config.player_configs[mc_2_id]
player_config = match_config.player_configs[player_id]
match_config.player_configs.clear()
for i in range(min_bots):
if mc_2_id >= 0:
# copied = copy.copy(player_config if i >= 2 else (mc_config_2 if i else mc_config))
copied = copy.copy(player_config if i >= 1 else mc_config_2)
if i >= 4:
c2 = copy.copy(copied.loadout_config)
c2.boost_id = 32
copied.loadout_config = c2
else:
copied = copy.copy(mc_config)
match_config.player_configs.append(copied)
manager = SetupManager()
manager.load_match_config(match_config, {})
manager.connect_to_game()
manager.start_match()
hivemind = Hivemind()
hivemind.start()
except Exception as e:
print("Exception occured")
time.sleep(1)
print(traceback.format_exc())
while True:
pass
print("end")
time.sleep(2) | [
37811,
7836,
20630,
1925,
382,
4867,
198,
198,
28350,
25,
198,
220,
220,
220,
30569,
2384,
685,
438,
1084,
12,
42478,
28,
27,
1084,
37981,
685,
438,
13645,
12,
43551,
28,
27,
43551,
37981,
198,
198,
29046,
25,
198,
220,
220,
220,
13... | 2.446384 | 1,203 |
# -*- coding: utf-8 -*-
# Copyright 2017-TODAY LasLabs Inc.
# License MIT (https://opensource.org/licenses/MIT).
import mock
import unittest
from contextlib import contextmanager
from ..request_paginator import RequestPaginator
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
2177,
12,
51,
3727,
4792,
10123,
43,
8937,
3457,
13,
198,
2,
13789,
17168,
357,
5450,
1378,
44813,
1668,
13,
2398,
14,
677,
4541,
14,
36393,
737,
198,
198,
... | 3.093333 | 75 |
# -*- coding: utf-8 -*-
# Copyright 2018-2022 the orix developers
#
# This file is part of orix.
#
# orix is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# orix is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with orix. If not, see <http://www.gnu.org/licenses/>.
"""Point transformations of objects.
Rotations are transformations of three-dimensional space leaving the
origin in place. Rotations can be parametrized numerous ways, but in
orix are handled as unit quaternions. Rotations can act on vectors, or
other rotations, but not scalars. They are often most easily visualised
as being a turn of a certain angle about a certain axis.
.. image:: /_static/img/rotation.png
:width: 200px
:alt: Rotation of an object illustrated with an axis and rotation angle.
:align: center
Rotations can also be *improper*. An improper rotation in orix operates
on vectors as a rotation by the unit quaternion, followed by inversion.
Hence, a mirroring through the x-y plane can be considered an improper
rotation of 180° about the z-axis, illustrated in the figure below.
.. image:: /_static/img/inversion.png
:width: 200px
:alt: 180° rotation followed by inversion, leading to a mirror operation.
:align: center
"""
import warnings
import dask.array as da
from dask.diagnostics import ProgressBar
import numpy as np
from scipy.special import hyp0f1
from orix._util import deprecated_argument
from orix.quaternion import Quaternion
from orix.vector import AxAngle, Vector3d
# Used to round values below 1e-16 to zero
_FLOAT_EPS = np.finfo(float).eps
class Rotation(Quaternion):
"""Rotation object.
Rotations support the following mathematical operations:
- Unary negation.
- Inversion.
- Multiplication with other rotations and vectors.
Rotations inherit all methods from :class:`Quaternion` although
behaviour is different in some cases.
Rotations can be converted to other parametrizations, notably the
neo-Euler representations. See :class:`NeoEuler`.
"""
    def __eq__(self, other):
        """Check if Rotation objects are equal by their shape and values."""
        # only return equal if shape, values, and improper arrays are equal
        # NOTE(review): defining __eq__ normally sets __hash__ to None unless
        # a base class supplies one — confirm hashing behavior is intended.
        if (
            isinstance(other, Rotation)
            and self.shape == other.shape
            and np.allclose(self.data, other.data)
            and np.allclose(self.improper, other.improper)
        ):
            return True
        else:
            return False
    def unique(self, return_index=False, return_inverse=False, antipodal=True):
        """Returns a new object containing only this object's unique
        entries.

        Two rotations are not unique if they have the same propriety
        AND:

        - they have the same numerical value OR
        - the numerical value of one is the negative of the other

        Parameters
        ----------
        return_index : bool, optional
            If True, will also return the indices of the (flattened)
            data where the unique entries were found.
        return_inverse : bool, optional
            If True, will also return the indices to reconstruct the
            (flattened) data from the unique data.
        antipodal : bool, optional
            If False, rotations representing the same transformation
            whose values are numerically different (negative) will *not*
            be considered unique.
        """
        if len(self.data) == 0:
            return self.__class__(self.data)
        rotation = self.flatten()
        if antipodal:
            abcd = rotation._differentiators()
        else:
            # Rounding makes rotations that differ only by floating-point
            # noise compare equal in np.unique below.
            abcd = np.stack(
                [
                    rotation.a,
                    rotation.b,
                    rotation.c,
                    rotation.d,
                    rotation.improper,
                ],
                axis=-1,
            ).round(12)
        _, idx, inv = np.unique(abcd, axis=0, return_index=True, return_inverse=True)
        # np.unique sorts lexicographically; re-sort the indices so unique
        # entries are returned in their original encounter order.
        idx_argsort = np.argsort(idx)
        idx_sort = idx[idx_argsort]
        # build inverse index map
        inv_map = np.empty_like(idx_argsort)
        inv_map[idx_argsort] = np.arange(idx_argsort.size)
        inv = inv_map[inv]
        dat = rotation[idx_sort]
        dat.improper = rotation.improper[idx_sort]
        if return_index and return_inverse:
            return dat, idx_sort, inv
        elif return_index and not return_inverse:
            return dat, idx_sort
        elif return_inverse and not return_index:
            return dat, inv
        else:
            return dat
    def angle_with(self, other):
        """The angle of rotation transforming this rotation to the
        other.

        Returns
        -------
        numpy.ndarray

        See also
        --------
        angle_with_outer
        """
        other = Rotation(other)
        dot_products = self.unit.dot(other.unit)
        # Round because some dot products are slightly above 1
        dot_products = np.round(dot_products, np.finfo(dot_products.dtype).precision)
        # Misorientation angle omega = arccos(2 * <q1, q2>^2 - 1); nan_to_num
        # guards against arccos of values marginally outside [-1, 1].
        angles = np.nan_to_num(np.arccos(2 * dot_products**2 - 1))
        return angles
    def angle_with_outer(self, other):
        """The angle of rotation transforming this rotation to the other.

        Parameters
        ----------
        other : Rotation

        Returns
        -------
        numpy.ndarray

        See also
        --------
        angle_with

        Examples
        --------
        >>> from orix.quaternion import Rotation
        >>> r1 = Rotation.random((5, 3))
        >>> r2 = Rotation.random((6, 2))
        >>> dist = r1.angle_with_outer(r2)
        >>> dist.shape
        (5, 3, 6, 2)
        """
        # Same formula as angle_with but over the outer (cartesian) product.
        dot_products = self.unit.dot_outer(other.unit)
        angles = np.nan_to_num(np.arccos(2 * dot_products**2 - 1))
        return angles
    def outer(self, other, lazy=False, chunk_size=20, progressbar=True):
        """Compute the outer product of this rotation and the other
        rotation or vector.

        Parameters
        ----------
        other : Rotation or Vector3d
        lazy : bool, optional
            Whether to perform this computation using Dask. This option
            can be used to reduce memory usage when working with large
            arrays. Default is False.
        chunk_size : int, optional
            When using `lazy` computation, `chunk_size` represents the
            number of objects per axis for each input to include in each
            iteration of the computation. Default is 20.
        progressbar : bool, optional
            Whether to show a progressbar during computation if `lazy`
            is True. Default is True.

        Returns
        -------
        Rotation or Vector3d
        """
        if lazy:
            # Evaluate chunk-wise via Dask and stream the result into a
            # pre-allocated array to bound peak memory usage.
            darr = self._outer_dask(other, chunk_size=chunk_size)
            arr = np.empty(darr.shape)
            if progressbar:
                with ProgressBar():
                    da.store(darr, arr)
            else:
                da.store(darr, arr)
            r = other.__class__(arr)
        else:
            r = super().outer(other)
        if isinstance(r, Rotation):
            # Parity of a composition: improper iff exactly one factor is.
            r.improper = np.logical_xor.outer(self.improper, other.improper)
        elif isinstance(r, Vector3d):
            # Improper rotations additionally invert transformed vectors.
            r[self.improper] = -r[self.improper]
        return r
    def flatten(self):
        """A new object with the same data in a single column."""
        r = super().flatten()
        # Carry the improper flags through the reshape.
        r.improper = self.improper.T.flatten().T
        return r
@property
def improper(self):
"""ndarray : True for improper rotations and False otherwise."""
return self._data[..., -1].astype(bool)
@improper.setter
def dot_outer(self, other):
"""numpy.ndarray : the outer dot product of this rotation and the other."""
cosines = np.abs(super().dot_outer(other))
if isinstance(other, Rotation):
improper = self.improper.reshape(self.shape + (1,) * len(other.shape))
i = np.logical_xor(improper, other.improper)
cosines = np.minimum(~i, cosines)
else:
cosines[self.improper] = 0
return cosines
    @classmethod
    def from_neo_euler(cls, neo_euler):
        """Creates a rotation from a neo-euler (vector) representation.

        Parameters
        ----------
        neo_euler : NeoEuler
            Vector parametrization of a rotation.
        """
        # Half-angle construction: q = (cos(w/2), sin(w/2) * axis).
        s = np.sin(neo_euler.angle / 2)
        a = np.cos(neo_euler.angle / 2)
        b = s * neo_euler.axis.x
        c = s * neo_euler.axis.y
        d = s * neo_euler.axis.z
        r = cls(np.stack([a, b, c, d], axis=-1))
        return r
    @classmethod
    def from_axes_angles(cls, axes, angles):
        """Creates rotation(s) from axis-angle pair(s).

        Parameters
        ----------
        axes : Vector3d or array_like
            The axis of rotation.
        angles : array_like
            The angle of rotation, in radians.

        Returns
        -------
        Rotation

        Examples
        --------
        >>> import numpy as np
        >>> from orix.quaternion import Rotation
        >>> rot = Rotation.from_axes_angles((0, 0, -1), np.pi / 2)
        >>> rot
        Rotation (1,)
        [[ 0.7071  0.      0.     -0.7071]]

        See Also
        --------
        from_neo_euler
        """
        # Delegate to the neo-Euler path via an AxAngle vector.
        axangle = AxAngle.from_axes_angles(axes, angles)
        return cls.from_neo_euler(axangle)
    # TODO: Remove decorator and **kwargs in 1.0
    @deprecated_argument("convention", since="0.9", removal="1.0")
    def to_euler(self, **kwargs):
        r"""Rotations as Euler angles in the Bunge convention
        :cite:`rowenhorst2015consistent`.

        Returns
        -------
        numpy.ndarray
            Array of Euler angles in radians, in the ranges
            :math:`\phi_1 \in [0, 2\pi]`, :math:`\Phi \in [0, \pi]`, and
            :math:`\phi_1 \in [0, 2\pi]`.
        """
        # A.14 from Modelling Simul. Mater. Sci. Eng. 23 (2015) 083501
        n = self.data.shape[:-1]
        e = np.zeros(n + (3,))
        a, b, c, d = self.a, self.b, self.c, self.d
        q03 = a**2 + d**2
        q12 = b**2 + c**2
        chi = np.sqrt(q03 * q12)
        # P = 1
        # Degenerate case 1: rotation about the z axis (b = c = 0).
        q12_is_zero = q12 == 0
        if np.sum(q12_is_zero) > 0:
            alpha = np.arctan2(-2 * a * d, a**2 - d**2)
            e[..., 0] = np.where(q12_is_zero, alpha, e[..., 0])
            e[..., 1] = np.where(q12_is_zero, 0, e[..., 1])
            e[..., 2] = np.where(q12_is_zero, 0, e[..., 2])
        # Degenerate case 2: 180 degree rotation in the x-y plane (a = d = 0).
        q03_is_zero = q03 == 0
        if np.sum(q03_is_zero) > 0:
            alpha = np.arctan2(2 * b * c, b**2 - c**2)
            e[..., 0] = np.where(q03_is_zero, alpha, e[..., 0])
            e[..., 1] = np.where(q03_is_zero, np.pi, e[..., 1])
            e[..., 2] = np.where(q03_is_zero, 0, e[..., 2])
        # General case; np.divide with `where=` avoids division by zero for
        # the entries already handled above.
        if np.sum(chi != 0) > 0:
            not_zero = ~np.isclose(chi, 0)
            alpha = np.arctan2(
                np.divide(
                    b * d - a * c, chi, where=not_zero, out=np.full_like(chi, np.inf)
                ),
                np.divide(
                    -a * b - c * d, chi, where=not_zero, out=np.full_like(chi, np.inf)
                ),
            )
            beta = np.arctan2(2 * chi, q03 - q12)
            gamma = np.arctan2(
                np.divide(
                    a * c + b * d, chi, where=not_zero, out=np.full_like(chi, np.inf)
                ),
                np.divide(
                    c * d - a * b, chi, where=not_zero, out=np.full_like(chi, np.inf)
                ),
            )
            e[..., 0] = np.where(not_zero, alpha, e[..., 0])
            e[..., 1] = np.where(not_zero, beta, e[..., 1])
            e[..., 2] = np.where(not_zero, gamma, e[..., 2])
        # Reduce Euler angles to definition range
        e[np.abs(e) < _FLOAT_EPS] = 0
        e = np.where(e < 0, np.mod(e + 2 * np.pi, (2 * np.pi, np.pi, 2 * np.pi)), e)
        return e
    # TODO: Remove decorator, **kwargs, and use of "convention" in 1.0
    @classmethod
    @deprecated_argument("convention", "0.9", "1.0", "direction")
    def from_euler(cls, euler, direction="lab2crystal", **kwargs):
        """Creates a rotation from an array of Euler angles in radians.

        Parameters
        ----------
        euler : array-like
            Euler angles in radians in the Bunge convention.
        direction : str
            "lab2crystal" (default) or "crystal2lab". "lab2crystal"
            is the Bunge convention. If "MTEX" is provided then the
            direction is "crystal2lab".
        """
        direction = direction.lower()
        if direction == "mtex" or (
            "convention" in kwargs and kwargs["convention"] == "mtex"
        ):
            # MTEX uses bunge but with lab2crystal referencing:
            # see - https://mtex-toolbox.github.io/MTEXvsBungeConvention.html
            # and orix issue #215
            direction = "crystal2lab"
        directions = ["lab2crystal", "crystal2lab"]
        # processing directions
        if direction not in directions:
            raise ValueError(
                f"The chosen direction is not one of the allowed options {directions}"
            )
        euler = np.asarray(euler)
        if np.any(np.abs(euler) > 4 * np.pi):
            warnings.warn(
                "Angles are assumed to be in radians, but degrees might have been "
                "passed."
            )
        n = euler.shape[:-1]
        alpha, beta, gamma = euler[..., 0], euler[..., 1], euler[..., 2]
        # Uses A.5 & A.6 from Modelling Simul. Mater. Sci. Eng. 23
        # (2015) 083501
        sigma = 0.5 * np.add(alpha, gamma)
        delta = 0.5 * np.subtract(alpha, gamma)
        c = np.cos(beta / 2)
        s = np.sin(beta / 2)
        # Using P = 1 from A.6
        q = np.zeros(n + (4,))
        q[..., 0] = c * np.cos(sigma)
        q[..., 1] = -s * np.cos(delta)
        q[..., 2] = -s * np.sin(delta)
        q[..., 3] = -c * np.sin(sigma)
        # Enforce a non-negative scalar part (q and -q are the same rotation).
        for i in [1, 2, 3, 0]: # flip the zero element last
            q[..., i] = np.where(q[..., 0] < 0, -q[..., i], q[..., i])
        data = Quaternion(q)
        if direction == "crystal2lab":
            # Inverting the quaternion switches the reference frame.
            data = ~data
        rot = cls(data)
        rot.improper = np.zeros(n)
        return rot
    def to_matrix(self):
        """Rotations as orientation matrices
        :cite:`rowenhorst2015consistent`.

        Returns
        -------
        numpy.ndarray
            Array of orientation matrices.

        Examples
        --------
        >>> import numpy as np
        >>> from orix.quaternion.rotation import Rotation
        >>> r = Rotation([1, 0, 0, 0])
        >>> np.allclose(r.to_matrix(), np.eye(3))
        True
        >>> r = Rotation([0, 1, 0, 0])
        >>> np.allclose(r.to_matrix(), np.diag([1, -1, -1]))
        True
        """
        a, b, c, d = self.a, self.b, self.c, self.d
        om = np.zeros(self.shape + (3, 3))
        # Precompute the repeated products of the quaternion components.
        bb = b**2
        cc = c**2
        dd = d**2
        qq = a**2 - (bb + cc + dd)
        bc = b * c
        ad = a * d
        bd = b * d
        ac = a * c
        cd = c * d
        ab = a * b
        om[..., 0, 0] = qq + 2 * bb
        om[..., 0, 1] = 2 * (bc - ad)
        om[..., 0, 2] = 2 * (bd + ac)
        om[..., 1, 0] = 2 * (bc + ad)
        om[..., 1, 1] = qq + 2 * cc
        om[..., 1, 2] = 2 * (cd - ab)
        om[..., 2, 0] = 2 * (bd - ac)
        om[..., 2, 1] = 2 * (cd + ab)
        om[..., 2, 2] = qq + 2 * dd
        return om
    @classmethod
    def from_matrix(cls, matrix):
        """Creates rotations from orientation matrices
        :cite:`rowenhorst2015consistent`.

        Parameters
        ----------
        matrix : array_like
            Array of orientation matrices.

        Examples
        --------
        >>> import numpy as np
        >>> from orix.quaternion import Rotation
        >>> r = Rotation.from_matrix(np.eye(3))
        >>> np.allclose(r.data, [1, 0, 0, 0])
        True
        >>> r = Rotation.from_matrix(np.diag([1, -1, -1]))
        >>> np.allclose(r.data, [0, 1, 0, 0])
        True
        """
        om = np.asarray(matrix)
        # Assuming (3, 3) as last two dims
        n = (1,) if om.ndim == 2 else om.shape[:-2]
        q = np.zeros(n + (4,))
        # Compute quaternion components
        q0_almost = 1 + om[..., 0, 0] + om[..., 1, 1] + om[..., 2, 2]
        q1_almost = 1 + om[..., 0, 0] - om[..., 1, 1] - om[..., 2, 2]
        q2_almost = 1 - om[..., 0, 0] + om[..., 1, 1] - om[..., 2, 2]
        q3_almost = 1 - om[..., 0, 0] - om[..., 1, 1] + om[..., 2, 2]
        # Clamp tiny negative values (floating-point noise) before sqrt.
        q[..., 0] = 0.5 * np.sqrt(np.where(q0_almost < _FLOAT_EPS, 0, q0_almost))
        q[..., 1] = 0.5 * np.sqrt(np.where(q1_almost < _FLOAT_EPS, 0, q1_almost))
        q[..., 2] = 0.5 * np.sqrt(np.where(q2_almost < _FLOAT_EPS, 0, q2_almost))
        q[..., 3] = 0.5 * np.sqrt(np.where(q3_almost < _FLOAT_EPS, 0, q3_almost))
        # Modify component signs if necessary
        q[..., 1] = np.where(om[..., 2, 1] < om[..., 1, 2], -q[..., 1], q[..., 1])
        q[..., 2] = np.where(om[..., 0, 2] < om[..., 2, 0], -q[..., 2], q[..., 2])
        q[..., 3] = np.where(om[..., 1, 0] < om[..., 0, 1], -q[..., 3], q[..., 3])
        return cls(Quaternion(q)).unit  # Normalized
@classmethod
def identity(cls, shape=(1,)):
"""Create identity rotations.
Parameters
----------
shape : tuple
The shape out of which to construct identity quaternions.
"""
data = np.zeros(shape + (4,))
data[..., 0] = 1
return cls(data)
    @property
    def axis(self):
        """The axis of rotation as a :class:`~orix.vector.Vector3d`."""
        axis = Vector3d(np.stack((self.b, self.c, self.d), axis=-1))
        # Flip the axis where the scalar part is negative so the (axis, angle)
        # pair is unique.  NOTE(review): despite its name, `a_is_zero`
        # actually flags a < -1e-6, i.e. a negative scalar part — confirm.
        a_is_zero = self.a < -1e-6
        axis[a_is_zero] = -axis[a_is_zero]
        # Zero vector part (identity rotation): use the z axis by convention.
        norm_is_zero = axis.norm == 0
        axis[norm_is_zero] = Vector3d.zvector() * np.sign(self.a[norm_is_zero].data)
        axis.data = axis.data / axis.norm[..., np.newaxis]
        return axis
    @property
    def angle(self):
        """The angle of rotation as a numpy.ndarray."""
        # omega = 2 * arccos(|a|); nan_to_num guards |a| marginally above 1.
        return 2 * np.nan_to_num(np.arccos(np.abs(self.a)))
    @classmethod
    def random(cls, shape=(1,)):
        """Uniformly distributed rotations.

        Parameters
        ----------
        shape : int or tuple of int, optional
            The shape of the required object.
        """
        shape = (shape,) if isinstance(shape, int) else shape
        n = int(np.prod(shape))
        rotations = []
        # Rejection sampling: keep 4D points inside the unit ball (excluding
        # a tiny region around the origin); normalising them on construction
        # yields uniformly distributed unit quaternions.
        while len(rotations) < n:
            r = np.random.uniform(-1, 1, (3 * n, cls.dim))
            r2 = np.sum(np.square(r), axis=1)
            r = r[np.logical_and(1e-9**2 < r2, r2 <= 1)]
            rotations += list(r)
        return cls(np.array(rotations[:n])).reshape(*shape)
    @classmethod
    def random_vonmises(cls, shape=(1,), alpha=1.0, reference=(1, 0, 0, 0)):
        """Random rotations with a simplified Von Mises-Fisher
        distribution.

        Parameters
        ----------
        shape : int or tuple of int, optional
            The shape of the required object.
        alpha : float
            Parameter for the VM-F distribution. Lower values lead to
            "looser" distributions.
        reference : Rotation
            The center of the distribution.
        """
        shape = (shape,) if isinstance(shape, int) else shape
        reference = Rotation(reference)
        n = int(np.prod(shape))
        sample_size = int(alpha) * n
        rotations = []
        # Rejection sampling against the VM-F density; the density at the
        # reference itself is the upper bound used for acceptance.
        f_max = von_mises(reference, alpha, reference)
        while len(rotations) < n:
            rotation = cls.random(sample_size)
            f = von_mises(rotation, alpha, reference)
            x = np.random.rand(sample_size)
            rotation = rotation[x * f_max < f]
            rotations += list(rotation)
        return cls.stack(rotations[:n]).reshape(*shape)
    @property
    def antipodal(self):
        """Rotation : this and antipodally equivalent rotations."""
        # Stack q and -q along a new leading axis; both represent the same
        # transformation.
        r = self.__class__(np.stack([self.data, -self.data], axis=0))
        r.improper = self.improper
        return r
def von_mises(x, alpha, reference=Rotation((1, 0, 0, 0))):
    r"""A vastly simplified Von Mises-Fisher distribution calculation.

    Parameters
    ----------
    x : Rotation
    alpha : float
        Lower values of alpha lead to "looser" distributions.
    reference : Rotation, optional

    Notes
    -----
    This simplified version of the distribution is calculated using

    .. math::
        \frac{\exp\left(2\alpha\cos\left(\omega\right)\right)}{\_0F\_1\left(\frac{N}{2}, \alpha^2\right)}

    where :math:`\omega` is the angle between orientations and :math:`N`
    is the number of relevant dimensions, in this case 3.

    Returns
    -------
    numpy.ndarray
    """
    # hyp0f1 is the confluent hypergeometric limit function 0F1 (scipy).
    angle = Rotation(x).angle_with(reference)
    return np.exp(2 * alpha * np.cos(angle.data)) / hyp0f1(1.5, alpha**2)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
2864,
12,
1238,
1828,
262,
267,
8609,
6505,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
267,
8609,
13,
198,
2,
198,
2,
267,
8609,
318,
1479,
3788,
25,
345,... | 2.108649 | 10,198 |
import os
import pandas as pd
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import *
from keras.utils import to_categorical

# Kaggle MNIST: train rows are [label, 784 pixels]; test rows are 784 pixels.
train_dataset = pd.read_csv('datasets/train.csv')
test_dataset = pd.read_csv('datasets/test.csv')
train_dataset.head()

# Split features and labels.
train_images = train_dataset.iloc[:, 1:785]
train_labels = train_dataset.iloc[:, 0]
test_images = test_dataset.iloc[:, 0:784]

# Flattening the images into 784 dimensional vectors.
# Fix: the row count is inferred with -1 instead of being hard-coded to
# 42000/28000, and the previously duplicated reshape is collapsed into one —
# the script now works for any train/test CSV size.
train_images = train_images.to_numpy().reshape(-1, 784)
test_images = test_images.to_numpy().reshape(-1, 784)

# Scale pixels from [0, 255] into approximately [-0.7, 0.3].
train_images = (train_images/255)-0.7
test_images = (test_images/255)-0.7

# Building the model
ANN_model = Sequential()
ANN_model.add(Dense(784, activation='relu', input_dim=784))
ANN_model.add(Dense(300, activation='relu'))
ANN_model.add(Dense(100, activation='relu'))
ANN_model.add(Dense(100, activation='relu'))
ANN_model.add(Dense(100, activation='relu'))
ANN_model.add(Dense(200, activation='relu'))
ANN_model.add(Dense(10, activation='softmax'))  # one output per digit class

# Compiling the model
ANN_model.compile(
    optimizer='adam',
    loss='categorical_crossentropy',
    metrics=['accuracy']
)

# Training the model
epoch_count = 90
ANN_model.fit(
    train_images,
    to_categorical(train_labels),
    epochs=epoch_count,
    batch_size=120
)

# Saving the model weights for this run.
ANN_model.save_weights('ANN-models/ANN_Model_{}.h5'.format(epoch_count))

# Predict the test set and format a Kaggle submission (ImageId is 1-based).
test_pred = pd.DataFrame(ANN_model.predict(test_images, batch_size=60))
test_pred = pd.DataFrame(test_pred.idxmax(axis = 1))
test_pred.index.name = 'ImageId'
test_pred = test_pred.rename(columns = {0: 'Label'}).reset_index()
test_pred['ImageId'] = test_pred['ImageId'] + 1
test_pred.to_csv('submissions/submission_.csv', index = False)
| [
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
41927,
292,
198,
6738,
41927,
292,
13,
27530,
1330,
24604,
1843,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
1635,
198,
6738,
41927,
292,... | 2.561892 | 719 |
#coding:utf-8
import cv2
import numpy as np
import sys
sys.path.append(".\vision")
# Chessboard calibration-target spec: inner-corner grid size (9 x 7).
w = 9
h = 7
# TODO
if __name__ == '__main__':
    # NOTE(review): `Calibrate` is not imported in this file; presumably it is
    # provided via the ".\vision" directory appended to sys.path — confirm.
    a_temp = Calibrate()
    image_path = '.\\data\\*.jpg'
    a_temp.read_sample(image_path)
    a_temp.calibrate()
    # filename = '.\\Config\\calibrate_config.txt'
    # a_temp.save_parameter(filename)
    # filename = '.\\Config\\calibrate_config.txt'
    # a_temp.load_parameter(filename)
    # # Initialisation
    # import json  # use json to store the camera-calibration parameters
    # file_name = '.\\Config\\config.txt'
    # with open(file_name) as file_obj:
    #     temp_d = json.load(file_obj)  # returns list data; dicts also supported
    #     mtx = np.array(temp_d['mtx'])
    #     dist = np.array(temp_d['dist'])
    #     print("读取参数:", mtx, dist)
    # test
    image_path = r'.\data\time1560131719.978197.jpg'
    a_temp.test(image_path)
| [
2,
66,
7656,
25,
40477,
12,
23,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
7,
1911,
59,
10178,
4943,
198,
198,
2,
162,
96,
233,
33566,
246,
43718,
120,
162... | 1.835073 | 479 |
from unittest import mock
from unittest.mock import ANY
from math import nan
import pandas as pd
from phc.easy.omics.genomic_test import GenomicTest
raw_df = pd.DataFrame(
# NOTE: Sample is taken and adapted from BRCA data set
[
{
"sets": [
{
"status": "ACTIVE",
"setType": "expression",
"id": "1578b108-719e-4962-85f4-488c14aec26c",
"fileId": "93f0bec1-a26b-42b5-9650-e368d748c3f8",
"name": "Kapa400",
}
],
"tasks": [],
"id": "1578b108-719e-4962-85f4-488c14aec26c",
"datasetId": "34c0fb25-bbde-4eb1-87ed-dd4c7a1ac013",
"name": "Kapa400",
"indexedDate": "2020-09-29T20:17:24.483Z",
"status": "ACTIVE",
"patientId": nan,
"referenceSetId": nan,
"patient": nan,
},
{
"sets": [
{
"status": "ACTIVE",
"sequenceType": "somatic",
"setType": "shortVariant",
"id": "defea4df-3fb5-4326-a0d9-576232a200f2",
"fileId": "611a2b9a-17f8-4d98-9ef8-edefd02b9ee0",
"sequenceId": "da852d20-0a33-4e24-b993-16b5fa545dc6",
"name": "LO-C8-A138",
}
],
"tasks": [],
"id": "da852d20-0a33-4e24-b993-16b5fa545dc6",
"datasetId": "34c0fb25-bbde-4eb1-87ed-dd4c7a1ac013",
"name": "LO-C8-A138",
"indexedDate": "2020-09-29T20:15:59.521Z",
"status": "ACTIVE",
"patientId": "b6c286c6-2755-419b-87d3-59c7feda9653",
"referenceSetId": "GRCh38",
"patient": {
"name": [
{"text": "C8A138 LO", "given": ["C8A138"], "family": "LO"}
],
"identifier": [
{
"type": {
"coding": [
{
"code": "ANON",
"system": "http://hl7.org/fhir/v2/0203",
}
]
},
"value": "LO-C8-A138",
}
],
"id": "b6c286c6-2755-419b-87d3-59c7feda9653",
"resourceType": "Patient",
},
},
{
"sets": [
{
"status": "ACTIVE",
"sequenceType": "somatic",
"setType": "shortVariant",
"id": "2819306a-fbb8-4b32-a753-d2407e9c330a",
"fileId": "0d9465db-4f05-4e15-9265-25f01eec42ec",
"sequenceId": "f571ec0e-b097-48a1-9bcf-bc9d0bc2a1ee",
"name": "LO-A7-A3RF",
}
],
"tasks": [],
"id": "f571ec0e-b097-48a1-9bcf-bc9d0bc2a1ee",
"datasetId": "34c0fb25-bbde-4eb1-87ed-dd4c7a1ac013",
"name": "LO-A7-A3RF",
"indexedDate": "2020-09-29T20:15:59.392Z",
"status": "ACTIVE",
"patientId": "7451af3c-acc0-4d79-8429-6b8be96911d8",
"referenceSetId": "GRCh38",
"patient": {
"name": [
{"text": "A7A3RF LO", "given": ["A7A3RF"], "family": "LO"}
],
"identifier": [
{
"type": {
"coding": [
{
"code": "ANON",
"system": "http://hl7.org/fhir/v2/0203",
}
]
},
"value": "LO-A7-A3RF",
}
],
"id": "7451af3c-acc0-4d79-8429-6b8be96911d8",
"resourceType": "Patient",
},
},
]
)
@mock.patch("phc.easy.query.Query.execute_paging_api")
def test_get_data_frame(execute_paging_api):
"test a concrete subclass of pagingapiitem"
execute_paging_api.return_value = GenomicTest.transform_results(
raw_df, params={}
)
frame = GenomicTest.get_data_frame()
execute_paging_api.assert_called_once_with(
"genomics/projects/{project_id}/tests",
{"patientId": None, "status": "ACTIVE", "type": None},
all_results=False,
auth_args=ANY,
max_pages=None,
page_size=None,
log=False,
ignore_cache=False,
transform=ANY,
response_to_items=ANY
)
assert frame.columns.tolist() == [
"status",
"setType",
"id",
"fileId",
"name",
"sequenceType",
"sequenceId",
"tasks",
"id.test",
"datasetId",
"name.test",
"indexedDate",
"status.test",
"referenceSetId",
"patientId",
"patient.name_text",
"patient.name_given",
"patient.name_family",
"patient.type_coding_identifier_system__hl7.org/fhir/v2/0203__code",
"patient.type_coding_identifier_system__hl7.org/fhir/v2/0203__value",
"patient.id",
"patient.resourceType",
]
assert frame.setType.unique().tolist() == ["expression", "shortVariant"]
| [
6738,
555,
715,
395,
1330,
15290,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
15529,
198,
6738,
10688,
1330,
15709,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
872,
66,
13,
38171,
13,
31994,
13,
5235,
10179,
62,
9288,
133... | 1.535685 | 3,601 |
# @Author : bamtercelboo
# @Datetime : 2018/8/27 15:34
# @File : Embed.py
# @Last Modify Time : 2018/8/27 15:34
# @Contact : bamtercelboo@{gmail.com, 163.com}
"""
FILE : Embed.py
FUNCTION : None
"""
import os
import sys
import time
import tqdm
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
from collections import OrderedDict
from DataUtils.Common import *
torch.manual_seed(seed_num)
np.random.seed(seed_num)
class Embed(object):
    """Build a word-embedding matrix for a vocabulary from a pre-trained file.

    Each word is looked up in the pre-trained dictionary first by exact
    surface form, then by its lower-cased form ("fuzzy" match); words found
    neither way are out-of-vocabulary (OOV) and are initialised according to
    ``embed_type``: "zero", "nn" (keep nn.Embedding's Xavier init),
    "uniform" (one shared uniform vector) or "avg" (mean of matched rows).

    NOTE(review): no ``__init__`` is visible in this view -- the attributes
    used below (``embed_type``, ``embed_type_enum``, ``path``,
    ``words_dict``, ``words_count``, ``dim``, ``padID``, ``exact_count``,
    ``fuzzy_count``, ``oov_count``) are presumably set elsewhere; confirm.
    """
    def get_embed(self):
        """Read the embedding file and build the matrix for ``words_dict``.

        :return: torch.FloatTensor of shape (words_count, dim); None when
                 ``embed_type`` matches none of the handled strategies.
        """
        embed_dict = None
        if self.embed_type in self.embed_type_enum:
            embed_dict = self._read_file(path=self.path)
        else:
            # Unknown strategy: report and abort the whole process.
            print("embed_type illegal, must be in {}".format(self.embed_type_enum))
            exit()
        # print(embed_dict)
        embed = None
        # Dispatch on the OOV-initialisation strategy.
        if self.embed_type == "nn":
            embed = self._nn_embed(embed_dict=embed_dict, words_dict=self.words_dict)
        elif self.embed_type == "zero":
            embed = self._zeros_embed(embed_dict=embed_dict, words_dict=self.words_dict)
        elif self.embed_type == "uniform":
            embed = self._uniform_embed(embed_dict=embed_dict, words_dict=self.words_dict)
        elif self.embed_type == "avg":
            embed = self._avg_embed(embed_dict=embed_dict, words_dict=self.words_dict)
        # print(embed)
        self.info()
        return embed
    def _zeros_embed(self, embed_dict, words_dict):
        """Fill the matrix from ``embed_dict``; OOV rows stay all-zero.

        :param embed_dict: word -> list of vector components (strings).
        :param words_dict: word -> row index in the matrix.
        """
        print("loading pre_train embedding by zeros for out of vocabulary.")
        embeddings = np.zeros((int(self.words_count), int(self.dim)))
        for word in words_dict:
            if word in embed_dict:
                # Exact surface-form match.
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
                self.exact_count += 1
            elif word.lower() in embed_dict:
                # Case-insensitive ("fuzzy") match.
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
                self.fuzzy_count += 1
            else:
                self.oov_count += 1
        final_embed = torch.from_numpy(embeddings).float()
        return final_embed
    def _nn_embed(self, embed_dict, words_dict):
        """Fill the matrix; OOV rows keep the Xavier-initialised values.

        :param embed_dict: word -> list of vector components (strings).
        :param words_dict: word -> row index in the matrix.
        """
        print("loading pre_train embedding by nn.Embedding for out of vocabulary.")
        embed = nn.Embedding(int(self.words_count), int(self.dim))
        # NOTE(review): init.xavier_uniform is deprecated in favour of
        # init.xavier_uniform_ in modern PyTorch releases.
        init.xavier_uniform(embed.weight.data)
        embeddings = np.array(embed.weight.data)
        for word in words_dict:
            if word in embed_dict:
                # Exact surface-form match overrides the random init.
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
                self.exact_count += 1
            elif word.lower() in embed_dict:
                # Case-insensitive ("fuzzy") match.
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
                self.fuzzy_count += 1
            else:
                self.oov_count += 1
        final_embed = torch.from_numpy(embeddings).float()
        return final_embed
    def _uniform_embed(self, embed_dict, words_dict):
        """Fill the matrix; all OOV rows share one uniform(-0.25, 0.25) row.

        The padding row (index ``padID``) is left all-zero.

        :param embed_dict: word -> list of vector components (strings).
        :param words_dict: word -> row index in the matrix.
        """
        print("loading pre_train embedding by uniform for out of vocabulary.")
        embeddings = np.zeros((int(self.words_count), int(self.dim)))
        inword_list = {}  # row index -> 1 for rows found in embed_dict
        for word in words_dict:
            if word in embed_dict:
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
                inword_list[words_dict[word]] = 1
                self.exact_count += 1
            elif word.lower() in embed_dict:
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
                inword_list[words_dict[word]] = 1
                self.fuzzy_count += 1
            else:
                self.oov_count += 1
        uniform_col = np.random.uniform(-0.25, 0.25, int(self.dim)).round(6)  # uniform
        for i in range(len(words_dict)):
            if i not in inword_list and i != self.padID:
                embeddings[i] = uniform_col
        final_embed = torch.from_numpy(embeddings).float()
        return final_embed
    def _avg_embed(self, embed_dict, words_dict):
        """Fill the matrix; all OOV rows share the column-wise average row.

        The padding row (index ``padID``) is left all-zero.

        NOTE(review): the average divides by ``len(inword_list)`` -- this
        raises ZeroDivisionError when no word matched at all; confirm the
        vocabulary always overlaps the pre-trained file.

        :param embed_dict: word -> list of vector components (strings).
        :param words_dict: word -> row index in the matrix.
        """
        print("loading pre_train embedding by avg for out of vocabulary.")
        embeddings = np.zeros((int(self.words_count), int(self.dim)))
        inword_list = {}  # row index -> 1 for rows found in embed_dict
        for word in words_dict:
            if word in embed_dict:
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
                inword_list[words_dict[word]] = 1
                self.exact_count += 1
            elif word.lower() in embed_dict:
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
                inword_list[words_dict[word]] = 1
                self.fuzzy_count += 1
            else:
                self.oov_count += 1
        sum_col = np.sum(embeddings, axis=0) / len(inword_list)  # avg
        for i in range(len(words_dict)):
            if i not in inword_list and i != self.padID:
                embeddings[i] = sum_col
        final_embed = torch.from_numpy(embeddings).float()
        return final_embed
    @staticmethod
    def _read_file(path):
        """Load a word2vec-style text embedding file.

        Lines with 1-3 whitespace-separated tokens are skipped (word2vec
        files typically start with a "<count> <dim>" header line).

        :param path: embed file path
        :return: dict mapping word -> list of vector components (strings).
        """
        embed_dict = {}
        with open(path, encoding='utf-8') as f:
            lines = f.readlines()
            lines = tqdm.tqdm(lines)
            for line in lines:
                values = line.strip().split(' ')
                if len(values) == 1 or len(values) == 2 or len(values) == 3:
                    continue
                w, v = values[0], values[1:]
                embed_dict[w] = v
        return embed_dict
    def info(self):
        """Print coverage statistics of the pre-trained embedding.

        NOTE(review): divides by total_count (exact + fuzzy matches) --
        raises ZeroDivisionError when nothing matched; confirm inputs.
        """
        total_count = self.exact_count + self.fuzzy_count
        print("Words count {}, Embed dim {}.".format(self.words_count, self.dim))
        print("Exact count {} / {}".format(self.exact_count, self.words_count))
        print("Fuzzy count {} / {}".format(self.fuzzy_count, self.words_count))
        print(" INV count {} / {}".format(total_count, self.words_count))
        print(" OOV count {} / {}".format(self.oov_count, self.words_count))
        print(" OOV radio ===> {}%".format(np.round((self.oov_count / total_count) * 100, 2)))
        print(40 * "*")
    @staticmethod
    def _get_dim(path):
        """Infer the embedding dimension from the first line of the file.

        A 1- or 2-token first line is treated as a word2vec header (dim is
        its last token, returned as ``str``); otherwise dim is the token
        count minus the leading word (returned as ``int``).

        :param path: embed file path
        :return: the inferred dimension, or -1 for an empty file.
        """
        embedding_dim = -1
        with open(path, encoding='utf-8') as f:
            for line in f:
                line_split = line.strip().split(' ')
                if len(line_split) == 1:
                    embedding_dim = line_split[0]
                    break
                elif len(line_split) == 2:
                    embedding_dim = line_split[1]
                    break
                else:
                    embedding_dim = len(line_split) - 1
                    break
        return embedding_dim
    @staticmethod
    def _list2dict(convert_list):
        """Convert a word list into an index mapping plus lower-cased list.

        :param convert_list: iterable of words; position becomes the index.
        :return: (OrderedDict word -> index, list of lower-cased words).
        """
        list_dict = OrderedDict()
        list_lower = []
        for index, word in enumerate(convert_list):
            list_lower.append(word.lower())
            list_dict[word] = index
        assert len(list_lower) == len(list_dict)
        return list_dict, list_lower
| [
2,
2488,
13838,
1058,
275,
321,
353,
5276,
2127,
78,
198,
2,
2488,
27354,
8079,
1058,
2864,
14,
23,
14,
1983,
1315,
25,
2682,
198,
2,
2488,
8979,
1058,
13302,
276,
13,
9078,
198,
2,
2488,
5956,
3401,
1958,
3862,
1058,
2864,
14,
23... | 2.016805 | 3,868 |
from spaceone.api.cost_analysis.plugin import data_source_pb2
from spaceone.core.pygrpc.message_type import *

# Explicit public API of this module: only PluginInfo is re-exported.
__all__ = ['PluginInfo']
| [
6738,
2272,
505,
13,
15042,
13,
15805,
62,
20930,
13,
33803,
1330,
1366,
62,
10459,
62,
40842,
17,
198,
6738,
2272,
505,
13,
7295,
13,
9078,
2164,
14751,
13,
20500,
62,
4906,
1330,
1635,
628,
198,
834,
439,
834,
796,
37250,
37233,
1... | 3.066667 | 45 |
import pymongo
read_pref = pymongo.read_preferences.ReadPreference.SECONDARY
c = pymongo.MongoClient(host="mongodb://localhost:27017",
replicaSet="rs1",
w=3, wtimeout=10000, j=True,
read_preference=read_pref)
db = c.m101
people = db.people
print "inserting"
people.insert({"name":"Andrew Erlichson", "favorite_color":"blue"})
print "inserting"
people.insert({"name":"Richard Krueter", "favorite_color":"red"})
print "inserting"
people.insert({"name":"Dwight Merriman", "favorite_color":"green"})
| [
198,
11748,
279,
4948,
25162,
198,
198,
961,
62,
3866,
69,
796,
279,
4948,
25162,
13,
961,
62,
3866,
69,
4972,
13,
5569,
6719,
4288,
13,
23683,
18672,
13153,
198,
198,
66,
796,
279,
4948,
25162,
13,
44,
25162,
11792,
7,
4774,
2625,
... | 2.279528 | 254 |
#
# Function for fresh plain deployment
import os
import fnmatch
# NOTE(review): os.pathsep is a string attribute, not a submodule -- this
# import likely fails; probably meant "from os import pathsep". Confirm
# under the WLST Jython runtime before changing.
import os.pathsep
from java.io import File
from java.io import FileInputStream
# Deployment parameters; presumably populated by init() (defined outside
# this view) from a properties file -- TODO confirm.
username = None
password = None
adminUrl = None
applicationPath = None
planPath = None
targetServers = None
# All the application will be undeployed. This is necessary
# init/conn/stop_and_undeploy_all_applications/deploy_bh_application are
# defined elsewhere in this script; dumpStack/exit are WLST built-ins.
try:
    init()
    conn()
    stop_and_undeploy_all_applications()
    deploy_bh_application()
except:
    # On any failure, print the Java stack trace and leave WLST.
    dumpStack()
    exit()
2,
201,
198,
2,
15553,
329,
4713,
8631,
14833,
201,
198,
201,
198,
11748,
28686,
201,
198,
11748,
24714,
15699,
201,
198,
11748,
28686,
13,
6978,
325,
79,
201,
198,
201,
198,
6738,
20129,
13,
952,
1330,
9220,
201,
198,
6738,
20129,
... | 2.705202 | 173 |
from io import StringIO
from unittest import TestCase
from rpa_logger import Logger
from rpa_logger.task import ERROR, STATUSES, SUCCESS
| [
6738,
33245,
1330,
10903,
9399,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
374,
8957,
62,
6404,
1362,
1330,
5972,
1362,
198,
6738,
374,
8957,
62,
6404,
1362,
13,
35943,
1330,
33854,
11,
15486,
2937,
1546,
11,
13558,
... | 3.136364 | 44 |
#!/usr/bin/env python3
import argparse


def convert(lines):
    """Translate simulator CSV output into VHDL stimulus statements.

    ``lines`` is an iterable of text lines.  The first line is a header:
    ``time, <signal name>, ...``; signal names are lower-cased and spaces
    become underscores.  Every following line is
    ``<time in seconds>, <value>, ...``.

    Returns the VHDL text: a ``wait for N ns;`` for the gap between
    successive timestamps (capped at 1000000 ns, as in the original),
    followed by one ``<signal> <= '<value>';`` per signal.

    Fixes over the previous inline script: drops the unused ``timescale``
    variable, no longer shadows the line counter inside the value loop,
    and builds the output with a list + join instead of quadratic ``+=``.
    """
    wires = []
    prev_t = 0
    out = []
    for lineno, line in enumerate(lines):
        line = line.rstrip()
        if lineno == 0:
            # Header row: skip the time column, normalise names for VHDL.
            wires = [w.replace(' ', '_').lower() for w in line.split(", ")[1:]]
            continue
        values = line.split(', ')
        t = int(float(values[0]) * 1e9)  # seconds -> integer nanoseconds
        dt = min(t - prev_t, 1000000)    # cap a single wait statement
        if dt > 0:
            out.append(f"wait for {dt} ns;\n")
        for wire, value in zip(wires, values[1:]):
            out.append(f"{wire} <= '{value}';\n")
        prev_t = t
    return "".join(out)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Convert VCD file to VHDL statements')
    parser.add_argument("-i", "--input", required=True)
    args = parser.parse_args()

    with open(args.input) as f:
        print(convert(f))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
1822,
29572,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
30751,
796,
1822,
29572,
13,
28100,
1713,
46677,
7,
11213,
11639,
3103,
1... | 1.840726 | 496 |
"""https://adventofcode.com/2020/day/12"""
import io
import math
def part1(stdin: io.TextIOWrapper, stderr: io.TextIOWrapper) -> int:
    """
    Figure out where the navigation instructions lead. What is the Manhattan
    distance between that location and the ship's starting position?
    """
    compass = {0: 'N', 90: 'E', 180: 'S', 270: 'W'}
    east = 0
    north = 0
    heading = 90
    for action, distance in parse(stdin):
        stderr.write(f"{action}{distance} ")
        # 'F' moves along the current heading; resolve it to a compass letter.
        step = compass[heading] if action == 'F' else action
        if step == 'N':
            north += distance
        elif step == 'S':
            north -= distance
        elif step == 'E':
            east += distance
        elif step == 'W':
            east -= distance
        elif step == 'L':
            heading = (heading - distance + 360) % 360
        elif step == 'R':
            heading = (heading + distance) % 360
        stderr.write(f"{east},{north}@{heading}\n")
    return abs(east) + abs(north)
def part2(stdin: io.TextIOWrapper, stderr: io.TextIOWrapper) -> int:
    """
    Figure out where the navigation instructions actually lead. What is the
    Manhattan distance between that location and the ship's starting position?

    N/S/E/W move the waypoint (held relative to the ship), L/R rotate it
    about the ship, and F moves the ship toward it ``distance`` times.

    Rotations use exact integer 90-degree steps instead of the previous
    cos/sin + round() arithmetic, which was only correct because rounding
    repaired the floating-point error (all puzzle rotations are multiples
    of 90 degrees; non-multiples now floor to a quarter turn).
    """
    directions = parse(stdin)
    x_pos = 0
    y_pos = 0
    x_waypoint = 10
    y_waypoint = 1
    for (action, distance) in directions:
        stderr.write(f"{action}{distance} ")
        if action == 'N':
            y_waypoint += distance
        elif action == 'S':
            y_waypoint -= distance
        elif action == 'E':
            x_waypoint += distance
        elif action == 'W':
            x_waypoint -= distance
        elif action in ('L', 'R'):
            # Normalise to counter-clockwise quarter turns; Python's modulo
            # keeps the count in 0..3 even for 'R' (negative angle).
            turns = (distance if action == 'L' else -distance) // 90 % 4
            for _ in range(turns):
                # One exact 90-degree CCW rotation: (x, y) -> (-y, x).
                (x_waypoint, y_waypoint) = (-y_waypoint, x_waypoint)
        elif action == 'F':
            x_pos += x_waypoint * distance
            y_pos += y_waypoint * distance
        stderr.write(f"{x_pos},{y_pos} waypoint {x_waypoint},{y_waypoint}\n")
    return abs(x_pos) + abs(y_pos)
def parse(stdin: io.TextIOWrapper) -> list:
    """
    Parse the input into a list of tuples: string direction and int distance.
    """
    instructions = []
    for line in stdin.read().strip().splitlines():
        instructions.append((line[0], int(line[1:])))
    return instructions
| [
37811,
5450,
1378,
324,
1151,
1659,
8189,
13,
785,
14,
42334,
14,
820,
14,
1065,
37811,
198,
198,
11748,
33245,
198,
11748,
10688,
628,
198,
4299,
636,
16,
7,
19282,
259,
25,
33245,
13,
8206,
40,
3913,
430,
2848,
11,
336,
1082,
81,
... | 2.135332 | 1,234 |
"""
1232. Check If It Is a Straight Line
Easy
You are given an array coordinates, coordinates[i] = [x, y], where [x, y] represents the coordinate of a point. Check if these points make a straight line in the XY plane.
Example 1:
Input: coordinates = [[1,2],[2,3],[3,4],[4,5],[5,6],[6,7]]
Output: true
Example 2:
Input: coordinates = [[1,1],[2,2],[3,4],[4,5],[5,6],[7,7]]
Output: false
Constraints:
2 <= coordinates.length <= 1000
coordinates[i].length == 2
-10^4 <= coordinates[i][0], coordinates[i][1] <= 10^4
coordinates contains no duplicate point.
"""
| [
37811,
198,
1065,
2624,
13,
6822,
1002,
632,
1148,
257,
27680,
6910,
198,
28406,
198,
198,
1639,
389,
1813,
281,
7177,
22715,
11,
22715,
58,
72,
60,
796,
685,
87,
11,
331,
4357,
810,
685,
87,
11,
331,
60,
6870,
262,
20435,
286,
25... | 2.882653 | 196 |
from numba import jit
# fast roc_auc computation: https://www.kaggle.com/c/microsoft-malware-prediction/discussion/76013
@jit
# idea from this kernel: https://www.kaggle.com/fabiendaniel/detecting-malwares-with-lgbm
| [
6738,
997,
7012,
1330,
474,
270,
198,
198,
2,
3049,
686,
66,
62,
14272,
29964,
25,
3740,
1378,
2503,
13,
74,
9460,
293,
13,
785,
14,
66,
14,
40485,
12,
7617,
1574,
12,
28764,
2867,
14,
15410,
11956,
14,
4304,
30273,
198,
31,
45051... | 2.695122 | 82 |
# Area of a circle, with pi fixed at 3.14159 (judge-mandated constant --
# do not substitute math.pi, the expected output depends on this value).
PI = 3.14159
radius = float(input())
area = pow(radius, 2) * PI
print(f'A={area:.4f}')
| [
77,
796,
513,
13,
1415,
19707,
198,
430,
952,
796,
12178,
7,
15414,
28955,
198,
32,
796,
7182,
7,
430,
952,
11,
362,
27493,
77,
198,
4798,
10786,
32,
34758,
25,
13,
19,
69,
92,
4458,
18982,
7,
32,
4008,
198
] | 1.97561 | 41 |
#!/usr/bin/env python

import sys
import os

# Import pycdlib itself.
import pycdlib

# Require at least one file argument to pack into the ISO.
# (The previous check `len(sys.argv) < 1` could never fire: argv always
# contains the script name, so bad invocations fell through.)
if len(sys.argv) < 2:
    print('Usage: %s' % (sys.argv[0]))
    sys.exit(1)

# Create a new PyCdlib object.
iso = pycdlib.PyCdlib()
# Interchange level 3 relaxes the strict ISO9660 naming/size limits.
iso.new(interchange_level=3)

for path in sys.argv[1:]:
    # ISO identifier: '/' + upper-cased basename with '-' mapped to '_'.
    name = '/%s' % path.rsplit('/', 1)[-1].replace('-', '_').upper()
    iso.add_file(path, name)

isoLocation = os.getcwd()
isoLocation += '/archive.iso'
iso.write(isoLocation)
iso.close()
print('%s %s' % ('Created iso', isoLocation))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
25064,
198,
11748,
28686,
198,
198,
2,
17267,
12972,
10210,
8019,
2346,
13,
198,
11748,
12972,
10210,
8019,
198,
198,
361,
18896,
7,
17597,
13,
853,
85,
8,
1279,
352,
25,
198,
... | 2.280335 | 239 |
import argparse
import pickle
import os
import numpy as np
from filter_corrected_alleles import parse_perfect_sam, parse_fasta
from parse_contig_realign import parse_CIGAR
from utils import get_reverse_complement
import sys
if __name__ == "__main__":
    # CLI entry point.  parse_args(), occupied_annotation(),
    # get_SEQ_from_sam_list() and correct_allele() are defined elsewhere in
    # this file (not visible in this view) -- confirm before refactoring.
    args = parse_args()
    fn_sam_H1 = args.fn_sam_H1
    fn_sam_H2 = args.fn_sam_H2
    fn_asm_H1 = args.fn_asm_H1
    fn_asm_H2 = args.fn_asm_H2
    fn_pickle_H1 = args.fn_pickle_H1
    fn_pickle_H2 = args.fn_pickle_H2
    len_extend = args.len_extend
    fo_annotation_summary = args.fo_annotation_summary
    fo_perfect_annotation_report = args.fo_perfect_annotation_report
    fo_mismatched_annotation_report = args.fo_mismatched_annotation_report
    fo_mismatched_fasta = args.fo_mismatched_fasta
    fo_flanking_fasta = args.fo_flanking_fasta
    if fn_sam_H2: # if there are two genome H1, H2 to analyze
        # Load each haplotype assembly's contigs, using the pickle files as
        # a pure speed cache when they already exist.
        dict_contig_H1 = None
        dict_contig_H2 = None
        if os.path.exists(fn_pickle_H1):
            print ('Pickle file', fn_pickle_H1, 'has existed, load for it instead of re-fetching')
            f = open(fn_pickle_H1, 'rb')
            dict_contig_H1 = pickle.load(f)
            f.close()
        else:
            dict_contig_H1 = parse_fasta(fn_asm_H1)
            if fn_pickle_H1:
                f = open(fn_pickle_H1, 'wb')
                pickle.dump(dict_contig_H1, f)
                f.close()
        if os.path.exists(fn_pickle_H2):
            print ('Pickle file', fn_pickle_H2, 'has existed, load for it instead of re-fetching')
            f = open(fn_pickle_H2, 'rb')
            dict_contig_H2 = pickle.load(f)
            f.close()
        else:
            dict_contig_H2 = parse_fasta(fn_asm_H2)
            if fn_pickle_H2:
                f = open(fn_pickle_H2, 'wb')
                pickle.dump(dict_contig_H2, f)
                f.close()
        # Split each SAM into perfect-match and mismatched alignment fields;
        # fields[0] is the allele name.
        list_perfect_fields_1, list_mismatch_fields_1 = parse_perfect_sam(fn_sam_H1)
        list_perfect_fields_2, list_mismatch_fields_2 = parse_perfect_sam(fn_sam_H2)
        set_perfect_allele_1 = set(fields[0] for fields in list_perfect_fields_1)
        set_perfect_allele_2 = set(fields[0] for fields in list_perfect_fields_2)
        print("========== Annotation of IMGT Alleles ==========")
        print("There are", len(list_perfect_fields_1), "allele sites and", len(set_perfect_allele_1), "alleles in H1.")
        print("There are", len(list_perfect_fields_2), "allele sites and", len(set_perfect_allele_2), "alleles in H2.")
        print("There are", len(set_perfect_allele_1.intersection(set_perfect_allele_2)), "common alleles in H1 and H2.")
        # output the perfect annotations
        dict_occupied_place_1 = {}
        dict_occupied_place_2 = {}
        occupied_annotation(dict_occupied_place_1, dict_occupied_place_2, list_perfect_fields_1, list_perfect_fields_2, \
                            fo_perfect_annotation_report, dict_contig_H1, dict_contig_H2, fo_annotation_summary)
        # output the mismatched annotations
        if fo_mismatched_annotation_report:
            occupied_annotation(dict_occupied_place_1, dict_occupied_place_2, list_mismatch_fields_1, list_mismatch_fields_2, \
                                fo_mismatched_annotation_report, dict_contig_H1, dict_contig_H2, fo_annotation_summary)
        print("========== Annotation of Imperfect Matches ==========")
        allele_num_H1 = sum([len(list_contig) for list_contig in dict_occupied_place_1.values()])
        print("There are", allele_num_H1, "potential alleles in H1 among", len(dict_occupied_place_1), "contigs.")
        allele_num_H2 = sum([len(list_contig) for list_contig in dict_occupied_place_2.values()])
        print("There are", allele_num_H2, "potential alleles in H2 among", len(dict_occupied_place_2), "contigs.")
        if fo_mismatched_fasta:
            # Collect read sequences keyed by name, then emit corrected
            # ("novel") alleles and their flanking sequences as FASTA.
            dict_SEQ = {}
            get_SEQ_from_sam_list(list_perfect_fields_1, dict_SEQ)
            get_SEQ_from_sam_list(list_perfect_fields_2, dict_SEQ)
            get_SEQ_from_sam_list(list_mismatch_fields_1, dict_SEQ)
            get_SEQ_from_sam_list(list_mismatch_fields_2, dict_SEQ)
            dict_corrected_alleles = {}
            dict_flanking_alleles = {}
            correct_allele(dict_occupied_place_1, dict_SEQ, dict_corrected_alleles, dict_flanking_alleles, dict_contig_H1, len_extend)
            correct_allele(dict_occupied_place_2, dict_SEQ, dict_corrected_alleles, dict_flanking_alleles, dict_contig_H2, len_extend)
            f_n = open(fo_mismatched_fasta, 'w')
            for allele_name in sorted(dict_corrected_alleles.keys()):
                for idx, SEQ in enumerate(sorted(dict_corrected_alleles[allele_name])):
                    f_n.write('>' + allele_name + '/novel-' + str(idx) + '\n')
                    f_n.write(SEQ + '\n')
            f_n.close()
            print("Output novel alleles.")
            f_f = open(fo_flanking_fasta, 'w')
            for allele_name in sorted(dict_flanking_alleles.keys()):
                for idx, SEQ in enumerate(sorted(dict_flanking_alleles[allele_name])):
                    f_f.write('>' + allele_name + '-' + str(idx) + '\n')
                    f_f.write(SEQ + '\n')
            f_f.close()
            print("Output flanking sequences")
        else:
            print("Corrected mismatched files not specified.")
    else: # if there is only one genome to analyze
        # Single-genome variant of the branch above (H2 structures empty).
        dict_contig_H1 = None
        if os.path.exists(fn_pickle_H1):
            print ('Pickle file', fn_pickle_H1, 'has existed, load for it instead of re-fetching')
            f = open(fn_pickle_H1, 'rb')
            dict_contig_H1 = pickle.load(f)
            f.close()
        else:
            dict_contig_H1 = parse_fasta(fn_asm_H1)
            if fn_pickle_H1:
                f = open(fn_pickle_H1, 'wb')
                pickle.dump(dict_contig_H1, f)
                f.close()
        list_perfect_fields_1, list_mismatch_fields_1 = parse_perfect_sam(fn_sam_H1)
        set_perfect_allele_1 = set(fields[0] for fields in list_perfect_fields_1)
        print("========== Annotation of IMGT Alleles ==========")
        print("There are", len(list_perfect_fields_1), "allele sites and", len(set_perfect_allele_1), "alleles in genome.")
        # output the perfect annotations
        dict_occupied_place_1 = {}
        occupied_annotation(dict_occupied_place_1, {}, list_perfect_fields_1, [], fo_perfect_annotation_report, dict_contig_H1, {}, fo_annotation_summary)
        # output the mismatched annotations
        if fo_mismatched_annotation_report:
            occupied_annotation(dict_occupied_place_1, {}, list_mismatch_fields_1, [], fo_mismatched_annotation_report, dict_contig_H1, {}, fo_annotation_summary)
        print("========== Annotation of Imperfect Matches ==========")
        allele_num_H1 = sum([len(list_contig) for list_contig in dict_occupied_place_1.values()])
        print("There are", allele_num_H1, "potential alleles in the genome among", len(dict_occupied_place_1), "contigs.")
        if fo_mismatched_fasta:
            dict_SEQ = {}
            get_SEQ_from_sam_list(list_perfect_fields_1, dict_SEQ)
            get_SEQ_from_sam_list(list_mismatch_fields_1, dict_SEQ)
            dict_corrected_alleles = {}
            dict_flanking_alleles = {}
            correct_allele(dict_occupied_place_1, dict_SEQ, dict_corrected_alleles, dict_flanking_alleles, dict_contig_H1, len_extend)
            f_n = open(fo_mismatched_fasta, 'w')
            for allele_name in sorted(dict_corrected_alleles.keys()):
                for idx, SEQ in enumerate(sorted(dict_corrected_alleles[allele_name])):
                    f_n.write('>' + allele_name + '/novel-' + str(idx) + '\n')
                    f_n.write(SEQ + '\n')
            f_n.close()
            print("Output novel alleles.")
            f_f = open(fo_flanking_fasta, 'w')
            for allele_name in sorted(dict_flanking_alleles.keys()):
                for idx, SEQ in enumerate(sorted(dict_flanking_alleles[allele_name])):
                    f_f.write('>' + allele_name + '-' + str(idx) + '\n')
                    f_f.write(SEQ + '\n')
            f_f.close()
            print("Output flanking sequences")
        else:
            print("Corrected mismatched files not specified.")
| [
11748,
1822,
29572,
198,
11748,
2298,
293,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
8106,
62,
30283,
276,
62,
6765,
829,
1330,
21136,
62,
25833,
62,
37687,
11,
21136,
62,
7217,
64,
198,
6738,
21136,
62,
3642,
... | 1.987471 | 4,310 |
import random
from common.key_exchange.protocols.srp import SecureRemotePasswordClient,\
SecureRemotePasswordServer
| [
11748,
4738,
198,
198,
6738,
2219,
13,
2539,
62,
1069,
3803,
13,
11235,
4668,
82,
13,
27891,
79,
1330,
26707,
36510,
35215,
11792,
11,
59,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
2... | 2.023529 | 85 |
"""Clone some funtions from Keras.
# References https://github.com/keras-team/keras/blob/master/keras/backend/numpy_backend.py
"""
import numpy as np | [
37811,
2601,
505,
617,
1257,
45240,
422,
17337,
292,
13,
198,
2,
31458,
3740,
1378,
12567,
13,
785,
14,
6122,
292,
12,
15097,
14,
6122,
292,
14,
2436,
672,
14,
9866,
14,
6122,
292,
14,
1891,
437,
14,
77,
32152,
62,
1891,
437,
13,
... | 2.777778 | 54 |
import datetime, time
from urlparse import urlparse
from django.db import models
from django.contrib.auth.models import User
| [
11748,
4818,
8079,
11,
640,
198,
6738,
19016,
29572,
1330,
19016,
29572,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198
] | 3.6 | 35 |
"""Allow users to view and modify (add, delete, chnage) material data.
For the convennience of data manipulation and displaying, this application
is built on Django REST Framework.
""" | [
37811,
35265,
2985,
284,
1570,
290,
13096,
357,
2860,
11,
12233,
11,
442,
77,
496,
8,
2587,
1366,
13,
198,
198,
1890,
262,
7292,
77,
1240,
286,
1366,
17512,
290,
19407,
11,
428,
3586,
198,
271,
3170,
319,
37770,
30617,
25161,
13,
19... | 4.204545 | 44 |
################################################################################
#
# New Zealand Geographic Board gazetteer application,
# Crown copyright (c) 2020, Land Information New Zealand on behalf of
# the New Zealand Government.
#
# This file is released under the MIT licence. See the LICENCE file found
# in the top-level directory of this distribution for more information.
#
################################################################################
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
| [
29113,
29113,
14468,
198,
2,
198,
2,
220,
968,
8936,
33636,
5926,
308,
1031,
5857,
263,
3586,
11,
198,
2,
220,
12223,
6634,
357,
66,
8,
12131,
11,
6379,
6188,
968,
8936,
319,
8378,
286,
198,
2,
220,
262,
968,
8936,
5070,
13,
198,
... | 4.646552 | 116 |
"""Class to hold all sensor accessories."""
import logging
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT, TEMP_CELSIUS,
ATTR_DEVICE_CLASS, STATE_ON, STATE_HOME)
from . import TYPES
from .accessories import HomeAccessory, add_preload_service, setup_char
from .const import (
CATEGORY_SENSOR, SERV_HUMIDITY_SENSOR, SERV_TEMPERATURE_SENSOR,
CHAR_CURRENT_HUMIDITY, CHAR_CURRENT_TEMPERATURE, PROP_CELSIUS,
SERV_AIR_QUALITY_SENSOR, CHAR_AIR_QUALITY, CHAR_AIR_PARTICULATE_DENSITY,
CHAR_CARBON_DIOXIDE_LEVEL, CHAR_CARBON_DIOXIDE_PEAK_LEVEL,
SERV_LIGHT_SENSOR, CHAR_CURRENT_AMBIENT_LIGHT_LEVEL,
DEVICE_CLASS_CO2, SERV_CARBON_DIOXIDE_SENSOR, CHAR_CARBON_DIOXIDE_DETECTED,
DEVICE_CLASS_GAS, SERV_CARBON_MONOXIDE_SENSOR,
CHAR_CARBON_MONOXIDE_DETECTED,
DEVICE_CLASS_MOISTURE, SERV_LEAK_SENSOR, CHAR_LEAK_DETECTED,
DEVICE_CLASS_MOTION, SERV_MOTION_SENSOR, CHAR_MOTION_DETECTED,
DEVICE_CLASS_OCCUPANCY, SERV_OCCUPANCY_SENSOR, CHAR_OCCUPANCY_DETECTED,
DEVICE_CLASS_OPENING, SERV_CONTACT_SENSOR, CHAR_CONTACT_SENSOR_STATE,
DEVICE_CLASS_SMOKE, SERV_SMOKE_SENSOR, CHAR_SMOKE_DETECTED)
from .util import (
convert_to_float, temperature_to_homekit, density_to_air_quality)
# Module-level logger for the HomeKit sensor accessories.
_LOGGER = logging.getLogger(__name__)

# Maps a Home Assistant binary-sensor device_class to the HomeKit
# (service, characteristic) pair used to expose it; see BinarySensor below.
BINARY_SENSOR_SERVICE_MAP = {
    DEVICE_CLASS_CO2: (SERV_CARBON_DIOXIDE_SENSOR,
                       CHAR_CARBON_DIOXIDE_DETECTED),
    DEVICE_CLASS_GAS: (SERV_CARBON_MONOXIDE_SENSOR,
                       CHAR_CARBON_MONOXIDE_DETECTED),
    DEVICE_CLASS_MOISTURE: (SERV_LEAK_SENSOR, CHAR_LEAK_DETECTED),
    DEVICE_CLASS_MOTION: (SERV_MOTION_SENSOR, CHAR_MOTION_DETECTED),
    DEVICE_CLASS_OCCUPANCY: (SERV_OCCUPANCY_SENSOR, CHAR_OCCUPANCY_DETECTED),
    DEVICE_CLASS_OPENING: (SERV_CONTACT_SENSOR, CHAR_CONTACT_SENSOR_STATE),
    DEVICE_CLASS_SMOKE: (SERV_SMOKE_SENSOR, CHAR_SMOKE_DETECTED)}
@TYPES.register('TemperatureSensor')
class TemperatureSensor(HomeAccessory):
    """Generate a TemperatureSensor accessory for a temperature sensor.

    Sensor entity must return temperature in °C, °F.
    """

    def __init__(self, *args, config):
        """Initialize a TemperatureSensor accessory object."""
        super().__init__(*args, category=CATEGORY_SENSOR)
        serv_temp = add_preload_service(self, SERV_TEMPERATURE_SENSOR)
        self.char_temp = setup_char(
            CHAR_CURRENT_TEMPERATURE, serv_temp, value=0,
            properties=PROP_CELSIUS)
        self.unit = None

    def update_state(self, new_state):
        """Update temperature after state changed."""
        unit = new_state.attributes.get(ATTR_UNIT_OF_MEASUREMENT, TEMP_CELSIUS)
        temperature = convert_to_float(new_state.state)
        # 0.0° is a valid reading; only skip when the state could not be
        # parsed as a number (the previous truthiness check dropped 0.0).
        # Matches the `is not None` pattern of the other sensors here.
        if temperature is not None:
            temperature = temperature_to_homekit(temperature, unit)
            self.char_temp.set_value(temperature)
            _LOGGER.debug('%s: Current temperature set to %d°C',
                          self.entity_id, temperature)
@TYPES.register('HumiditySensor')
class HumiditySensor(HomeAccessory):
    """Generate a HumiditySensor accessory as humidity sensor."""

    def __init__(self, *args, config):
        """Initialize a HumiditySensor accessory object."""
        super().__init__(*args, category=CATEGORY_SENSOR)
        serv_humidity = add_preload_service(self, SERV_HUMIDITY_SENSOR)
        self.char_humidity = setup_char(
            CHAR_CURRENT_HUMIDITY, serv_humidity, value=0)

    def update_state(self, new_state):
        """Update accessory after state change."""
        humidity = convert_to_float(new_state.state)
        # 0 is a valid reading; only skip when the state could not be
        # parsed as a number (the previous truthiness check dropped 0).
        if humidity is not None:
            self.char_humidity.set_value(humidity)
            _LOGGER.debug('%s: Percent set to %d%%',
                          self.entity_id, humidity)
@TYPES.register('AirQualitySensor')
class AirQualitySensor(HomeAccessory):
    """Generate a AirQualitySensor accessory as air quality sensor."""

    def __init__(self, *args, config):
        """Initialize a AirQualitySensor accessory object."""
        super().__init__(*args, category=CATEGORY_SENSOR)
        service = add_preload_service(self, SERV_AIR_QUALITY_SENSOR,
                                      [CHAR_AIR_PARTICULATE_DENSITY])
        self.char_quality = setup_char(CHAR_AIR_QUALITY, service, value=0)
        self.char_density = setup_char(
            CHAR_AIR_PARTICULATE_DENSITY, service, value=0)

    def update_state(self, new_state):
        """Update accessory after state change."""
        density = convert_to_float(new_state.state)
        if density is None:
            # Non-numeric state: keep the previous reading.
            return
        self.char_density.set_value(density)
        self.char_quality.set_value(density_to_air_quality(density))
        _LOGGER.debug('%s: Set to %d', self.entity_id, density)
@TYPES.register('CarbonDioxideSensor')
class CarbonDioxideSensor(HomeAccessory):
    """Generate a CarbonDioxideSensor accessory as CO2 sensor."""

    def __init__(self, *args, config):
        """Initialize a CarbonDioxideSensor accessory object."""
        super().__init__(*args, category=CATEGORY_SENSOR)
        service = add_preload_service(self, SERV_CARBON_DIOXIDE_SENSOR, [
            CHAR_CARBON_DIOXIDE_LEVEL, CHAR_CARBON_DIOXIDE_PEAK_LEVEL])
        self.char_co2 = setup_char(
            CHAR_CARBON_DIOXIDE_LEVEL, service, value=0)
        self.char_peak = setup_char(
            CHAR_CARBON_DIOXIDE_PEAK_LEVEL, service, value=0)
        self.char_detected = setup_char(
            CHAR_CARBON_DIOXIDE_DETECTED, service, value=0)

    def update_state(self, new_state):
        """Update accessory after state change."""
        co2 = convert_to_float(new_state.state)
        if co2 is None:
            # Non-numeric state: keep the previous reading.
            return
        self.char_co2.set_value(co2)
        # Track the historical maximum for the "peak" characteristic.
        if co2 > self.char_peak.value:
            self.char_peak.set_value(co2)
        # Flag readings above the 1000 threshold as "detected".
        self.char_detected.set_value(co2 > 1000)
        _LOGGER.debug('%s: Set to %d', self.entity_id, co2)
@TYPES.register('LightSensor')
class LightSensor(HomeAccessory):
    """Generate a LightSensor accessory as light sensor."""

    def __init__(self, *args, config):
        """Initialize a LightSensor accessory object."""
        super().__init__(*args, category=CATEGORY_SENSOR)
        service = add_preload_service(self, SERV_LIGHT_SENSOR)
        self.char_light = setup_char(
            CHAR_CURRENT_AMBIENT_LIGHT_LEVEL, service, value=0)

    def update_state(self, new_state):
        """Update accessory after state change."""
        level = convert_to_float(new_state.state)
        if level is None:
            # Non-numeric state: keep the previous reading.
            return
        self.char_light.set_value(level)
        _LOGGER.debug('%s: Set to %d', self.entity_id, level)
@TYPES.register('BinarySensor')
class BinarySensor(HomeAccessory):
    """Generate a BinarySensor accessory as binary sensor."""

    def __init__(self, *args, config):
        """Initialize a BinarySensor accessory object."""
        super().__init__(*args, category=CATEGORY_SENSOR)
        state = self.hass.states.get(self.entity_id)
        device_class = state.attributes.get(ATTR_DEVICE_CLASS)
        # Unknown device classes fall back to the occupancy pairing.
        serv_name, char_name = BINARY_SENSOR_SERVICE_MAP.get(
            device_class, BINARY_SENSOR_SERVICE_MAP[DEVICE_CLASS_OCCUPANCY])
        service = add_preload_service(self, serv_name)
        self.char_detected = setup_char(char_name, service, value=0)

    def update_state(self, new_state):
        """Update accessory after state change."""
        detected = new_state.state in (STATE_ON, STATE_HOME)
        self.char_detected.set_value(detected)
        _LOGGER.debug('%s: Set to %d', self.entity_id, detected)
| [
37811,
9487,
284,
1745,
477,
12694,
18199,
526,
15931,
198,
11748,
18931,
198,
198,
6738,
1363,
562,
10167,
13,
9979,
1330,
357,
198,
220,
220,
220,
5161,
5446,
62,
4944,
2043,
62,
19238,
62,
11682,
1921,
11335,
10979,
11,
309,
39494,
... | 2.238784 | 3,455 |
from karatsuba import karatsuba

if __name__ == '__main__':
    # Multiply two fixed 64-digit operands with the Karatsuba algorithm.
    multiplicand = 3141592653589793238462643383279502884197169399375105820974944592
    multiplier = 2718281828459045235360287471352662497757247093699959574966967627
    print(karatsuba.karatsuba(multiplicand, multiplier))
6738,
479,
283,
1381,
22013,
1330,
479,
283,
1381,
22013,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
3601,
7,
21070,
1381,
22013,
13,
21070,
1381,
22013,
7,
33638,
19707,
22980,
2327,
4531,
... | 2.04878 | 123 |
import argparse
import os
import time
import numpy as np
from . import util,dsp,plot | [
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
640,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
764,
1330,
7736,
11,
67,
2777,
11,
29487
] | 3.36 | 25 |
from hashlib import md5
import logging
import base64
h = logging.StreamHandler()
serial_log = logging.Logger(__name__)
h.setFormatter(logging.Formatter("%(levelname)s:%(name)s:%(message)s"))
serial_log.addHandler(h)
serial_log.critical("""
# NOTE: This program serialises objects that if saved to an external source can be directly modified by the end user this may result in a security risk if
# 1) Any serialisable object has any functionality the end user should not be allowed to access
# 2) Any data that is deserialised is passed to any sensitive function i.e. ('exec', 'eval', 'os.system', etc), (NOTE: Doing so is NEVER a good idea)
""")
serial_log.setLevel(logging.ERROR)
# for global class_space for root serialisation
global class_space
class_space = dict()
# ====================== SERIAL OBJECTS AND BASIC CONVERSION ======================
# ====================== MAIN SERIAL FUNCTIONALITY ======================
sm = RootSerialManager() # access though to the global class_space
serialisable = sm.serialisable
linkedserialisable = sm.linkedserialisable
Serialiser = sm.Serialiser
Constructor = sm.Constructor
| [
6738,
12234,
8019,
1330,
45243,
20,
198,
11748,
18931,
198,
11748,
2779,
2414,
628,
198,
71,
796,
18931,
13,
12124,
25060,
3419,
198,
46911,
62,
6404,
796,
18931,
13,
11187,
1362,
7,
834,
3672,
834,
8,
198,
198,
71,
13,
2617,
8479,
... | 3.475904 | 332 |
import uuid
from django.db import models
| [
11748,
334,
27112,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
628
] | 3.307692 | 13 |
"""Test the util.interfaces module."""
# Builtins
import abc
# Packages
from phylline.util.interfaces import InterfaceClass, SetterProperty
class Interface(object, metaclass=InterfaceClass):
    """Interface class for testing interfaces."""
    def __init__(self):
        """Initialize members."""
        # Mutated by concrete implementations of method().
        self.value = None
    @abc.abstractmethod
    def method(self):
        """Do nothing."""
        pass
class SecondInterface(object, metaclass=InterfaceClass):
"""Interface class for testing intefaces."""
@abc.abstractmethod
class Derived(Interface):
    """Derived class for testing interfaces."""
    # NOTE(review): the tests below call Derived().method() and expect it
    # to set `value` to 0; that override is not visible in this view --
    # confirm it exists elsewhere.
    @SetterProperty
    def settable(self, value):
        """Modify the value member."""
        self.value = value
class SecondDerived(SecondInterface):
    """Derived class for testing interfaces."""
    # NOTE(review): relies on method() coming from SecondInterface; the visible
    # SecondInterface definition appears truncated in this chunk — confirm.
def test_interface_class():
    """Test whether the InterfaceClass works for docstring inheritance."""
    test_object = Derived()
    assert test_object.value is None
    # Docstring is expected to be inherited from Interface.method.
    assert test_object.method.__doc__ == 'Do nothing.'
    test_object.method()
    # NOTE(review): expects method() to set value to 0, but the visible
    # Interface.method stub only passes — the concrete implementation looks
    # truncated in this chunk; confirm against the full source file.
    assert test_object.value == 0
    test_object = SecondDerived()
    assert test_object.method()
    # SecondInterface's abstract method carries no docstring to inherit.
    assert test_object.method.__doc__ == None
def test_setter_property():
    """Check that assigning through SetterProperty updates the backing value."""
    obj = Derived()
    assert obj.value is None
    # Assigning to the descriptor should forward the value into obj.value.
    obj.settable = 42
    assert obj.value == 42
| [
37811,
14402,
262,
7736,
13,
3849,
32186,
8265,
526,
15931,
198,
198,
2,
28477,
1040,
198,
198,
11748,
450,
66,
198,
198,
2,
6400,
1095,
198,
198,
6738,
872,
25727,
500,
13,
22602,
13,
3849,
32186,
1330,
26491,
9487,
11,
5345,
353,
... | 3.028866 | 485 |
import cv2
from skimage.color import rgb2hsv,hsv2rgb
import numpy as np
from skimage.color import rgb2lab, lab2rgb
from global_StretchingL import global_stretching
from global_stretching_ab import global_Stretching_ab
| [
11748,
269,
85,
17,
198,
6738,
1341,
9060,
13,
8043,
1330,
46140,
17,
11994,
85,
11,
11994,
85,
17,
81,
22296,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
9060,
13,
8043,
1330,
46140,
17,
23912,
11,
2248,
17,
81,
22296,
198... | 2.92 | 75 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2018-05-25 06:47
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
24,
13,
940,
319,
2864,
12,
2713,
12,
1495,
9130,
25,
2857,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.736842 | 57 |
import sys
import random
import Event
import numpy as np
# 1 is couple, 0 is single, affecting the risk
# allocate this person according to the risk
# get the probability of affecting
| [
11748,
25064,
198,
11748,
4738,
198,
11748,
8558,
198,
11748,
299,
32152,
355,
45941,
628,
628,
220,
220,
220,
1303,
352,
318,
3155,
11,
657,
318,
2060,
11,
13891,
262,
2526,
628,
220,
220,
220,
1303,
31935,
428,
1048,
1864,
284,
262,... | 3.722222 | 54 |
import curses
from curses import KEY_RIGHT, KEY_LEFT, KEY_DOWN, KEY_UP
from random import randint
WIDTH = 35
HEIGHT = 20
MAX_X = WIDTH - 2
MAX_Y = HEIGHT - 2
SNAKE_LENGTH = 5
SNAKE_X = SNAKE_LENGTH + 1
SNAKE_Y = 3
TIMEOUT = 100
if __name__ == '__main__':
    # Set up the curses screen and a bordered game window with a read timeout
    # (TIMEOUT ms) so getch() doubles as the game tick.
    curses.initscr()
    curses.beep()
    curses.beep()
    window = curses.newwin(HEIGHT, WIDTH, 0, 0)
    window.timeout(TIMEOUT)
    window.keypad(1)
    curses.noecho()
    curses.curs_set(0)
    window.border(0)
    # NOTE(review): Snake and Food are defined elsewhere in this file — not
    # visible in this chunk.
    snake = Snake(SNAKE_X, SNAKE_Y, window)
    food = Food(window, '*')
    while True:
        # Redraw board, snake, food and score every tick.
        window.clear()
        window.border(0)
        snake.render()
        food.render()
        window.addstr(0, 5, snake.score)
        event = window.getch()
        # ESC (27) quits the game.
        if event == 27:
            break
        if event in [KEY_UP, KEY_DOWN, KEY_LEFT, KEY_RIGHT]:
            snake.change_direction(event)
        if snake.head.x == food.x and snake.head.y == food.y:
            snake.eat_food(food)
        # Space (32) pauses until space is pressed again.
        if event == 32:
            key = -1
            while key != 32:
                key = window.getch()
        snake.update()
        if snake.collided:
            break
    curses.endwin()
| [
198,
11748,
43878,
198,
6738,
43878,
1330,
35374,
62,
49,
9947,
11,
35374,
62,
2538,
9792,
11,
35374,
62,
41925,
11,
35374,
62,
8577,
198,
6738,
4738,
1330,
43720,
600,
198,
198,
54,
2389,
4221,
796,
3439,
198,
13909,
9947,
796,
1160,... | 2.010256 | 585 |
import os
import pandas as pd
import sys
import json
import random
from collections import defaultdict
import csv
import pickle
# 22179163 taobao单个用户最大scale
random.seed(1230)
name = 'book'
filter_size = 80 # 过滤item
filter_len = 0 # 过滤item history
# 把不对的时间筛掉
def read_from_amazon(source):
    """Parse an Amazon-style JSON-lines interaction file.

    Each line holds a JSON object with ``user_id``, ``item_id``, ``cate`` and
    ``timestamp`` fields.  Category strings are mapped to dense integer ids in
    order of first appearance.

    :param source: path to the JSON-lines file
        (e.g. ../data/book_data/merged_book_data.json).
    :return: tuple ``(item_count, users)`` where ``item_count`` maps
        item id -> number of interactions and ``users`` maps
        user id -> list of ``(item_id, cate_id, timestamp)`` tuples.
    """
    histories = defaultdict(list)
    counts = defaultdict(int)
    print("amazon data reading")
    category_ids = {}
    with open(source, 'r') as f:
        for raw_line in f:
            record = json.loads(raw_line.strip())
            user = record['user_id']
            item = record['item_id']
            # First-seen categories get the next dense id.
            cate_id = category_ids.setdefault(record['cate'], len(category_ids))
            counts[item] += 1
            histories[user].append((item, cate_id, float(record['timestamp'])))
    return counts, histories
# 把user_map里面的user都写入文档
if __name__ == '__main__':
main() | [
11748,
28686,
201,
198,
11748,
19798,
292,
355,
279,
67,
201,
198,
11748,
25064,
201,
198,
11748,
33918,
201,
198,
11748,
4738,
201,
198,
6738,
17268,
1330,
4277,
11600,
201,
198,
11748,
269,
21370,
201,
198,
11748,
2298,
293,
201,
198,... | 1.805596 | 679 |
import pickle
import numpy as np
import scipy
import sklearn
from sklearn.metrics import confusion_matrix
import torch
| [
11748,
2298,
293,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
629,
541,
88,
201,
198,
11748,
1341,
35720,
201,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
10802,
62,
6759,
8609,
201,
198,
11748,
28034,
201,
198,
201,
... | 2.148649 | 74 |
from setuptools import setup, Extension
import numpy as np
from Cython.Distutils import build_ext
requires = ['h5py',
'pandas',
'scipy',
'george',
'emcee',
'matplotlib',
'bokeh==0.10',
'astropy',
'corner',
'scikit-learn',
'lmfit',
'scikit-monaco',
'statsmodels',
'pysynphot',
'cython',
'pymultinest',
'seaborn',
'astroquery',
'isochrones',
'configobj'
]
data_files = {'kglib.spectral_type': ['data/*'],
'kglib.stellar_data': ['data/*']}
optional_requires = ['astropysics',
'pyraf', 'mlpy',
'anfft']
setup(name='kglib',
version='0.2.0',
author='Kevin Gullikson',
author_email='kevin.gullikson@gmail.com',
url="https://github.com/kgullikson88/gullikson-scripts",
description='A series of packages for my analysis',
license='The MIT License: http://www.opensource.org/licenses/mit-license.php',
classifiers = [
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',],
packages=['kglib', 'kglib.cross_correlation', 'kglib.isochrone_helpers',
'kglib.fitters', 'kglib.utils', 'kglib.spectral_type',
'kglib.stellar_models', 'kglib.stellar_data'],
package_data=data_files,
setup_requires=['cython', 'numpy>=1.6'],
cmdclass={'build_ext': build_ext},
ext_modules=[
Extension("kglib.stellar_models.RotBroad_Fast", ["kglib/stellar_models/RotBroad_Fast.c"],
include_dirs=[np.get_include()], extra_compile_args=["-O3"]),
Extension("kglib.utils.FittingUtilities", ["kglib/utils/FittingUtilities.c"],
include_dirs=[np.get_include()],
extra_compile_args=["-O3", "-funroll-loops"]),
],
install_requires=requires,
extras_require={'Extra stuff': optional_requires},
include_package_data=True
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
27995,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
327,
7535,
13,
20344,
26791,
1330,
1382,
62,
2302,
198,
198,
47911,
796,
37250,
71,
20,
9078,
3256,
198,
220,
220,
220,
220,
220,
220,
220,... | 1.966985 | 1,151 |
"""This module contains the utils functions of the library."""
import re
import random
import numpy as np
from typing import Callable, List, Optional, Sequence, Tuple, Union
from importlib import util
from skimage.segmentation import slic, felzenszwalb
from ..helpers.model_interface import ModelInterface
if util.find_spec("torch"):
import torch
from ..helpers.pytorch_model import PyTorchModel
if util.find_spec("tensorflow"):
import tensorflow as tf
from ..helpers.tf_model import TensorFlowModel
def get_superpixel_segments(img: np.ndarray, segmentation_method: str) -> np.ndarray:
    """Compute a 2D mask of superpixel segment labels for ``img``.

    ``img`` must be 3-dimensional (e.g. (3, 224, 224)); ``segmentation_method``
    selects between the ``slic`` and ``felzenszwalb`` algorithms.
    """
    if img.ndim != 3:
        raise ValueError(
            "Make sure that x is 3 dimensional e.g., (3, 224, 224) to calculate super-pixels."
            f" shape: {img.shape}")
    if segmentation_method == "slic":
        return slic(img, start_label=0)
    if segmentation_method == "felzenszwalb":
        return felzenszwalb(img)
    raise ValueError("'segmentation_method' must be either 'slic' or 'felzenszwalb'.")
def get_baseline_value(choice: Union[float, int, str, None], arr: np.ndarray,
                       patch: Optional[np.ndarray] = None, **kwargs) -> float:
    """Get the baseline value (float) to fill tensor with.

    choice: a concrete number (returned as-is), the (case-insensitive) name of
        a baseline strategy understood by get_baseline_dict, or None when one
        of the kwargs below supplies the value instead.
    arr: array the named baseline strategies are computed from.
    patch: optional neighbourhood patch used by the "neighbourhood_*" strategies.
    kwargs: may carry 'fixed_values' (always wins over choice),
        'perturb_baseline', 'constant_value' or 'input_shift'.

    Raises ValueError for unknown strategy names or unsupported choice types.
    """
    if choice is None:
        assert (
            ("perturb_baseline" in kwargs)
            or ("fixed_values" in kwargs)
            or ("constant_value" in kwargs)
            or ("input_shift" in kwargs)
        ), (
            # Bug fix: the first fragment previously read "Specify" with no
            # trailing space, so the message rendered as "Specifya ...".
            "Specify "
            "a 'perturb_baseline', 'fixed_values', 'constant_value' or 'input_shift' e.g., 0.0 or 'black' for "
            "pixel replacement or 'baseline_values' containing an array with one value per index for replacement."
        )
    # 'fixed_values' overrides any explicit choice.
    if "fixed_values" in kwargs:
        return kwargs["fixed_values"]
    if isinstance(choice, (float, int)):
        return choice
    elif isinstance(choice, str):
        fill_dict = get_baseline_dict(arr, patch)
        if choice.lower() not in fill_dict:
            raise ValueError(
                f"Ensure that 'choice'(str) is in {list(fill_dict.keys())}"
            )
        return fill_dict[choice.lower()]
    else:
        raise ValueError(
            "Specify 'perturb_baseline' or 'constant_value' as a string, integer or float."
        )
def get_baseline_dict(arr: np.ndarray, patch: Optional[np.ndarray] = None) -> dict:
    """Make a dictionary of baseline approaches depending on the input x (or patch of input).

    All values are plain floats so they can be used directly as fill values by
    get_baseline_value (annotated ``-> float``).
    """
    fill_dict = {
        "mean": float(arr.mean()),
        "random": float(random.random()),
        "uniform": float(random.uniform(arr.min(), arr.max())),
        "black": float(arr.min()),
        "white": float(arr.max()),
    }
    if patch is not None:
        # Bug fix: trailing commas previously turned these entries into
        # 1-tuples, breaking the float contract of get_baseline_value.
        fill_dict["neighbourhood_mean"] = float(patch.mean())
        fill_dict["neighbourhood_random_min_max"] = float(
            random.uniform(patch.min(), patch.max())
        )
    return fill_dict
def get_name(str: str):
    """Split a CamelCase class name into space-separated words.

    All-uppercase names (acronyms) are returned unchanged.
    """
    # NOTE(review): the parameter shadows the builtin ``str``; the name is kept
    # to preserve the public signature for keyword callers.
    if str.isupper():
        return str
    spaced = re.sub(r"([A-Z])", r" \1", str)
    return " ".join(spaced.split())
def filter_compatible_patch_sizes(perturb_patch_sizes: list, img_size: int) -> list:
    """Keep only the patch sizes that divide the image size evenly."""
    return list(filter(lambda size: img_size % size == 0, perturb_patch_sizes))
def infer_channel_first(x: np.array):
    """Infer whether a batched input is channel-first.

    1d data (3 total dims): assumes nr_channels < sequence_length.
        True for (nr_batch, nr_channels, sequence_length),
        False for (nr_batch, sequence_length, nr_channels).
    2d data (4 total dims): assumes nr_channels < img_width/img_height.
        True for (nr_batch, nr_channels, img_width, img_height),
        False for (nr_batch, img_width, img_height, nr_channels).

    Raises ValueError when the trailing dimensions are equal (ambiguous) or
    the input is neither 3- nor 4-dimensional.
    """
    err_msg = "Ambiguous input shape. Cannot infer channel-first/channel-last order."
    dims = np.shape(x)
    if len(dims) == 3:
        if dims[-2] < dims[-1]:
            return True
        if dims[-2] > dims[-1]:
            return False
        raise ValueError(err_msg)
    if len(dims) == 4:
        if dims[-1] < dims[-2] and dims[-1] < dims[-3]:
            return False
        if dims[-3] < dims[-1] and dims[-3] < dims[-2]:
            return True
        raise ValueError(err_msg)
    raise ValueError(
        "Only batched 1d and 2d multi-channel input dimensions supported.")
def make_channel_first(x: np.array, channel_first=False):
    """Move the channel axis to the front of a batched input.

    No-op when ``channel_first`` is already True.  Supports batched 1d
    (3 dims) and 2d (4 dims) multi-channel inputs.
    """
    if channel_first:
        return x
    ndim = len(np.shape(x))
    if ndim == 4:
        return np.moveaxis(x, -1, -3)
    if ndim == 3:
        return np.moveaxis(x, -1, -2)
    raise ValueError(
        "Only batched 1d and 2d multi-channel input dimensions supported.")
def make_channel_last(x: np.array, channel_first=True):
    """Move the channel axis to the back of a batched input.

    No-op when ``channel_first`` is already False.  Supports batched 1d
    (3 dims) and 2d (4 dims) multi-channel inputs.
    """
    if not channel_first:
        return x
    ndim = len(np.shape(x))
    if ndim == 4:
        return np.moveaxis(x, -3, -1)
    if ndim == 3:
        return np.moveaxis(x, -2, -1)
    raise ValueError(
        "Only batched 1d and 2d multi-channel input dimensions supported.")
def get_wrapped_model(model: ModelInterface, channel_first: bool) -> ModelInterface:
    """
    Identifies the type of a model object and wraps the model in an appropriate interface.
    Return wrapped model.

    Raises ValueError when the model is neither a tf.keras.Model nor a
    torch.nn.Module.
    """
    # Bug fix: tf/torch are only imported at module level when util.find_spec
    # succeeds, so referencing them unconditionally raised NameError in an
    # environment with only one of the two frameworks installed.  Guard each
    # isinstance check with the same find_spec condition.
    if util.find_spec("tensorflow") and isinstance(model, tf.keras.Model):
        return TensorFlowModel(model, channel_first)
    if util.find_spec("torch") and isinstance(model, torch.nn.modules.module.Module):
        return PyTorchModel(model, channel_first)
    raise ValueError(
        "Model needs to be tf.keras.Model or torch.nn.modules.module.Module."
    )
def conv2D_numpy(
    x: np.array, kernel: np.array, stride: int, padding: int, groups: int, pad_output: bool = False
) -> np.array:
    """
    Plain-numpy 2D (grouped) convolution.

    Expects x of shape [C_in, H, W] (C_in = input channels) and kernel of
    shape [C_out, C_in/groups, K, K] (C_out = output channels, K = kernel
    size).  When pad_output is True (only supported for stride 1, padding 0)
    the output is edge-padded back toward the input's spatial size.
    """
    # Zero-pad the spatial dimensions only.
    x = np.pad(x, [(0, 0), (padding, padding), (padding, padding)], mode="constant")
    c_in, height, width = x.shape
    c_out = kernel.shape[0]
    kernel_size = kernel.shape[2]

    # Sanity-check the grouping arithmetic.
    assert c_in % groups == 0
    assert c_out % groups == 0
    assert kernel.shape[1] * groups == c_in
    in_per_group = c_in // groups
    out_per_group = c_out // groups

    out_h = (height - kernel_size) // stride + 1
    out_w = (width - kernel_size) // stride + 1
    output = np.zeros((c_out, out_h, out_w)).astype(x.dtype)

    # Naive quadruple loop; clarity over speed.
    for g in range(groups):
        for c in range(out_per_group * g, out_per_group * (g + 1)):
            for row in range(out_h):
                for col in range(out_w):
                    window = x[
                        in_per_group * g : in_per_group * (g + 1),
                        row * stride : row * stride + kernel_size,
                        col * stride : col * stride + kernel_size,
                    ]
                    output[c][row][col] = np.multiply(window, kernel[c, :, :, :]).sum()

    if pad_output:
        if stride != 1 or padding != 0:
            raise NotImplementedError()
        padwidth = (kernel_size - 1) // 2
        output = np.pad(
            output,
            (
                (0, 0),
                (padwidth + padwidth % 2, padwidth),
                (padwidth + padwidth % 2, padwidth),
            ),
            mode="edge",
        )
    return output
def create_patch_slice(patch_size: Union[int, Sequence[int]], coords: Sequence[int],
                       expand_first_dim: bool) -> Tuple[Sequence[int]]:
    """
    Create a patch slice from patch size and coordinates.

    patch_size: scalar (broadcast over all coords) or a 1d sequence matching
        the length of coords.
    coords: start coordinate per dimension.
    expand_first_dim: set to True if you want to add one ':'-slice at the beginning.
    """
    if isinstance(patch_size, int):
        patch_size = (patch_size, )
    if isinstance(coords, int):
        coords = (coords, )
    patch_size = np.array(patch_size)
    coords = tuple(coords)

    if len(patch_size) == 1 and len(coords) != 1:
        # Bug fix: broadcast the scalar *value*; previously the whole
        # 1-element array was replicated and later converted via the
        # deprecated ndim>0 array -> int conversion (error in NumPy >= 2).
        patch_size = tuple(int(patch_size[0]) for _ in coords)
    elif patch_size.ndim != 1:
        raise ValueError("patch_size has to be either a scalar or a 1d-sequence")
    elif len(patch_size) != len(coords):
        raise ValueError(
            "patch_size sequence length does not match coords length"
            f" (len(patch_size) != len(coords))"
        )
    # make sure that each element in tuple is integer
    patch_size = tuple(int(patch_size_dim) for patch_size_dim in patch_size)
    patch_slice = [slice(coord, coord + patch_size_dim)
                   for coord, patch_size_dim in zip(coords, patch_size)]
    # Prepend slice for all channels.
    if expand_first_dim:
        patch_slice = [slice(None), *patch_slice]
    return tuple(patch_slice)
def expand_attribution_channel(a: np.ndarray, x: np.ndarray):
    """
    Return attributions ``a`` with a singleton channel axis inserted when they
    have exactly one dimension fewer than the input ``x``; otherwise return
    ``a`` unchanged.  Raises ValueError on batch/rank mismatches.
    """
    if a.shape[0] != x.shape[0]:
        raise ValueError(
            f"a and x must have same number of batches ({a.shape[0]} != {x.shape[0]})")
    if a.ndim > x.ndim:
        raise ValueError(
            f"a must not have greater ndim than x ({a.ndim} > {x.ndim})")
    if a.ndim < x.ndim - 1:
        raise ValueError(
            f"a can have at max one dimension less than x ({a.ndim} < {x.ndim} - 1)")
    # After the guards only two cases remain: equal rank (no-op) or one
    # missing channel axis (insert it at position 1).
    return a if a.ndim == x.ndim else np.expand_dims(a, axis=1)
def get_nr_patches(patch_size: Union[int, Sequence[int]],
                   shape: Tuple[int, ...],
                   overlap: bool = False) -> int:
    """ Get number of (non-overlapping) patches for given shape.

    patch_size: scalar (broadcast over all dims of shape) or a 1d sequence
        matching the length of shape.
    overlap: accepted for interface compatibility but currently unused —
        patches are always counted as non-overlapping.
    """
    if isinstance(patch_size, int):
        patch_size = (patch_size, )
    patch_size = np.array(patch_size)

    if len(patch_size) == 1 and len(shape) != 1:
        # Bug fix: broadcast the scalar value instead of replicating the
        # 1-element array (avoids deprecated array -> int conversion).
        patch_size = tuple(int(patch_size[0]) for _ in shape)
    elif patch_size.ndim != 1:
        raise ValueError("patch_size has to be either a scalar or a 1d-sequence")
    elif len(patch_size) != len(shape):
        raise ValueError(
            "patch_size sequence length does not match shape length"
            f" (len(patch_size) != len(shape))"
        )
    patch_size = tuple(int(patch_size_dim) for patch_size_dim in patch_size)
    return np.prod(shape) // np.prod(patch_size)
| [
37811,
1212,
8265,
4909,
262,
3384,
4487,
5499,
286,
262,
5888,
526,
15931,
198,
11748,
302,
198,
11748,
4738,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
19720,
1330,
4889,
540,
11,
7343,
11,
32233,
11,
45835,
11,
309,
29291,
11,
... | 2.299278 | 4,845 |
from datetime import datetime
from unittest import TestCase
from unittest.mock import patch
from common.repository import Repository
from dapp_user.config import NETWORK_ID, NETWORKS
from dapp_user.domain.services.user_service import UserService
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
198,
198,
6738,
2219,
13,
260,
1930,
37765,
1330,
1432,
13264,
198,
6738,
288,
1324,
62,
7220,
13,
1... | 3.647059 | 68 |
import logging
import os
import numpy as np
from keras_preprocessing.image import Iterator, load_img, img_to_array, array_to_img
from keras_preprocessing.image.iterator import BatchFromFilesMixin
from keras_preprocessing.image.directory_iterator import DirectoryIterator
class MildIterator(Iterator):
"""Abstract base class for image data iterators.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
| [
11748,
18931,
198,
11748,
28686,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
41927,
292,
62,
3866,
36948,
13,
9060,
1330,
40806,
1352,
11,
3440,
62,
9600,
11,
33705,
62,
1462,
62,
18747,
11,
7177,
62,
1462,
62,
9600,
198,
6738... | 3.257895 | 190 |
from vimba import *
import sys
sys.path.append('D:/Dropbox/py_projects/vimba-api/src/')
import vimbaapilib as val
from time import sleep, time_ns
if __name__ == '__main__':
main() | [
6738,
43907,
7012,
1330,
1635,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
10786,
35,
14079,
26932,
3524,
14,
9078,
62,
42068,
14,
31124,
7012,
12,
15042,
14,
10677,
14,
11537,
198,
11748,
43907,
7012,
499,
22282,
355,
1188,
198... | 2.761194 | 67 |
name = "nwbindexer"
| [
3672,
796,
366,
77,
39346,
9630,
263,
1,
198
] | 2.222222 | 9 |
#!/usr/bin/env python
import argparse
import logging
import logging.config
import os
import random
import signal
from dataclasses import asdict, dataclass
from typing import Dict, NamedTuple
import gym
import numpy as np
import torch
import torch.nn as nn
import wandb
from gym_gridverse.rng import reset_gv_rng
from asym_rlpo.algorithms import PO_A2C_ABC, make_a2c_algorithm
from asym_rlpo.envs import make_env
from asym_rlpo.evaluation import evaluate_returns
from asym_rlpo.q_estimators import q_estimator_factory
from asym_rlpo.sampling import sample_episodes
from asym_rlpo.utils.aggregate import average
from asym_rlpo.utils.checkpointing import Serializable, load_data, save_data
from asym_rlpo.utils.config import get_config
from asym_rlpo.utils.device import get_device
from asym_rlpo.utils.running_average import (
InfiniteRunningAverage,
RunningAverage,
WindowRunningAverage,
)
from asym_rlpo.utils.scheduling import make_schedule
from asym_rlpo.utils.timer import Dispenser, Timer
from asym_rlpo.utils.wandb_logger import WandbLogger
logger = logging.getLogger(__name__)
@dataclass
# NOTE: namedtuple does not allow multiple inheritance.. luckily Serializable
# is only an interface...
# class RunState(NamedTuple, Serializable):
if __name__ == '__main__':
    # Route every logger through a single DEBUG-level stdout handler before
    # entering main().
    logging.config.dictConfig(
        {
            'version': 1,
            'disable_existing_loggers': False,
            'formatters': {
                'standard': {
                    'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
                },
            },
            'handlers': {
                'default_handler': {
                    'class': 'logging.StreamHandler',
                    'level': 'DEBUG',
                    'formatter': 'standard',
                    'stream': 'ext://sys.stdout',
                },
            },
            'loggers': {
                # The empty name configures the root logger.
                '': {
                    'handlers': ['default_handler'],
                    'level': 'DEBUG',
                    'propagate': False,
                }
            },
        }
    )
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
1822,
29572,
198,
11748,
18931,
198,
11748,
18931,
13,
11250,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
6737,
198,
6738,
4818,
330,
28958,
1330,
355,
11600,
11,
4818,
330,
... | 2.202929 | 956 |
import getpass
import logging
import os
import phanas.nas
import phanas.file_utils
import subprocess
import sys
from pathlib import Path
from subprocess import PIPE
MOUNT_DIR_NAME = "__NAS__"
| [
11748,
651,
6603,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
872,
15991,
13,
24716,
198,
11748,
872,
15991,
13,
7753,
62,
26791,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
850,
... | 3.233333 | 60 |
from django.urls.base import reverse_lazy
from django.views.generic.detail import DetailView
from django.views.generic.edit import FormMixin, FormView
from django.views.generic.list import ListView
from products.forms import Reviewform
from products.models import Color, Product, ProductCategory, Review, Size
from django.shortcuts import render
from django.views.generic.base import TemplateView
# Create your views here.
# def ProductDetail(request):
# return render(request, 'product-detail.html')
| [
6738,
42625,
14208,
13,
6371,
82,
13,
8692,
1330,
9575,
62,
75,
12582,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
13,
49170,
1330,
42585,
7680,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
13,
19312,
1330,
5178,
35608,
259,
11,
... | 3.541096 | 146 |
import compas
from compas.datastructures import Mesh
from compas_viewers import MeshViewer
# Minimal compas_viewers demo: build a mesh from the polyhedron with 6 faces
# (a cube/hexahedron) and display it.
viewer = MeshViewer()
viewer.mesh = Mesh.from_polyhedron(6)
viewer.show()
| [
11748,
552,
292,
198,
6738,
552,
292,
13,
19608,
459,
1356,
942,
1330,
47529,
198,
6738,
552,
292,
62,
1177,
364,
1330,
47529,
7680,
263,
628,
198,
1177,
263,
796,
47529,
7680,
263,
3419,
198,
1177,
263,
13,
76,
5069,
796,
47529,
13... | 2.847458 | 59 |
# Load external modules.
from datetime import datetime, timedelta
import logging
import numpy as np
import pandas as pd
# Load internal modules.
import constants as const
import storage
import utils
#########
# NODES #
#########
#########
# EDGES #
#########
| [
2,
8778,
7097,
13103,
13,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
11748,
18931,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
2,
8778,
5387,
13103,
13,
198,
11748,
38491,
3... | 3.447368 | 76 |
from ipaddress import IPv4Address, IPv6Address
import click
import pytest
from click_params.network import (
IP_ADDRESS, IPV4_ADDRESS, IPV6_ADDRESS, IP_NETWORK, IPV4_NETWORK, IPV6_NETWORK, Ipv4AddressRange, Ipv6AddressRange,
IpAddressListParamType, Ipv4AddressListParamType, Ipv6AddressListParamType, IpNetworkListParamType,
Ipv4NetworkListParamType, Ipv6NetworkListParamType
)
from tests.helpers import assert_in_output, assert_equals_output
@pytest.mark.parametrize(('name', 'parameter'), [
('ip address', IP_ADDRESS),
('ipv4 address', IPV4_ADDRESS),
('ipv6 address', IPV6_ADDRESS),
('ip network', IP_NETWORK),
('ip address list', IpAddressListParamType()),
('ipv4 address list', Ipv4AddressListParamType()),
('ipv6 address list', Ipv6AddressListParamType()),
('ip network list', IpNetworkListParamType()),
('ipv4 network list', Ipv4NetworkListParamType()),
('ipv6 network list', Ipv6NetworkListParamType())
])
@pytest.mark.parametrize(('name', 'representation', 'parameter'), [
('ipv4 address range', f'IPV4AddressRange({repr(IPv4Address("127.0.0.1"))}, {repr(IPv4Address("127.0.0.5"))})',
Ipv4AddressRange(IPv4Address('127.0.0.1'), IPv4Address('127.0.0.5'))),
('ipv6 address range', f'IPV6AddressRange({repr(IPv6Address("::1"))}, {repr(IPv6Address("::10"))})',
Ipv6AddressRange(IPv6Address('::1'), IPv6Address('::10')))
])
@pytest.mark.parametrize(('parameter', 'param_value'), [
# generic ip address (ipv4 or ipv6)
(IP_ADDRESS, 'foo'),
(IP_ADDRESS, '1245'),
(IP_ADDRESS, '125.5'),
# ipv4 address
(IPV4_ADDRESS, 'foo'),
(IPV4_ADDRESS, '1245'),
(IPV4_ADDRESS, '125.5'),
# ipv6 address
(IPV6_ADDRESS, 'foo'),
(IPV6_ADDRESS, '1245'),
(IPV6_ADDRESS, '125.5'),
# generic ip network (ipv4 or ipv6)
(IP_NETWORK, 'foo'),
(IP_NETWORK, '1245'),
(IP_NETWORK, '1452.5'),
(IP_NETWORK, '12.0.0.0/45'),
(IP_NETWORK, '1245/24'),
(IP_NETWORK, '2001:db00::0/ffff:ff00::'),
# ipv4 network
(IPV4_NETWORK, 'foo'),
(IPV4_NETWORK, '1245'),
(IPV4_NETWORK, '1452.5'),
(IPV4_NETWORK, '12.0.0.0/45'),
(IPV4_NETWORK, '1245/24'),
# ipv6 network
(IPV6_NETWORK, 'foo'),
(IPV6_NETWORK, '1245'),
(IPV6_NETWORK, '1452.5'),
(IPV6_NETWORK, '2001:db00::0/ffff:ff00::'),
])
@pytest.mark.parametrize(('parameter', 'expression', 'message'), [
(IpAddressListParamType(' '), 'foo 10.0.0.1 1452', "ip addresses: ['foo', '1452']"),
(Ipv4AddressListParamType(', '), '10.0.0.1, foo, ::1', "ipv4 addresses: ['foo', '::1']"),
(Ipv6AddressListParamType(' '), '::1 foo ::dead:beef 10.0.0.1', "ipv6 addresses: ['foo', '10.0.0.1']"),
(IpNetworkListParamType(' '), '192.168.1.0/24 foo 1254 2001:db00::/24', "ip networks: ['foo', '1254']"),
(Ipv4NetworkListParamType(' '), '10.0.0.0/8 152 192.168.1.0/24', "ipv4 networks: ['152']"),
(Ipv6NetworkListParamType(' '), '2001:db00::/24 foo 2001:db00::0/ffff:ff00::',
"ipv6 networks: ['foo', '2001:db00::0/ffff:ff00::']")
])
@pytest.mark.parametrize(('parameter', 'value', 'message'), [
(Ipv4AddressRange(IPv4Address('192.168.1.1'), IPv4Address('192.168.1.254')), '192.169.1.1',
'192.169.1.1 is not in the valid range of 192.168.1.1 to 192.168.1.254.'),
(Ipv6AddressRange(IPv6Address('2001:db00::1'), IPv6Address('2001:dbff:ffff:ffff:ffff:ffff:ffff:fffe')),
IPv6Address('2001:dc00::9'),
'2001:dc00::9 is not in the valid range of 2001:db00::1 to 2001:dbff:ffff:ffff:ffff:ffff:ffff:fffe.')
])
@pytest.mark.parametrize(('parameter', 'param_value'), [
(IP_ADDRESS, '192.168.1.1'),
(IP_ADDRESS, '::dead:beef'),
(IPV4_ADDRESS, '192.168.1.1'),
(IPV6_ADDRESS, '::dead:beef'),
(IP_NETWORK, '192.168.0.0/24'),
(IP_NETWORK, '2001:db00::/24'),
(IPV4_NETWORK, '192.168.0.0/24'),
(IPV6_NETWORK, '2001:db00::/24'),
(Ipv4AddressRange(IPv4Address('192.168.1.1'), IPv4Address('192.168.1.254')), '192.168.1.1'),
(Ipv6AddressRange(IPv6Address('2001:db00::1'), IPv6Address('2001:dbff:ffff:ffff:ffff:ffff:ffff:fffe')),
'2001:db00::4')
])
@pytest.mark.parametrize(('parameter', 'expression', 'expected_output'), [
# ip address list
(IpAddressListParamType(), '192.168.1.2,::dead:beef', "[IPv4Address('192.168.1.2'), IPv6Address('::dead:beef')]\n"),
(IpAddressListParamType(' '), '192.168.1.2 ::dead:beef',
"[IPv4Address('192.168.1.2'), IPv6Address('::dead:beef')]\n"),
# ipv4 address list
(Ipv4AddressListParamType(), '10.0.0.1,192.168.1.2', "[IPv4Address('10.0.0.1'), IPv4Address('192.168.1.2')]\n"),
(Ipv4AddressListParamType(' '), '10.0.0.1 192.168.1.2', "[IPv4Address('10.0.0.1'), IPv4Address('192.168.1.2')]\n"),
# ipv6 address list
(Ipv6AddressListParamType(), '::1,::dead:beef', "[IPv6Address('::1'), IPv6Address('::dead:beef')]\n"),
(Ipv6AddressListParamType(', '), '::1, ::dead:beef', "[IPv6Address('::1'), IPv6Address('::dead:beef')]\n"),
# ip network list
(IpNetworkListParamType(), '192.168.1.0/24,2001:db00::/24',
"[IPv4Network('192.168.1.0/24'), IPv6Network('2001:db00::/24')]\n"),
(IpNetworkListParamType(' '), '192.168.1.0/24 2001:db00::/24',
"[IPv4Network('192.168.1.0/24'), IPv6Network('2001:db00::/24')]\n"),
# ipv4 network list
(Ipv4NetworkListParamType(), '10.0.0.0/8,192.168.1.0/24',
"[IPv4Network('10.0.0.0/8'), IPv4Network('192.168.1.0/24')]\n"),
(Ipv4NetworkListParamType(', '), '10.0.0.0/8, 192.168.1.0/24',
"[IPv4Network('10.0.0.0/8'), IPv4Network('192.168.1.0/24')]\n"),
# ipv6 network list
(Ipv6NetworkListParamType(), '2001:db00::/24,2001:db8:1234::/48',
"[IPv6Network('2001:db00::/24'), IPv6Network('2001:db8:1234::/48')]\n"),
(Ipv6NetworkListParamType(', '), '2001:db00::/24, 2001:db8:1234::/48',
"[IPv6Network('2001:db00::/24'), IPv6Network('2001:db8:1234::/48')]\n"),
])
@pytest.mark.parametrize("param_type", [
IpAddressListParamType, Ipv4AddressListParamType, Ipv6AddressListParamType, IpNetworkListParamType,
Ipv4NetworkListParamType, Ipv6NetworkListParamType
])
| [
6738,
20966,
21975,
1330,
25961,
19,
20231,
11,
25961,
21,
20231,
198,
198,
11748,
3904,
198,
11748,
12972,
9288,
198,
198,
6738,
3904,
62,
37266,
13,
27349,
1330,
357,
198,
220,
220,
220,
6101,
62,
2885,
7707,
7597,
11,
6101,
53,
19,... | 2.141301 | 2,845 |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right | [
2,
30396,
329,
257,
13934,
5509,
10139,
13,
198,
2,
1398,
12200,
19667,
25,
198,
2,
220,
220,
220,
220,
825,
11593,
15003,
834,
7,
944,
11,
1188,
28,
15,
11,
1364,
28,
14202,
11,
826,
28,
14202,
2599,
198,
2,
220,
220,
220,
220,... | 2.27381 | 84 |
#!/usr/bin/env python3
__author__ = 'konradk'
import sys
import argparse
from pprint import pprint
from datetime import date
from collections import Counter
from tqdm import tqdm
from gnomad.utils import slack
from ukbb_pan_ancestry import *
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # NOTE(review): most options below reuse the placeholder help text
    # 'Overwrite everything' — looks like copy-paste; confirm intended text.
    parser.add_argument('--overwrite', help='Overwrite everything', action='store_true')
    parser.add_argument('--run_basic_load', help='Overwrite everything', action='store_true')
    parser.add_argument('--run_additional_load', help='Overwrite everything', action='store_true')
    parser.add_argument('--run_combine_load', help='Overwrite everything', action='store_true')
    parser.add_argument('--dry_run', help='Overwrite everything', action='store_true')
    parser.add_argument('--load_only', help='Comma-separated list of trait_type-pheno-coding to run'
                                            '(e.g. continuous-50-irnt,icd_all-E10-icd10 )')
    parser.add_argument('--force_reload', help='Comma-separated list of trait_type-pheno-coding to force reload'
                                               '(e.g. continuous-50-irnt,icd_all-E10-icd10 )')
    parser.add_argument('--find_errors', help='Overwrite everything', action='store_true')
    parser.add_argument('--pops', help='comma-separated list')
    parser.add_argument('--slack_channel', help='Send message to Slack channel/user', default='@konradjk')
    args = parser.parse_args()
    if args.slack_channel:
        # Late import: the Slack credential package is only needed when notifying.
        from slack_token_pkg.slack_creds import slack_token
        with slack.slack_notifications(slack_token, args.slack_channel):
            main(args)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
834,
9800,
834,
796,
705,
74,
261,
6335,
74,
6,
198,
198,
11748,
25064,
198,
11748,
1822,
29572,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
6738,
4818,
8079,
1330,
3128,
19... | 2.586314 | 643 |
import os
import cv2
import argparse
import numpy as np
import pandas as pd
from PIL import Image
import tensorflow as tf
import matplotlib.pyplot as plt
from reg.data import DataLoader, read_image_by_tf
# Module-level loader for the training set (one-hot encoded masks with a
# single-entry palette).
data = DataLoader("../data/training_set",
                  one_hot_encoding=True,
                  palette=[255])
if __name__ == "__main__":
    # NOTE(review): show_pred is not visible in this chunk — presumably defined
    # earlier in the file; confirm against the full source.
    show_pred("")
11748,
28686,
198,
11748,
269,
85,
17,
198,
11748,
1822,
29572,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
350,
4146,
1330,
7412,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
2603... | 2.450331 | 151 |
import json
import os
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from dotenv import load_dotenv
# Pull IBM Cloud credentials from a local .env file into the environment.
load_dotenv()
apikey = os.environ['apikey']
url = os.environ['url']
# NOTE(review): `url` is read from the environment but the service URL below
# is hard-coded to us-south — confirm whether the env value should be used.
authenticator = IAMAuthenticator(apikey)
language_translator = LanguageTranslatorV3(
    version='2018-05-01',
    authenticator=authenticator
)
language_translator.set_service_url('https://api.us-south.language-translator.watson.cloud.ibm.com')
11748,
33918,
198,
11748,
28686,
198,
6738,
24283,
76,
62,
86,
13506,
1330,
15417,
8291,
41880,
53,
18,
198,
6738,
24283,
76,
62,
17721,
62,
21282,
74,
62,
7295,
13,
41299,
44549,
1330,
314,
2390,
47649,
26407,
198,
6738,
16605,
24330,
... | 2.92638 | 163 |
from django.conf.urls.defaults import patterns, include, url
# Serve /robots.txt: prefer class-based views (newer Django), fall back to
# the legacy function-based 'direct_to_template' on older Django versions.
try:
    from django.views.generic.base import TemplateView
    # NOTE(review): TextPlainView is referenced but only TemplateView is
    # imported here -- unless TextPlainView is defined earlier in this file,
    # this raises NameError and the except branch always runs. Confirm.
    urlpatterns = patterns('',
        url(r'^robots\.txt$', TextPlainView.as_view(template_name='robots.txt')),
    )
except:
    # NOTE(review): bare except also swallows the potential NameError above;
    # consider narrowing to ImportError once TextPlainView is resolved.
    urlpatterns = patterns('django.views.generic.simple',
        (r'^robots\.txt$', 'direct_to_template', {'template': 'robots.txt'}),
    )
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
13,
12286,
82,
1330,
7572,
11,
2291,
11,
19016,
198,
198,
28311,
25,
198,
220,
220,
220,
422,
42625,
14208,
13,
33571,
13,
41357,
13,
8692,
1330,
37350,
7680,
198,
220,
220,
220,
19016,
3... | 2.50641 | 156 |
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2018 New Vector
# Copyright 2019 Awesome Technologies Innovationslabor GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import getpass
import hashlib
import hmac
import logging
import sys
from six.moves import input
import requests as _requests
import yaml
import secrets
import string
import csv
import qrcode
from fpdf import FPDF
import os
import base64
import time
if __name__ == "__main__":
main() | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
1853,
11,
1584,
4946,
27470,
12052,
198,
2,
15069,
2864,
968,
20650,
198,
2,
15069,
13130,
25020,
21852,
43405,
602,
75,
4820,
402,
2022,
39,
198,
2,
198,
2,... | 3.610345 | 290 |
from rest_framework.permissions import BasePermission, SAFE_METHODS
from v1.teams.models.team import CoreTeam, ProjectTeam
from v1.teams.models.team_member import CoreMember, ProjectMember
class IsStaffOrReadOnly(BasePermission):
    """The request is authenticated as a user and is staff, or is a read-only request"""

    def has_permission(self, request, view):
        """Allow safe (read-only) methods for anyone; writes only for staff.

        Without this override the class inherited BasePermission's
        allow-all behavior, contradicting its docstring; SAFE_METHODS was
        imported but unused.
        """
        return bool(
            request.method in SAFE_METHODS
            or (request.user and request.user.is_staff)
        )
class IsSuperUserOrReadOnly(BasePermission):
    """The request is authenticated as a user and is superuser or is a read-only request"""

    def has_permission(self, request, view):
        """Allow safe (read-only) methods for anyone; writes only for superusers.

        Without this override the class inherited BasePermission's
        allow-all behavior, contradicting its docstring.
        """
        return bool(
            request.method in SAFE_METHODS
            or (request.user and request.user.is_superuser)
        )
| [
6738,
1334,
62,
30604,
13,
525,
8481,
1330,
7308,
5990,
3411,
11,
37630,
36,
62,
49273,
50,
198,
198,
6738,
410,
16,
13,
660,
4105,
13,
27530,
13,
15097,
1330,
7231,
15592,
11,
4935,
15592,
198,
6738,
410,
16,
13,
660,
4105,
13,
2... | 3.511278 | 133 |
from .train import Train
from .test import Test
framwork = "torch"
| [
198,
6738,
764,
27432,
1330,
16835,
198,
6738,
764,
9288,
1330,
6208,
198,
198,
19298,
1818,
796,
366,
13165,
354,
1,
628
] | 3.181818 | 22 |
import io
import json
import os
import subprocess
import sdi_utils.gensolution as gs
import sdi_utils.set_logging as slog
import sdi_utils.textfield_parser as tfp
import sdi_utils.tprogress as tp
# Detect whether we run inside the SAP Data Intelligence runtime, where a
# global `api` object is injected; standalone, referencing it raises NameError.
try:
    api
except NameError:
    all_lines = list()
# Operator port declarations consumed by the gensolution packaging step below.
inports = [{'name': 'stream', 'type': 'message.file',"description":"Input csv byte or string"}]
outports = [{'name': 'log', 'type': 'string',"description":"Logging data"}, \
            {'name': 'data', 'type': 'message.list',"description":"Output data as list"}]
#api.set_port_callback(inports[0]['name'], process)
if __name__ == '__main__':
    #test_operator()
    # NOTE(review): this packaging block dereferences api.config, which only
    # exists in the DI runtime, and uses machine-specific /Users/d051079 paths.
    # Also note the rm path ends in "...operators" while the bundle path ends
    # in "...operators_" -- presumably the rm target is wrong; confirm.
    if True :
        subprocess.run(["rm", '-r',
                        '/Users/d051079/OneDrive - SAP SE/GitHub/sdi_utils/solution/operators/sdi_utils_operators' + api.config.version])
        gs.gensolution(os.path.realpath(__file__), api.config, inports, outports)
        solution_name = api.config.operator_name + '_' + api.config.version
        subprocess.run(["vctl", "solution", "bundle",
                        '/Users/d051079/OneDrive - SAP SE/GitHub/sdi_utils/solution/operators/sdi_utils_operators_' + api.config.version, \
                        "-t", solution_name])
        subprocess.run(["mv", solution_name + '.zip', '../../../solution/operators'])
| [
11748,
33245,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
198,
11748,
264,
10989,
62,
26791,
13,
70,
641,
2122,
355,
308,
82,
198,
11748,
264,
10989,
62,
26791,
13,
2617,
62,
6404,
2667,
355,
25801,
198,
11748,
... | 2.379374 | 543 |
from datetime import datetime

# Ask seven people for their birth year and tally adults (>= 18) vs minors.
atual = datetime.now().year
maiores = 0
menores = 0
for _ in range(7):
    idade = atual - int(input('Em Que Ano Você Nasceu? '))
    if idade < 18:
        menores += 1
    else:
        maiores += 1
print('{} pessoa(s) é(são) de maior(es)'.format(maiores))
print('{} pessoa(s) é(são) de menor(es)'.format(menores))
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
265,
723,
796,
4818,
8079,
13,
2197,
22446,
1941,
198,
76,
1872,
2850,
796,
657,
198,
3653,
2850,
796,
657,
198,
198,
1640,
269,
287,
2837,
7,
16,
11,
23,
2599,
198,
220,
220,
220,
299... | 2.101695 | 177 |
"""Extract body part annotations."""
import re
import spacy
from traiter.const import COMMA
from traiter.patterns.matcher_patterns import MatcherPatterns
from myrsidea.pylib.const import COMMON_PATTERNS, CONJ, MISSING, REPLACE
# Tokens allowed to join multi-part annotations: conjunctions plus the comma.
JOINER = CONJ + COMMA
# Case-insensitive alternation matching any "missing" keyword on word
# boundaries; built once at import time.
MISSING_RE = '|'.join([fr'\b{m}\b' for m in MISSING])
MISSING_RE = re.compile(MISSING_RE, flags=re.IGNORECASE)
# Matcher for plain body-part mentions; the dict-union decoder (|) extends the
# shared COMMON_PATTERNS token map (requires Python 3.9+).
BODY_PART = MatcherPatterns(
    'body_part',
    on_match='myrsidea.body_part.v1',
    decoder=COMMON_PATTERNS | {
        'seg': {'ENT_TYPE': 'segmented'},
        'ord': {'ENT_TYPE': {'IN': ['ordinal', 'number_word']}},
    },
    patterns=[
        'part+',
        'part_loc+ part+',
        # 'missing? any_part* part',
        # 'part+ ord -? ord',
        # 'part+ 99? -? 99',
        # 'part+ ord?',
        # 'part+ 99?',
        # 'part+ ord -? seg',
        # 'part+ 99 -? seg',
        # 'ord? -? seg? part+',
        # '99 - seg part+',
    ],
)
# Matcher for roman-numeral references whose body part is not (yet) assigned.
UNASSIGNED_PART = MatcherPatterns(
    'unassigned_part',
    on_match='myrsidea.unassigned_part.v1',
    decoder=COMMON_PATTERNS | {
        'roman': {'ENT_TYPE': 'roman'},
    },
    patterns=[
        'roman',
        'roman - roman',
    ],
)
@spacy.registry.misc(BODY_PART.on_match)
def body_part(ent):
    """Attach normalized body-part data to a matched span.

    Each token is lower-cased and mapped through the REPLACE table, the
    results are joined into one label, and a ``missing`` flag is added when
    the span text contains any of the "missing" keywords.
    """
    normalized = ' '.join(REPLACE.get(tok.lower_, tok.lower_) for tok in ent)
    info = {}
    if MISSING_RE.search(ent.text.lower()) is not None:
        info['missing'] = True
    info['body_part'] = normalized
    ent._.data = info
@spacy.registry.misc(UNASSIGNED_PART.on_match)
def unassigned_part(ent):
    """Handle a span whose body part is not (yet) assigned.

    Currently a registered no-op: the enrichment logic below is commented
    out, so the entity is left untouched.  The callback must still exist
    because UNASSIGNED_PART.on_match refers to it by name.
    """
    # NOTE(review): mirror of body_part() kept for reference -- delete or
    # re-enable once the intended behavior is decided.
    # data = {}
    #
    # parts = [REPLACE.get(t.lower_, t.lower_) for t in ent]
    #
    # text = ' '.join(parts)
    #
    # if MISSING_RE.search(ent.text.lower()) is not None:
    #     data['missing'] = True
    #
    # data['body_part'] = text
    #
    # ent._.data = data
| [
37811,
11627,
974,
1767,
636,
37647,
526,
15931,
198,
198,
11748,
302,
198,
198,
11748,
599,
1590,
198,
6738,
1291,
2676,
13,
9979,
1330,
9440,
5673,
198,
6738,
1291,
2676,
13,
33279,
82,
13,
6759,
2044,
62,
33279,
82,
1330,
6550,
204... | 2.093304 | 911 |
# Django apps contributed by this package; presumably merged into
# INSTALLED_APPS by the consuming settings module -- confirm against settings.
SELIA_MANAGERS_APPS = [
    'selia_managers',
]
| [
50,
3698,
3539,
62,
10725,
4760,
4877,
62,
2969,
3705,
796,
685,
198,
220,
220,
220,
705,
741,
544,
62,
805,
10321,
3256,
198,
60,
198
] | 1.846154 | 26 |
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
import os
from typing import List, Tuple
import pandas as pd
import numpy as np
import datetime as dt
import py_scripts.transform
import pickle
# Directory where per-product models are persisted (pickle is imported above,
# so presumably as pickle files -- confirm).
MODELSDIR = r'../models'
# Input time-series dataset used by the models.
DATASET = r'../data/sim_ts_limpo.csv'
class modelo_produtos:
""" encapsulador para os modelos de cada produto """ | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
12,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
198,
6738,
19720,
1330,
7343,
11,
309,
29291,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
... | 2.637795 | 127 |
#!/usr/bin/env python3
'''
CLI interface for the grape system.
This allows the user to only deal with a single interface.
'''
import os
import sys
from grape import __version__
from grape import create, delete, save, load, ximport, xexport, status, tree
PROGRAM = os.path.splitext(os.path.basename(sys.argv[0]))[0]
def help0():
    '''
    Print the top-level usage text and exit with status 0.

    Reached for "help", "-h", "--help" or when no arguments are given
    (see main()).
    '''
    # NOTE: fixed user-facing typos in the help text ("rhe" -> "the",
    # duplicated "related", "by look for" -> "by looking for").
    print(f'''\
USAGE:
    {PROGRAM} [COMMAND] [OPTIONS]
DESCRIPTION:
    Welcome to the Grafana Model Development tool.
    This tool allows you to create, develop and maintain Grafana
    models in a local environment using docker containers.
    It can even import from and export to production setups.
    To use this tool you must have python3, pipenv and docker
    installed.
COMMANDS:
    Each command has its own help and examples. You get that help by
    specifying "COMMAND help" or "COMMAND -h" or "COMMAND --help".
    The following commands are available:
    help      This message.
    version   The system version.
    create    The create operation creates a docker container
              for grafana and a docker container for postgres.
              It then sets the datasource in the grafana server
              and creates a local directory to save the postgres
              state.
    delete    The delete operation deletes all artifacts created
              by the create operation.
    save      The save operation captures the specified
              visualization environment in a zip file.
              It is what you use to capture changes for
              future use.
    load      The load operation updates the visualization
              environment from a saved state (zip file).
    import    The import operation captures an external
              grafana environment for the purposes of
              experimenting or working locally.
              It imports the datasources without passwords
              because grafana never exports passwords which
              means that they have to be updated manually
              after the import operation completes or by
              specifying the passwords in the associated
              conf file. It does not import databases.
              The import operation creates a zip file that
              can be used by a load operation. It also
              requires a conf file that is specified by
              the -x option.
    export    The export operation exports a grafana
              visualization service to an external
              source. It requires a zip file from a
              save operation (-f) and a YAML file that
              describes the external source (-x).
    status    Report the status of grape
              related containers by looking for specific
              labels that were added when the containers
              were started.
    tree      Print a tree view of the datasources, folders
              and dashboards in a grafana server.
VERSION:
    {PROGRAM}-{__version__}
''')
    sys.exit(0)
def version():
    '''
    Write the "<program>-<version>" banner to stdout and exit successfully.

    Reached for "version", "-V" or "--version" (see main()).
    '''
    banner = f'{PROGRAM}-{__version__}'
    print(banner)
    sys.exit(0)
def main():
    '''
    Dispatch the first CLI argument to the matching subcommand.

    Commands may be abbreviated (e.g. "cr" for "create"); "-V" is matched
    case-sensitively.  Unknown commands report an error on stderr and exit
    with status 1.
    '''
    if len(sys.argv) < 2:
        # Nothing to dispatch: show the top-level help (which exits).
        help0()

    # Map every accepted spelling of a command to its handler.
    dispatch = {
        # help
        'help': help0,
        '--help': help0,
        '-h': help0,
        # version
        'version': version,
        '--version': version,
        '-V': version,
        # commands
        'create': create.main,
        'delete': delete.main,
        'save': save.main,
        'load': load.main,
        'import': ximport.main,
        'export': xexport.main,
        'status': status.main,
        'tree': tree.main,
    }

    raw = sys.argv[1]
    if raw == '-V':
        # '-V' keeps its case; every other command is matched lower-cased.
        handler = dispatch[raw]
    else:
        cmd = raw.lower()
        if cmd not in dispatch and not cmd.startswith('-'):
            # Accept abbreviated commands ("cr" -> "create"); the first key
            # in declaration order that starts with the input wins.
            cmd = next((key for key in dispatch if key.startswith(cmd)), cmd)
        if cmd not in dispatch:
            # Still unknown: report and exit stage left.
            sys.stderr.write(f'ERROR: unknown command "{cmd}", '
                             'please run this command for more '
                             f'information: {PROGRAM} help')
            sys.exit(1)
        handler = dispatch[cmd]

    # Shift argv so the subcommand sees itself as argv[0].
    sys.argv = sys.argv[1:]
    # Normalize the special forms "COMMAND help", "COMMAND version"
    # and "help COMMAND".
    if len(sys.argv) > 1:
        if sys.argv[1] == 'help':
            # user typed: COMMAND help
            sys.argv[1] = '--help'
        elif sys.argv[1] == 'version':
            # user typed: COMMAND version
            sys.argv[1] = '--version'
        elif sys.argv[0] == 'help' and not sys.argv[1].startswith('-'):
            # user typed: help COMMAND
            sys.argv[0] = sys.argv[1]
            sys.argv[1] = '--help'
    # Run the command.
    handler()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
7061,
6,
198,
5097,
40,
7071,
329,
262,
30777,
1080,
13,
198,
198,
1212,
3578,
262,
2836,
284,
691,
1730,
351,
257,
2060,
7071,
13,
198,
7061,
6,
198,
11748,
28686,
198,
11748,
... | 2.220439 | 2,368 |
#! /usr/bin/python
import sys
print "fib_loop: ", fib_loop()
| [
2,
0,
1220,
14629,
14,
8800,
14,
29412,
628,
198,
11748,
25064,
198,
198,
4798,
366,
69,
571,
62,
26268,
25,
33172,
12900,
62,
26268,
3419,
198
] | 2.37037 | 27 |
class Token:
"""
Basic token class.
Parameters
----------
lex : str
Token's lexeme.
token_type : Enum
Token's type.
"""
@property
| [
220,
220,
220,
220,
201,
198,
4871,
29130,
25,
201,
198,
220,
220,
220,
37227,
201,
198,
220,
220,
220,
14392,
11241,
1398,
13,
201,
198,
201,
198,
220,
220,
220,
40117,
201,
198,
220,
220,
220,
24200,
438,
201,
198,
220,
220,
220... | 1.95098 | 102 |
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV
# Hyper-parameter grid for gradient boosting: 5 * 3 * 3 = 45 combinations,
# each evaluated with 3-fold cross-validation.
parameters = {'n_estimators':[2, 5, 10, 50, 100],
              'max_depth':[3, 4, 5],
              'learning_rate':[0.001, 0.01, 0.1]}
model = GradientBoostingClassifier()
clfs = GridSearchCV(model, parameters, cv=3)
# NOTE(review): X_train/y_train are not defined in the visible portion of
# this file -- presumably created earlier; confirm before running.
clfs.fit(X_train, y_train)
# GridSearchCV refits the best parameter set on the full training data.
clf = clfs.best_estimator_
6738,
1341,
35720,
13,
1072,
11306,
1330,
17701,
1153,
45686,
278,
9487,
7483,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
24846,
18243,
33538,
198,
198,
17143,
7307,
796,
1391,
6,
77,
62,
395,
320,
2024,
10354,
58,
17,
11,
64... | 2.323171 | 164 |
"""Various compromisable services.
Attackers are PERMITTED to:
- call any service method not prefixed by an underscore
Attackers are PROHIBITED from:
- instantiating additional services beyond the one(s) provided
- manually getting/setting service attributes, regardless of underscores
"""
from Cryptodome.Cipher import AES
import lib.ciphers as ciphers
import lib.encoding as encoding
import lib.rng as rng
| [
37811,
40009,
8382,
43942,
2594,
13,
198,
198,
27732,
364,
389,
19878,
44,
22470,
1961,
284,
25,
198,
12,
869,
597,
2139,
2446,
407,
7694,
2966,
416,
281,
44810,
198,
198,
27732,
364,
389,
4810,
12096,
9865,
22061,
422,
25,
198,
12,
... | 3.824074 | 108 |
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from zaqarclient.queues import client as zaqar
# Config options for the [zaqar] group: credentials, endpoint and target
# queues used when Manila emits notifications through Zaqar.
# NOTE(review): user-facing help strings contain typos ("that is owns") and
# the default queue name is misspelled ("qeueue"); the queue name is a
# runtime value, so fixing it would change behavior -- confirm first.
zaqar_notification_opts = [
    cfg.StrOpt(
        "zaqar_username",
        help="Username that should be used for init of zaqar client.",
    ),
    cfg.StrOpt(
        "zaqar_password",
        secret=True,
        help="Password for user specified in opt 'zaqar_username'.",
    ),
    cfg.StrOpt(
        "zaqar_project_name",
        help=("Project/Tenant name that is owns user specified "
              "in opt 'zaqar_username'."),
    ),
    cfg.StrOpt(
        "zaqar_auth_url",
        default="http://127.0.0.1:35357/v2.0/",
        help="Auth url to be used by Zaqar client.",
    ),
    cfg.StrOpt(
        "zaqar_region_name",
        help="Name of the region that should be used. Optional.",
    ),
    cfg.StrOpt(
        "zaqar_service_type",
        default="messaging",
        help="Service type for Zaqar. Optional.",
    ),
    cfg.StrOpt(
        "zaqar_endpoint_type",
        default="publicURL",
        help="Type of endpoint to be used for init of Zaqar client. Optional.",
    ),
    cfg.FloatOpt(
        "zaqar_api_version",
        default=1.1,
        help="Version of Zaqar API to use. Optional.",
    ),
    cfg.ListOpt(
        "zaqar_queues",
        default=["manila_notification_qeueue"],
        help=("List of queues names to be used for sending Manila "
              "notifications. Optional."),
    ),
]
CONF = cfg.CONF
CONF.register_opts(zaqar_notification_opts, group='zaqar')
# Module-level Zaqar client built once at import time from the options above.
ZAQARCLIENT = zaqar.Client(
    version=CONF.zaqar.zaqar_api_version,
    conf={
        "auth_opts": {
            "backend": "keystone",
            "options": {
                "os_username": CONF.zaqar.zaqar_username,
                "os_password": CONF.zaqar.zaqar_password,
                "os_project_name": CONF.zaqar.zaqar_project_name,
                "os_auth_url": CONF.zaqar.zaqar_auth_url,
                "os_region_name": CONF.zaqar.zaqar_region_name,
                "os_service_type": CONF.zaqar.zaqar_service_type,
                "os_endpoint_type": CONF.zaqar.zaqar_endpoint_type,
                # NOTE(review): TLS verification is disabled unconditionally;
                # confirm this is intended outside dev environments.
                "insecure": True,
            },
        },
    },
)
| [
2,
15069,
357,
66,
8,
1853,
7381,
20836,
11,
3457,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220... | 2.161663 | 1,299 |
""" Constants. """
import os
# Canonical upstream location of the STARTTLS Everywhere policy list.
POLICY_REMOTE_URL = "https://dl.eff.org/starttls-everywhere/policy.json"
POLICY_FILENAME = "policy.json"
# Local copy of the policy stored next to this module.
POLICY_LOCAL_FILE = os.path.join(os.path.dirname(__file__), POLICY_FILENAME)
| [
37811,
4757,
1187,
13,
37227,
198,
198,
11748,
28686,
198,
198,
45472,
2149,
56,
62,
40726,
23051,
62,
21886,
796,
366,
5450,
1378,
25404,
13,
14822,
13,
2398,
14,
9688,
83,
7278,
12,
16833,
3003,
14,
30586,
13,
17752,
1,
198,
45472,
... | 2.366667 | 90 |
#!/usr/bin/env mayapy
import pymel.internal.apicache as apicache
import pymel.internal.cmdcache as cmdcache
print apicache
import pymel.internal.startup
print pymel.internal.startup
# Dev script (Python 2 / mayapy): round-trip each pymel cache through '.py'
# and '.py.zip' formats and diff the results to check the (de)serialization
# is lossless.
logger = pymel.internal.startup._logger
cacheClasses = (
    # apicache.ApiMelBridgeCache,
    apicache.ApiCache,
    cmdcache.CmdExamplesCache,
    cmdcache.CmdDocsCache,
    cmdcache.CmdCache,
)
#version = '2017'
version = None
for cacheClass in cacheClasses:
    # NOTE(review): getCache() is not defined in the visible portion of this
    # file -- presumably a helper that instantiates cacheClass (and version);
    # confirm before running.
    cacheInst = getCache()
    # Read the original '.zip' cache, rewrite it as '.py', then as '.py.zip',
    # re-reading after each write with a fresh cache instance.
    data = cacheInst.read(ext='.zip')
    newExt = '.py'
    newZipExt = newExt + '.zip'
    cacheInst.write(data, ext=newExt)
    cacheInst2 = getCache()
    newData = cacheInst2.read(ext=newExt)
    cacheInst.write(data, ext=newZipExt)
    cacheInst3 = getCache()
    newZipData = cacheInst3.read(ext=newZipExt)
    # Zipped and unzipped round-trips must agree exactly.
    assert newZipData == newData
    from pymel.util.arguments import compareCascadingDicts
    from pprint import pprint, pformat
    # Last element of the comparison result is the nested diff structure.
    diffs = compareCascadingDicts(data, newData)[-1]
    if not diffs:
        print "Yay!!! no diffs!"
    else:
        print "Boo... still diffs..."
        diffPath = cacheInst.path(ext='.diff')
        with open(diffPath, 'w') as f:
            f.write(pformat(diffs))
        print "wrote diffs to: {}".format(diffPath)
    # Only the first cache class is exercised for now.
    break
| [
2,
48443,
14629,
14,
8800,
14,
24330,
743,
12826,
198,
11748,
279,
4948,
417,
13,
32538,
13,
499,
291,
4891,
355,
2471,
291,
4891,
198,
11748,
279,
4948,
417,
13,
32538,
13,
28758,
23870,
355,
23991,
23870,
198,
4798,
2471,
291,
4891,... | 2.442085 | 518 |
#!/usr/bin/env python
"""
@package mi.dataset.parser.zplsc_b
@file marine-integrations/mi/dataset/parser/zplsc_b.py
@author Ronald Ronquillo & Richard Han
@brief Parser for the zplsc_b dataset driver
This file contains code for the zplsc_b parser to produce data particles and echogram plots.
The Simrad EK60 scientific echo sounder supports the *.raw file format.
The *.raw file may contain one or more of the following datagram types:
Configuration, NMEA, Annotation, Sample.
Every *.raw file begins with a configuration datagram. A second configuration datagram
within the file is illegal. The data content of the Configuration datagram of an already
existing file cannot be altered from the EK60. NMEA, Annotation and Sample datagrams
constitute the remaining file content. These datagrams are written to the *.raw file in the
order that they are generated by the EK60. Note: Strictly sequential time tags are not guaranteed.
A data particle is produced from metadata contained in the first ping of the series.
The metadata and echogram plots are extracted from the Sample datagram portion of the *.raw file.
Release notes:
Initial Release
"""
__author__ = 'Ronald Ronquillo'
__license__ = 'Apache 2.0'
import calendar
import ntplib
import re
import os
import numpy as np
from multiprocessing import Process
from datetime import datetime, timedelta
from struct import unpack
from collections import defaultdict
from mi.dataset.dataset_parser import SimpleParser
from mi.core.common import BaseEnum
from mi.core.exceptions import RecoverableSampleException
from mi.core.instrument.data_particle import DataParticle
from mi.core.log import get_logging_metaclass
from mi.logging import log
from mi.dataset.parser.zplsc_echogram import SAMPLE_MATCHER, LENGTH_SIZE, DATAGRAM_HEADER_SIZE, \
CONFIG_HEADER_SIZE, CONFIG_TRANSDUCER_SIZE,\
generate_plots, read_datagram_header, read_config_header, read_config_transducer
class ZplscBParticleKey(BaseEnum):
    """
    Class that defines fields that need to be extracted from the data.

    Values are the particle parameter names filled from the first ping
    series of each *.raw file (see METADATA_ENCODING_RULES below).
    """
    FILE_TIME = "zplsc_timestamp"                # raw file timestamp
    FILE_PATH = "filepath"                       # output echogram plot .png/s path and filename
    CHANNEL = "zplsc_channel"                    # transducer channel number
    TRANSDUCER_DEPTH = "zplsc_transducer_depth"  # five digit floating point number (%.5f, in meters)
    FREQUENCY = "zplsc_frequency"                # six digit fixed point integer (in Hz)
    TRANSMIT_POWER = "zplsc_transmit_power"      # three digit fixed point integer (in Watts)
    PULSE_LENGTH = "zplsc_pulse_length"          # six digit floating point number (%.6f, in seconds)
    BANDWIDTH = "zplsc_bandwidth"                # five digit floating point number (%.5f in Hz)
    SAMPLE_INTERVAL = "zplsc_sample_interval"    # six digit floating point number (%.6f, in seconds)
    SOUND_VELOCITY = "zplsc_sound_velocity"      # five digit floating point number (%.5f, in m/s)
    ABSORPTION_COEF = "zplsc_absorption_coeff"   # four digit floating point number (%.4f, dB/m)
    TEMPERATURE = "zplsc_temperature"            # three digit floating point number (%.3f, in degC)
# The following is used for _build_parsed_values() and defined as below:
# (parameter name, encoding function)
METADATA_ENCODING_RULES = [
(ZplscBParticleKey.FILE_TIME, str),
(ZplscBParticleKey.FILE_PATH, lambda x: [str(y) for y in x]),
(ZplscBParticleKey.CHANNEL, lambda x: [int(y) for y in x]),
(ZplscBParticleKey.TRANSDUCER_DEPTH, lambda x: [float(y) for y in x]),
(ZplscBParticleKey.FREQUENCY, lambda x: [float(y) for y in x]),
(ZplscBParticleKey.TRANSMIT_POWER, lambda x: [float(y) for y in x]),
(ZplscBParticleKey.PULSE_LENGTH, lambda x: [float(y) for y in x]),
(ZplscBParticleKey.BANDWIDTH, lambda x: [float(y) for y in x]),
(ZplscBParticleKey.SAMPLE_INTERVAL, lambda x: [float(y) for y in x]),
(ZplscBParticleKey.SOUND_VELOCITY, lambda x: [float(y) for y in x]),
(ZplscBParticleKey.ABSORPTION_COEF, lambda x: [float(y) for y in x]),
(ZplscBParticleKey.TEMPERATURE, lambda x: [float(y) for y in x])
]
# Numpy data type object for unpacking the Sample datagram including the header from binary *.raw
sample_dtype = np.dtype([('length1', 'i4'), # 4 byte int (long)
# DatagramHeader
('datagram_type', 'a4'), # 4 byte string
('low_date_time', 'i4'), # 4 byte int (long)
('high_date_time', 'i4'), # 4 byte int (long)
# SampleDatagram
('channel_number', 'i2'), # 2 byte int (short)
('mode', 'i2'), # 2 byte int (short)
('transducer_depth', 'f4'), # 4 byte float
('frequency', 'f4'), # 4 byte float
('transmit_power', 'f4'), # 4 byte float
('pulse_length', 'f4'), # 4 byte float
('bandwidth', 'f4'), # 4 byte float
('sample_interval', 'f4'), # 4 byte float
('sound_velocity', 'f4'), # 4 byte float
('absorption_coefficient', 'f4'), # 4 byte float
('heave', 'f4'), # 4 byte float
('roll', 'f4'), # 4 byte float
('pitch', 'f4'), # 4 byte float
('temperature', 'f4'), # 4 byte float
('trawl_upper_depth_valid', 'i2'), # 2 byte int (short)
('trawl_opening_valid', 'i2'), # 2 byte int (short)
('trawl_upper_depth', 'f4'), # 4 byte float
('trawl_opening', 'f4'), # 4 byte float
('offset', 'i4'), # 4 byte int (long)
('count', 'i4')]) # 4 byte int (long)
sample_dtype = sample_dtype.newbyteorder('<')
GET_CONFIG_TRANSDUCER = False # Optional data flag: not currently used
BLOCK_SIZE = 1024*4 # Block size read in from binary file to search for token
# ZPLSC EK 60 *.raw filename timestamp format
# ei. OOI-D20141211-T214622.raw
TIMESTAMP_FORMAT = "%Y%m%d%H%M%S"
# Regex to extract the timestamp from the *.raw filename (path/to/OOI-DYYYYmmdd-THHMMSS.raw)
FILE_NAME_MATCHER = re.compile(
r'.+-D(?P<Date>(\d{4})(\d{2})(\d{2}))-T(?P<Time>\d{2}\d{2}\d{2})\.raw')
class DataParticleType(BaseEnum):
    """
    Class that defines the data particles generated from the zplsc_b data.

    Only one stream is produced: the per-file instrument metadata particle
    (see ZplscBInstrumentDataParticle).
    """
    SAMPLE = 'zplsc_b_metadata'  # instrument data particle
class ZplscBInstrumentDataParticle(DataParticle):
    """
    Data particle carrying the zplsc_b instrument metadata.

    The raw_data dictionary is gathered from the first ping series of the
    *.raw file; each field is encoded per METADATA_ENCODING_RULES.
    """

    _data_particle_type = DataParticleType.SAMPLE

    def _build_parsed_values(self):
        """
        Encode every (field name, encoder) pair from the Instrument Particle
        Mapping table against self.raw_data and return the value list.
        """
        values = []
        for field_name, encoder in METADATA_ENCODING_RULES:
            values.append(self._encode_value(field_name, self.raw_data[field_name], encoder))
        return values
class ZplscBParser(SimpleParser):
"""
Parser for zplsc_b *.raw files
"""
__metaclass__ = get_logging_metaclass(log_level='debug')
def __init__(self, config, stream_handle, exception_callback, output_file_path):
"""
Initialize the zplsc_b parser, which does not use state or the chunker
and sieve functions.
@param config: The parser configuration dictionary
@param stream_handle: The stream handle of the file to parse
@param exception_callback: The callback to use when an exception occurs
@param output_file_path: The location to output the echogram plot .png files
"""
self.output_file_path = output_file_path
super(ZplscBParser, self).__init__(config, stream_handle, exception_callback)
def parse_file(self):
"""
Parse the *.raw file.
"""
# Extract the file time from the file name
input_file_name = self._stream_handle.name
(filepath, filename) = os.path.split(input_file_name)
# tuple contains the string before the '.', the '.', and the 'raw' string
outfile = filename.rpartition('.')[0]
match = FILE_NAME_MATCHER.match(input_file_name)
if match:
file_time = match.group('Date') + match.group('Time')
rel_file_path = os.path.join(*match.groups()[1:-1])
full_file_path = os.path.join(self.output_file_path, rel_file_path)
if not os.path.exists(full_file_path):
os.makedirs(full_file_path)
else:
file_time = ""
rel_file_path = ""
# Files retrieved from the instrument should always match the timestamp naming convention
self.recov_exception_callback("Unable to extract file time from input file name: %s."
"Expected format *-DYYYYmmdd-THHMMSS.raw" % input_file_name)
# Read binary file a block at a time
raw = self._stream_handle.read(BLOCK_SIZE)
# Set starting byte
byte_cnt = 0
# Read the configuration datagram, output at the beginning of the file
length1, = unpack('<l', raw[byte_cnt:byte_cnt+LENGTH_SIZE])
byte_cnt += LENGTH_SIZE
# Configuration datagram header
datagram_header = read_datagram_header(raw[byte_cnt:byte_cnt+DATAGRAM_HEADER_SIZE])
byte_cnt += DATAGRAM_HEADER_SIZE
# Configuration: header
config_header = read_config_header(raw[byte_cnt:byte_cnt+CONFIG_HEADER_SIZE])
byte_cnt += CONFIG_HEADER_SIZE
transducer_count = config_header['transducer_count']
if GET_CONFIG_TRANSDUCER:
td_gain = {}
td_gain_table = {}
td_pulse_length_table = {}
td_phi_equiv_beam_angle = {}
# Configuration: transducers (1 to 7 max)
for i in xrange(1, transducer_count+1):
config_transducer = read_config_transducer(
raw[byte_cnt:byte_cnt+CONFIG_TRANSDUCER_SIZE])
# Example data that one might need for various calculations later on
td_gain[i] = config_transducer['gain']
td_gain_table[i] = config_transducer['gain_table']
td_pulse_length_table[i] = config_transducer['pulse_length_table']
td_phi_equiv_beam_angle[i] = config_transducer['equiv_beam_angle']
byte_cnt += CONFIG_TRANSDUCER_SIZE * transducer_count
# Compare length1 (from beginning of datagram) to length2 (from the end of datagram) to
# the actual number of bytes read. A mismatch can indicate an invalid, corrupt, misaligned,
# or missing configuration datagram or a reverse byte order binary data file.
# A bad/missing configuration datagram header is a significant error.
length2, = unpack('<l', raw[byte_cnt:byte_cnt+LENGTH_SIZE])
if not (length1 == length2 == byte_cnt-LENGTH_SIZE):
raise ValueError(
"Length of configuration datagram and number of bytes read do not match: length1: %s"
", length2: %s, byte_cnt: %s. Possible file corruption or format incompatibility." %
(length1, length2, byte_cnt+LENGTH_SIZE))
first_ping_metadata = defaultdict(list)
trans_keys = range(1, transducer_count+1)
trans_array = dict((key, []) for key in trans_keys) # transducer power data
trans_array_time = dict((key, []) for key in trans_keys) # transducer time data
td_f = dict.fromkeys(trans_keys) # transducer frequency
td_dR = dict.fromkeys(trans_keys) # transducer depth measurement
position = 0
while raw:
# We only care for the Sample datagrams, skip over all the other datagrams
match = SAMPLE_MATCHER.search(raw)
if not match:
# Read in the next block w/ a token sized overlap
self._stream_handle.seek(self._stream_handle.tell() - 4)
raw = self._stream_handle.read(BLOCK_SIZE)
# The last 4 bytes is just the length2 of the last datagram
if len(raw) <= 4:
break
# Offset by size of length value
match_start = match.start() - LENGTH_SIZE
# Seek to the position of the length data before the token to read into numpy array
self._stream_handle.seek(position + match_start)
# Read and unpack the Sample Datagram into numpy array
sample_data = np.fromfile(self._stream_handle, dtype=sample_dtype, count=1)
channel = sample_data['channel_number'][0]
# Check for a valid channel number that is within the number of transducers config
# to prevent incorrectly indexing into the dictionaries.
# An out of bounds channel number can indicate invalid, corrupt,
# or misaligned datagram or a reverse byte order binary data file.
# Log warning and continue to try and process the rest of the file.
if channel < 0 or channel > transducer_count:
log.warn("Invalid channel: %s for transducer count: %s."
"Possible file corruption or format incompatibility.", channel, transducer_count)
# Need current position in file to increment for next regex search offset
position = self._stream_handle.tell()
# Read the next block for regex search
raw = self._stream_handle.read(BLOCK_SIZE)
continue
# Convert high and low bytes to internal time
internal_time = (sample_data['high_date_time'][0] << 32) + sample_data['low_date_time'][0]
# Note: Strictly sequential time tags are not guaranteed.
trans_array_time[channel].append(internal_time)
# Gather metadata once per transducer channel number
if not trans_array[channel]:
file_path = os.path.join(
rel_file_path, outfile + '_' + str(int(sample_data['frequency'])/1000) + 'k.png')
first_ping_metadata[ZplscBParticleKey.FILE_TIME] = file_time
first_ping_metadata[ZplscBParticleKey.FILE_PATH].append(file_path)
first_ping_metadata[ZplscBParticleKey.CHANNEL].append(channel)
first_ping_metadata[ZplscBParticleKey.TRANSDUCER_DEPTH].append(sample_data['transducer_depth'][0])
first_ping_metadata[ZplscBParticleKey.FREQUENCY].append(sample_data['frequency'][0])
first_ping_metadata[ZplscBParticleKey.TRANSMIT_POWER].append(sample_data['transmit_power'][0])
first_ping_metadata[ZplscBParticleKey.PULSE_LENGTH].append(sample_data['pulse_length'][0])
first_ping_metadata[ZplscBParticleKey.BANDWIDTH].append(sample_data['bandwidth'][0])
first_ping_metadata[ZplscBParticleKey.SAMPLE_INTERVAL].append(sample_data['sample_interval'][0])
first_ping_metadata[ZplscBParticleKey.SOUND_VELOCITY].append(sample_data['sound_velocity'][0])
first_ping_metadata[ZplscBParticleKey.ABSORPTION_COEF].append(sample_data['absorption_coefficient'][0])
first_ping_metadata[ZplscBParticleKey.TEMPERATURE].append(sample_data['temperature'][0])
# Make only one particle for the first ping series containing data for all channels
if channel == config_header['transducer_count']:
# Convert from Windows time to NTP time.
time = datetime(1601, 1, 1) + timedelta(microseconds=internal_time/10.0)
year, month, day, hour, min, sec = time.utctimetuple()[:6]
unix_time = calendar.timegm((year, month, day, hour, min, sec+(time.microsecond/1e6)))
time_stamp = ntplib.system_to_ntp_time(unix_time)
# Extract a particle and append it to the record buffer
# Note: numpy unpacked values still need to be encoded
particle = self._extract_sample(ZplscBInstrumentDataParticle, None,
first_ping_metadata,
time_stamp)
log.debug('Parsed particle: %s', particle.generate_dict())
self._record_buffer.append(particle)
# Extract various calibration parameters used for generating echogram plot
# This data doesn't change so extract it once per channel
td_f[channel] = sample_data['frequency'][0]
td_dR[channel] = sample_data['sound_velocity'][0] * sample_data['sample_interval'][0] / 2
count = sample_data['count'][0]
# Extract array of power data
power_dtype = np.dtype([('power_data', '<i2')]) # 2 byte int (short)
power_data = np.fromfile(self._stream_handle, dtype=power_dtype, count=count)
# Decompress power data to dB
trans_array[channel].append(power_data['power_data'] * 10. * np.log10(2) / 256.)
# Read the athwartship and alongship angle measurements
if sample_data['mode'][0] > 1:
angle_dtype = np.dtype([('athwart', '<i1'), ('along', '<i1')]) # 1 byte ints
angle_data = np.fromfile(self._stream_handle, dtype=angle_dtype, count=count)
# Read and compare length1 (from beginning of datagram) to length2
# (from the end of datagram). A mismatch can indicate an invalid, corrupt,
# or misaligned datagram or a reverse byte order binary data file.
# Log warning and continue to try and process the rest of the file.
len_dtype = np.dtype([('length2', '<i4')]) # 4 byte int (long)
length2_data = np.fromfile(self._stream_handle, dtype=len_dtype, count=1)
if not (sample_data['length1'][0] == length2_data['length2'][0]):
log.warn("Mismatching beginning and end length values in sample datagram: length1"
": %s, length2: %s. Possible file corruption or format incompatibility."
, sample_data['length1'][0], length2_data['length2'][0])
# Need current position in file to increment for next regex search offset
position = self._stream_handle.tell()
# Read the next block for regex search
raw = self._stream_handle.read(BLOCK_SIZE)
# Driver spends most of the time plotting,
# this can take longer for more transducers so lets break out the work
processes = []
for channel in td_f.iterkeys():
try:
process = Process(target=self.generate_echogram_plot,
args=(trans_array_time[channel], trans_array[channel],
td_f[channel], td_dR[channel], channel,
os.path.join(
self.output_file_path,
first_ping_metadata[ZplscBParticleKey.FILE_PATH][channel-1])))
process.start()
processes.append(process)
except Exception, e:
log.error("Error: Unable to start process: %s", e)
for p in processes:
p.join()
@staticmethod | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
31,
26495,
21504,
13,
19608,
292,
316,
13,
48610,
13,
89,
489,
1416,
62,
65,
198,
31,
7753,
16050,
12,
18908,
9143,
14,
11632,
14,
19608,
292,
316,
14,
48610,
14,
... | 2.221293 | 9,083 |
from typing import Optional
from ...error import GraphQLError
from ...language import OperationDefinitionNode, OperationType
from . import ASTValidationRule
__all__ = ['SingleFieldSubscriptionsRule', 'single_field_only_message']
class SingleFieldSubscriptionsRule(ASTValidationRule):
    """Subscriptions must only include one field.

    A GraphQL subscription is valid only if it contains a single root
    field.
    """
| [
6738,
19720,
1330,
32233,
198,
198,
6738,
2644,
18224,
1330,
29681,
48,
2538,
81,
1472,
198,
6738,
2644,
16129,
1330,
14680,
36621,
19667,
11,
14680,
6030,
198,
6738,
764,
1330,
29273,
7762,
24765,
31929,
198,
198,
834,
439,
834,
796,
3... | 3.765766 | 111 |
from typing import List
from autoprep.relation import Relation
from autoprep.storage.base.train_storage_mixin import TrainStorageMixin
from autoprep.table import Table
| [
6738,
19720,
1330,
7343,
198,
198,
6738,
22320,
7856,
13,
49501,
1330,
4718,
341,
198,
6738,
22320,
7856,
13,
35350,
13,
8692,
13,
27432,
62,
35350,
62,
19816,
259,
1330,
16835,
31425,
35608,
259,
198,
6738,
22320,
7856,
13,
11487,
1330... | 3.953488 | 43 |
import datetime
import time
import datedelta
import sqlalchemy.orm
import models.collateral
import models.database
import models.financing_statement
import models.party
import models.payment
import models.search
import schemas.financing_statement
import schemas.payment
| [
11748,
4818,
8079,
198,
11748,
640,
198,
198,
11748,
14567,
12514,
198,
11748,
44161,
282,
26599,
13,
579,
198,
198,
11748,
4981,
13,
26000,
10534,
198,
11748,
4981,
13,
48806,
198,
11748,
4981,
13,
15643,
5077,
62,
26090,
198,
11748,
4... | 3.835616 | 73 |
from collections import deque | [
6738,
17268,
1330,
390,
4188
] | 5.8 | 5 |
from decimal import Inexact
from sqlalchemy import Column, Float
from .database import Base
from sqlalchemy import Column, Integer, String, Float
# this is a truncated version of the above table
# to improve browser performance
| [
6738,
32465,
1330,
554,
1069,
529,
198,
6738,
44161,
282,
26599,
1330,
29201,
11,
48436,
198,
6738,
764,
48806,
1330,
7308,
198,
198,
6738,
44161,
282,
26599,
1330,
29201,
11,
34142,
11,
10903,
11,
48436,
198,
198,
2,
428,
318,
257,
4... | 4.181818 | 55 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import csv
def StoreRecords(data, path='data.csv', verbose=False):
    """Store a list of records in a CSV file.

    Writes one header row taken from the keys of the first record, then one
    data row per record.  Values are stripped of surrounding whitespace and
    embedded newlines are removed so each record stays on a single CSV row.

    Args:
        data: non-empty list of dicts; all records share the first one's keys.
        path: destination CSV file path (default 'data.csv').
        verbose: when True, print a confirmation message on success.

    Returns:
        True on success, False when there is nothing to write or the file
        could not be written.  (Previously success fell through and returned
        None, which was falsy and indistinguishable from the False failure
        return for callers using ``if not StoreRecords(...)``.)
    """
    if not data:
        # No records: writing an empty file with no header would be ambiguous.
        return False
    try:
        # newline='' lets the csv module control line endings, as required by
        # the csv documentation for Python 3 writers.
        with open(path, 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(data[0].keys())
            for record in data:
                writer.writerow([str(value).strip().replace('\n', '')
                                 for value in record.values()])
    except Exception as e:
        # Deliberate best-effort contract: report and signal failure rather
        # than propagate (matches the original behavior).
        print('Could not store data in CSV.')
        print(e)
        return False
    if verbose:
        print('Stored %d records in %s' % (len(data), path))
    return True
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
269,
21370,
198,
198,
4299,
9363,
6690,
3669,
7,
7890,
11,
3108,
11639,
7890,
13,
40664,
3256,
15942,
577,
28,
2... | 2.427083 | 192 |
from request import request
# Run main() only when this file is executed directly, not when imported.
# NOTE(review): main() is not defined or imported in the visible code —
# confirm it is provided elsewhere before relying on this entry point.
if __name__ == '__main__':
    main()
6738,
2581,
1330,
2581,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419
] | 3 | 22 |
import unittest
from Day12 import sum_of_numbers_in_json as sut
# Run the test suite when executed directly; verbosity=2 prints each
# test name and its result as it runs.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| [
11748,
555,
715,
395,
198,
198,
6738,
3596,
1065,
1330,
2160,
62,
1659,
62,
77,
17024,
62,
259,
62,
17752,
355,
264,
315,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
1... | 2.431373 | 51 |
import requests
import json
import time
# Voice-interactive news reader: asks the user for a news category, fetches
# the latest Indian headlines from newsapi.org and reads each title aloud
# via read() (text-to-speech helper provided elsewhere — TODO confirm).
#
# Steps to find any News API key:
#   1. go to chrome, search for news API
#   2. create a new account and get your api key
#   3. then go to the source section, search your favorite News API
#   4. copy that url
#   5. then enter it here
read("Hello To All ")
time.sleep(2)

# NOTE(review): the API key is hard-coded below; consider moving it to an
# environment variable so it is not committed to source control.
_API_KEY = "3982dbaaa1d84cacba10d3c020c6d033"

# Menu choice -> (API category slug, spoken confirmation message).
# Replaces four duplicated if/elif branches that each rebuilt the same URL.
_CATEGORIES = {
    1: ("health", "latest health news are ...."),
    2: ("business", "latest bussiness news are...."),
    3: ("sports", "latest sport news are...."),
    4: ("technology", "latest technology news are...."),
}

while True:
    read("enter your choice which type of news do you want to listen ")
    read("enter 1 for health news in india")
    read("enter 2 for bussiness news in india")
    read("enter 3 for sport news in india")
    read("enter 4 for technology news in india")
    read("enter your choice i am waiting for your interest of news ")
    n = int(input("enter your choice : "))
    if n in _CATEGORIES:
        category, confirmation = _CATEGORIES[n]
        # Renamed from `str`, which shadowed the builtin of the same name.
        news_url = ("https://newsapi.org/v2/top-headlines?country=in"
                    "&category=%s&apiKey=%s" % (category, _API_KEY))
        read(confirmation)
        break
    else:
        print("enter correct choice")

# example for API
# https://newsapi.org/v2/top-headlines?country=in&apiKey=<your key>
if __name__ == '__main__':
    url = news_url
    news = requests.get(url).text
    news_dict = json.loads(news)
    art = news_dict['articles']
    for article in art:
        read(article['title'])
        time.sleep(2)
        read("Moving on to the next news...Listen Carefully")
        time.sleep(3)
    read("Thanks for listening")
| [
11748,
7007,
198,
11748,
33918,
198,
11748,
640,
628,
198,
961,
7203,
15496,
1675,
1439,
366,
8,
198,
198,
2435,
13,
42832,
7,
17,
8,
198,
2,
32144,
1675,
9938,
597,
3000,
7824,
198,
2,
1303,
198,
2,
352,
13,
467,
284,
32030,
2989... | 2.335632 | 870 |