| id | content |
|---|---|
11493761
|
from django.urls import path
from . import views
import blog.views
urlpatterns = [
path('', blog.views.blogPage),
path('login/', views.loginUser, name='login'),
path('logout/', views.logoutUser, name='logout'),
path('profile/', views.userProfile, name='profile')
]
|
11493818
|
from gcg.data.timer import timeit
from gcg.data.logger import logger
from gcg.algos.gcg import GCG
class GCGinference(GCG):
def __init__(self,
exp_name,
env_params, env_eval_params,
rp_params, rp_eval_params,
labeller_params,
policy_params,
alg_params,
log_level='info', log_fname='log.txt', seed=None, is_continue=False, params_txt=None):
env_eval_params = None
if not alg_params['init_inference_ckpt']:
print('\n\n!!!!!!!!! No checkpoint being loaded !!!!!!!!!\n\n')
alg_params['init_train_ckpt'] = None
super(GCGinference, self).__init__(
exp_name=exp_name,
env_params=env_params, env_eval_params=env_eval_params,
rp_params=rp_params, rp_eval_params=rp_eval_params,
labeller_params=labeller_params,
policy_params=policy_params,
alg_params=alg_params,
log_level=log_level, log_fname=log_fname, seed=seed, is_continue=is_continue, params_txt=params_txt
)
#################
### Inference ###
#################
def _run_init_inference(self):
inference_itr = self._fm.get_inference_itr()
self._restore_rollouts('train')
self._restore_rollouts('eval')
save_itr = inference_itr
start_step = save_itr * self._save_every_n_steps
timeit.reset()
timeit.start('total')
return start_step, save_itr
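# Inference loop (run() below): starting from the restored iteration, step the
# environment, periodically run evaluation rollouts, log statistics, and save
# the collected rollouts every `_save_every_n_steps` steps.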
def run(self):
start_step, save_itr = self._run_init_inference()
last_eval_step = 0
step = start_step
while step < self._total_steps:
step += 1
if step >= self._sample_after_n_steps:
step = self._run_env_step(step)
if step - last_eval_step >= self._eval_every_n_steps and self._replay_pool.finished_storing_rollout:
self._run_env_eval(step, do_sampler_step=True, calculate_holdout=False)
last_eval_step = step
if step % self._log_every_n_steps == 0:
self._run_log(step)
if step % self._save_every_n_steps == 0:
logger.info('Saving files for itr {0}'.format(save_itr))
self._save_inference(save_itr,
self._replay_pool.get_recent_rollouts(),
self._replay_pool_eval.get_recent_rollouts())
save_itr += 1
if step >= self._total_steps:
logger.info('Saving files for itr {0}'.format(save_itr))
self._save_inference(save_itr,
self._replay_pool.get_recent_rollouts(),
self._replay_pool_eval.get_recent_rollouts())
|
11493827
|
from typing import Dict
from boa3.model.builtin.interop.interopevent import InteropEvent
from boa3.model.variable import Variable
class NotifyMethod(InteropEvent):
def __init__(self):
self._event_name_key = 'notification_name'
from boa3.model.type.type import Type
identifier = 'notify'
syscall = 'System.Runtime.Notify'
args: Dict[str, Variable] = {'state': Variable(Type.any),
self._event_name_key: Variable(Type.str)
}
import ast
event_name_default = ast.parse("'{0}'".format(identifier)
).body[0].value
super().__init__(identifier, syscall, args, defaults=[event_name_default])
@property
def generate_name(self) -> bool:
return False
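# Only the notification state is included in the generated event arguments;
# the notification name key is handled separately and excluded here.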
@property
def args_to_generate(self) -> Dict[str, Variable]:
return {key_name: value_type
for key_name, value_type in self.args.items()
if key_name != self._event_name_key}
|
11493855
|
import argparse
import cv2
import random
from glob import glob
import time
import os, sys, inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
code_dir = os.path.dirname(os.path.dirname(current_dir))
sys.path.insert(0, code_dir)
from tools.utils import mkdir
from data.folder_dataset import make_dataset
def main(args):
frame_paths = make_dataset(args.data_root, recursive=True, from_vid=False)
random.shuffle(frame_paths)
mkdir(args.out_dir)
i = len(glob(os.path.join(args.out_dir, "*.jpg")))  # count existing annotations (saved as .jpg below)
global frame
for path in frame_paths:
try:
original_frame = cv2.imread(path)
frame = original_frame.copy()
success, (x, y) = annotate()
if success:
out_path = os.path.join(args.out_dir, f"{i:05d}_{x}_{y}.jpg")
cv2.imwrite(out_path, original_frame)
i += 1
except Exception:
print(f"Skipping {path}")
time.sleep(3)
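# Annotation flow: each frame is shown in an OpenCV window; a left click marks
# a point (drawn as a filled circle), pressing 'a' accepts the point for the
# current frame, and pressing Esc skips it. Accepted coordinates are encoded
# in the saved file name (see main()).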
def annotate():
cv2.namedWindow('image')
cv2.setMouseCallback('image', get_x_y)
while True:
cv2.imshow('image', frame)
k = cv2.waitKey(20) & 0xFF
if k == 27:
break
elif k == ord('a'):
return True, (mouseX, mouseY)
return False, (None, None)
def get_x_y(event, x, y, flags, param):
global mouseX, mouseY
if event == cv2.EVENT_LBUTTONDOWN:
cv2.circle(frame, (x, y), 10, (255, 0, 0), -1)
mouseX, mouseY = x, y
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--data_root', type=str, required=True)
parser.add_argument('--out_dir', type=str, required=True)
args = parser.parse_args()
main(args)
|
11493881
|
import os
TEST_BUCKET = os.environ.get("GCSFS_TEST_BUCKET", "gcsfs_test")
TEST_PROJECT = os.environ.get("GCSFS_TEST_PROJECT", "project")
TEST_REQUESTER_PAYS_BUCKET = "gcsfs_test_req_pay"
|
11493894
|
from textwrap import dedent
from setuptools import setup, find_packages
setup(
version = '5.5.0.post6',
name = 'clingo-cffi-system',
description = 'CFFI-based bindings to the clingo solver.',
long_description = dedent('''\
This package provides CFFI-based bindings to the clingo solver.
Clingo is part of the [Potassco](https://potassco.org) project for *Answer Set Programming* (ASP).
ASP offers a simple and powerful modeling language to describe combinatorial problems as *logic programs*.
The *clingo* system then takes such a logic program and computes *answer sets* representing solutions to the given problem.
To get an idea, check our [Getting Started](https://potassco.org/doc/start/) page and the [online version](https://potassco.org/clingo/run/) of clingo.
Temporarily, the API documentation of this project can be found [here](https://www.cs.uni-potsdam.de/~kaminski/pyclingo-cffi/).
'''),
long_description_content_type='text/markdown',
author = '<NAME>',
author_email = '<EMAIL>',
license = 'MIT',
url = 'https://github.com/potassco/clingo',
setup_requires=["cffi>=1.0.0"],
cffi_modules=["build.py:ffi"],
install_requires=["cffi>=1.0.0"],
packages=[ 'clingo' ],
package_data={ 'clingo': [ 'py.typed' ] },
python_requires=">=3.6"
)
|
11493895
|
from networks.backbone import resnet
def build_backbone(backbone, output_stride, BatchNorm, nInputChannels, pretrained):
if backbone == "resnet101":
return resnet.ResNet101(
output_stride,
BatchNorm,
nInputChannels=nInputChannels,
pretrained=pretrained,
)
elif backbone == "resnet50":
return resnet.ResNet50(
output_stride,
BatchNorm,
nInputChannels=nInputChannels,
pretrained=pretrained,
)
else:
raise NotImplementedError("Unsupported backbone: {}".format(backbone))
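# Illustrative usage (the argument values here are assumptions, not taken from this repo):
# backbone = build_backbone("resnet101", output_stride=16,
#                           BatchNorm=torch.nn.BatchNorm2d,
#                           nInputChannels=3, pretrained=True)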
|
11493925
|
import os
import requests
import getpass
import configargparse
from .progress_bar import CustomProgress
from .metadata import Metadata
from .constants import extensions
class Crawler:
base_url = 'https://www.hackerrank.com/'
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36 Edg/85.0.564.63'
login_url = base_url + 'auth/login'
submissions_url = base_url + 'rest/contests/master/submissions/?offset={}&limit={}'
challenge_url = base_url + 'rest/contests/master/challenges/{}/submissions/{}'
domain_url = base_url + 'domains/{}'
subdomain_url = base_url + 'domains/{}/{}'
problem_url = base_url + 'challenges/{}/problem'
subdomain_readme_text = '## [{}]({})\n\n|Problem Name|Problem Link|Language|Solution Link|\n---|---|---|---\n'
domain_readme_text = '## [{}]({})\n\n|Subdomain|Problem Name|Problem Link|Language|Solution Link|\n---|---|---|---|---\n'
root_readme_text = '## [Hackerrank]({})\n\n|Domain|Subdomain|Problem Name|Problem Link|Language|Solution Link|\n---|---|---|---|---|---\n'
readme_headers_len = len(subdomain_readme_text.split('\n')) - 1
subdomain_readme_row = '|{}|[Problem]({})|{}|[Solution]({})|\n'
domain_readme_row = '|{}|{}|[Problem]({})|{}|[Solution]({})|\n'
root_readme_row = '|{}|{}|{}|[Problem]({})|{}|[Solution]({})|\n'
base_folder_name = 'Hackerrank'
# make a separate folder for different languages e.g Hackerrank/Regex/Introduction/python3/matching.py
make_language_folder = False
# prepend language in file extension e.g Hackerrank/Regex/Introduction/matching.python3.py
prepend_language_in_extension = False
file_extensions = extensions
def __init__(self):
self.session = requests.Session()
self.total_submissions = 0
self.options = {}
def login(self, username, password):
resp = self.session.post(self.login_url, auth=(username, password), headers={'user-agent': self.user_agent})
data = resp.json()
if data['status']:
self.cookies = self.session.cookies.get_dict()
self.headers = resp.request.headers
self.get_number_of_submissions()
return data['status']
def parse_script(self):
p = configargparse.ArgParser(default_config_files=['./user.yaml'])
p.add('-c', '--config', is_config_file=True, help='config file path')
p.add('-l', '--limit', help='limit to no. of solutions to be crawled')
p.add('-o', '--offset', help='crawl solutions starting from this number')
p.add('-u', '--username', help='hackerrank account username')
p.add('-p', '--password', help='hackerrank account password')
self.options = p.parse_args()
def authenticate(self):
username = self.options.username or input('Hackerrank Username: ')
password = self.options.password or getpass.getpass('Hackerrank Password: ')
return self.login(username, password)
def get_number_of_submissions(self):
if not self.total_submissions:
all_submissions_url = self.get_all_submissions_url(0, 0)
resp = self.session.get(all_submissions_url, headers=self.headers)
self.total_submissions = resp.json()['total']
return self.total_submissions
def get_all_submissions_url(self, offset, limit):
return self.submissions_url.format(offset, limit)
def get_submission_url(self, challenge_slug, submission_id):
return self.challenge_url.format(challenge_slug, submission_id)
def store_submission(self, file_name, code):
if not os.path.exists(file_name):
os.makedirs(os.path.dirname(file_name), exist_ok=True)
with open(file_name, 'w') as text_file:
text_file.write(code)
def update_readme(self, readme_file_path, readme_text):
header_length = self.readme_headers_len
with open(readme_file_path, 'r+') as text_file:
lines = text_file.readlines()
lines.append(readme_text)
sortedlines = lines[:header_length] + sorted(lines[header_length:])
text_file.seek(0)
text_file.writelines(sortedlines)
def write(self, file_name, text):
os.makedirs(os.path.dirname(file_name), exist_ok=True)
with open(file_name, 'w') as text_file:
text_file.write(text)
def create_readmes(self, domain_name, subdomain_name, domain_url, subdomain_url,
subdomain_readme_path, domain_readme_path, root_readme_path):
"""
Method to check if the readme files already exist. If they don't exist, create them and add headers.
"""
if not os.path.exists(subdomain_readme_path):
text = self.subdomain_readme_text.format(subdomain_name, subdomain_url)
self.write(subdomain_readme_path, text)
if not os.path.exists(domain_readme_path):
text = self.domain_readme_text.format(domain_name, domain_url)
self.write(domain_readme_path, text)
if not os.path.exists(root_readme_path):
text = self.root_readme_text.format(self.base_url)
self.write(root_readme_path, text)
def update_readmes(self, domain_name, subdomain_name, domain_url, subdomain_url,
challenge_name, challenge_slug, language, file_name_with_extension):
"""
Method to add a new row corresponding to a new solution in the readme files
"""
subdomain_readme_path = os.path.join(self.base_folder_name, domain_name, subdomain_name, 'README.md')
if self.make_language_folder:
subdomain_readme_path = os.path.join(self.base_folder_name, domain_name, subdomain_name, language, 'README.md')
domain_readme_path = os.path.join(self.base_folder_name, domain_name, 'README.md')
root_readme_path = os.path.join(self.base_folder_name, 'README.md')
self.create_readmes(domain_name, subdomain_name, domain_url, subdomain_url,
subdomain_readme_path, domain_readme_path, root_readme_path)
problem_url = self.problem_url.format(challenge_slug)
file_path_relative_to_subdomain = './' + file_name_with_extension
file_path_relative_to_domain = '{}/{}'.format(subdomain_name, file_name_with_extension)
file_path_relative_to_root = '{}/{}/{}'.format(domain_name, subdomain_name, file_name_with_extension)
subdomain_readme_text = self.subdomain_readme_row.format(challenge_name, problem_url, language, file_path_relative_to_subdomain)
domain_readme_text = self.domain_readme_row.format(subdomain_name, challenge_name, problem_url, language, file_path_relative_to_domain)
root_readme_text = self.root_readme_row.format(domain_name, subdomain_name, challenge_name, problem_url, language, file_path_relative_to_root)
self.update_readme(
subdomain_readme_path,
subdomain_readme_text,
)
self.update_readme(
domain_readme_path,
domain_readme_text,
)
self.update_readme(
root_readme_path,
root_readme_text,
)
def get_submissions(self, submissions):
headers = self.headers
progress = CustomProgress('Downloading Solutions', max=len(submissions))
metadata = Metadata()
for submission in submissions:
submission_id = submission['id']
challenge_id = submission['challenge_id']
status = submission['status']
language = submission['language']
status_code = submission['status_code']
challenge = submission['challenge']
challenge_name = challenge['name']
challenge_slug = challenge['slug']
if submission_id > metadata.get(challenge_id) and (status == 'Accepted' or status_code == 2):
metadata.put(challenge_id, submission_id)
submission_url = self.get_submission_url(challenge_slug, submission_id)
resp = self.session.get(submission_url, headers=headers)
data = resp.json()['model']
code = data['code']
track = data['track']
# Default should be empty
file_extension = ''
file_name = challenge_slug
domain_name = 'Others'
subdomain_name = 'Miscellaneous'
domain_slug = ''
subdomain_slug = ''
if track:
domain_name = track['track_name'].strip().replace(' ', '')
subdomain_name = track['name'].strip().replace(' ', '')
domain_slug = track['track_slug']
subdomain_slug = track['slug']
domain_url = self.domain_url.format(domain_slug)
subdomain_url = self.subdomain_url.format(domain_slug, subdomain_slug)
if language in self.file_extensions:
if self.prepend_language_in_extension:
file_extension += '.{}'.format(language)
file_extension += '.{}'.format(self.file_extensions[language])
if file_extension.endswith('.java'):
file_name = challenge_name.replace(' ','')
file_name_with_extension = file_name + file_extension
file_path = os.path.join(self.base_folder_name, domain_name, subdomain_name, file_name_with_extension)
if self.make_language_folder:
file_path = os.path.join(self.base_folder_name, domain_name, subdomain_name, language, file_name_with_extension)
self.store_submission(file_path, code)
self.update_readmes(domain_name, subdomain_name, domain_url, subdomain_url,
challenge_name, challenge_slug, language, file_name_with_extension)
progress.next()
progress.finish()
print('All Solutions Crawled')
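# Overall flow: parse CLI/config options, authenticate against HackerRank,
# fetch the submission list once, then download each accepted solution and
# update the nested README tables (root -> domain -> subdomain).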
def main():
crawler = Crawler()
crawler.parse_script()
if not crawler.authenticate():
print('Auth was unsuccessful. Exiting the program')
exit(1)
limit = crawler.options.limit or crawler.total_submissions
offset = crawler.options.offset or 0
print('Start crawling {} solutions starting from {}'.format(limit, offset))
all_submissions_url = crawler.get_all_submissions_url(offset, limit)
resp = crawler.session.get(all_submissions_url, headers=crawler.headers)
data = resp.json()
models = data['models']
crawler.get_submissions(models)
if __name__ == "__main__":
main()
|
11493954
|
from jumpscale.loader import j
PYTHON_PACKAGES = [
"jupyterlab",
"voila",
"voila-gridstack",
"voila-vuetify",
"matplotlib",
"ipywidgets",
"jupyterlab_code_formatter",
]
class notebooks:
def __init__(self):
self.notebook_dir = j.sals.fs.join_paths(j.core.dirs.BASEDIR)
def get_cmd(self, voila=False, base_url=None, ip="127.0.0.1", port=8888):
if not voila:
# This needs to be executed in the same process, and startup cmds use `exec -a <process-name>`,
# so if we used a semicolon, it would separate the execution
cmd = "jupyter serverextension enable --py jupyterlab_code_formatter\n"
cmd += "jupyter lab --no-browser --NotebookApp.allow_remote_access=True --NotebookApp.token=''"
cmd += f" --NotebookApp.password='' --ip={ip} --port={port} --allow-root"
else:
cmd = f"voila --Voila.ip={ip} --Voila.port={port}"
if base_url:
cmd += f" --NotebookApp.base_url={base_url}"
return cmd
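# Example (illustrative values): get_cmd(voila=True, base_url="/notebooks/", port=8899)
# returns a voila command bound to 127.0.0.1:8899 with the given base URL appended.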
@property
def startupcmd(self):
cmd = j.tools.startupcmd.get("notebooks")
start_cmd = self.get_cmd(base_url="/notebooks/")
cmd.start_cmd = start_cmd
cmd.ports = [8888]
cmd.save()
return cmd
def install(self):
"""Called when package is added
"""
rc, _, _ = j.sals.process.execute("python -c 'import jupyterlab'")
if rc:
for package in PYTHON_PACKAGES:
j.logger.info(f"Installing {package}...")
rc, _, err = j.sals.process.execute(f"pip3 install {package}")
if rc:
raise j.exceptions.Runtime(err)
cmd = """
jupyter labextension install @jupyter-voila/jupyterlab-preview --no-build
jupyter labextension install @ryantam626/jupyterlab_code_formatter --no-build
jupyter labextension install @jupyter-widgets/jupyterlab-manager --no-build
jupyter labextension install voila --no-build
jupyter lab build --minimize=False
jupyter extension enable voila --sys-prefix
jupyter nbextension install voila --sys-prefix --py
jupyter nbextension enable voila --sys-prefix --py
"""
j.logger.info("Installing jupyter labextensions...")
rc, _, err = j.sals.process.execute(cmd, showout=True)
if rc:
raise j.exceptions.Runtime(err)
def uninstall(self):
"""Called when package is deleted
"""
rc, _, _ = j.sals.process.execute("python -c 'import jupyterlab'")
if not rc:
for package in PYTHON_PACKAGES:
rc, _, err = j.sals.process.execute(f"pip3 uninstall -y {package}", showout=True)
if rc:
raise j.exceptions.Runtime(err)
def start(self):
"""Called when threebot is started
"""
if not self.startupcmd.is_running():
self.startupcmd.start()
def stop(self):
if self.startupcmd.is_running():
self.startupcmd.stop()
|
11493999
|
from os.path import dirname, join
from .printer import prt
description = "Tutorial: Import, type, sort, and hash partition a file."
# The file is stored in the same directory as this python file.
# In a real project, set "input directory" in config file instead!
filename = join(dirname(__file__), 'data.csv')
def main(urd):
prt.source(__file__)
prt()
prt('Import a CSV-file. Type, sort, and hash-partition the imported dataset.')
prt()
imp = urd.build('csvimport', filename=filename)
imp = urd.build(
'dataset_type',
source=imp,
column2type=dict(
Date='date:%Y-%m-%d',
String='unicode:utf-8',
Int='number',
Float='float64',
),
)
imp = urd.build('dataset_sort', source=imp, sort_columns='Date')
imp = urd.build('dataset_hashpart', source=imp, hashlabel='String')
prt()
prt('Note how the output from a build()-call is used as input')
prt('to the next in order to pass data and/or parameters and create')
prt('dependencies.')
prt()
with prt.header('VISUALISING A JOB\'S DEPENDENCIES'):
prt('View the job\'s metadata using')
prt.command('ax job %s' % (imp,))
prt('We can see that the dataset "%s" is input to this job.' % (imp.params.datasets.source,))
prt()
with prt.header('REFERENCES TO ALL JOBS ARE STORED IN THE "urd.joblist" OBJECT:'):
prt.output(urd.joblist.pretty)
prt('All jobs are stored in "urd.joblist" so that they can be easily')
prt('fetched at a later time. This will be used in the next step.')
prt()
with prt.header('HASH PARTITIONED DATASET'):
prt('''
Hash partitioning is a very efficient way to prepare
a dataset for parallel processing (see manual).
Take a look at the dataset created by dataset_hashpart:
''')
prt.command('ax ds %s' % (imp,))
prt('''
The asterisk on the row corresponding to the "String" column
indicates that the dataset is hash partitioned based on the values
in this column. It is possible to use the "-s" option to
"ax cat" to print data from individual slices.)
We can also see how many rows there are in each slice by typing
''')
prt.command('ax ds -s %s' % (imp,))
|
11494004
|
import re
from django.db import models
from logging import getLogger
from api.exceptions import InvalidInput
logger = getLogger(__name__)
class DomainMetadata(models.Model):
"""
This table contains metadata for all domains.
CREATE TABLE public.domainmetadata (
id integer NOT NULL,
domain_id integer,
kind character varying(32),
content text
);
"""
domain = models.ForeignKey('pdns.Domain', blank=True, null=True)
kind = models.CharField(max_length=32, blank=True, null=True)
content = models.TextField(blank=True, null=True)
class Meta:
managed = False
db_table = 'domainmetadata'
def __unicode__(self):
return '(%s: %s=%s)' % (self.domain, self.kind, self.content)
def save(self, *args, **kwargs):
logger.info('Saving domainmetadata entry for domain "%s": "%s"="%s"',
self.domain, self.kind, self.content)
return super(DomainMetadata, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
logger.info('Deleting domainmetadata entry for domain "%s": "%s"="%s"',
self.domain, self.kind, self.content)
return super(DomainMetadata, self).delete(*args, **kwargs)
@property
def get_content(self):
return self.content
class TsigKey(models.Model):
"""
This table stores TSIG DNS keys used for AXFR. It is referenced by DomainMetadata model.
CREATE TABLE public.tsigkeys (
id integer NOT NULL,
name character varying(255),
algorithm character varying(50),
secret character varying(255),
CONSTRAINT c_lowercase_name CHECK (((name)::text = lower((name)::text)))
);
"""
# ALGORITHM = (
# (MD5, "hmac-md5"),
# (SHA1, "hmac-sha1"),
# (SHA224, "hmac-sha224"),
# (SHA256, "hmac-sha256"),
# (SHA384, "hmac-sha384"),
# (SHA512, "hmac-sha512"),
# )
ALGORITHM = (
('hmac-md5', 'hmac-md5'),
('hmac-sha1', 'hmac-sha1'),
('hmac-sha224', 'hmac-sha224'),
('hmac-sha256', 'hmac-sha256'),
('hmac-sha384', 'hmac-sha384'),
('hmac-sha512', 'hmac-sha512'),
)
ALGORITHM_DEFAULT = 'hmac-sha256'
ALGORITHMS = [x[1] for x in ALGORITHM]
name = models.CharField(max_length=255, blank=True, null=True)
algorithm = models.CharField(max_length=50, blank=True, null=True, choices=ALGORITHM, default=ALGORITHM_DEFAULT)
secret = models.CharField(max_length=255, blank=True, null=True)
class Meta:
managed = False
db_table = 'tsigkeys'
unique_together = (('name', 'algorithm'),)
def validate(self):
logger.error('Matching name: %s', self.name)
if not re.match(r'^[A-Za-z0-9._/-]{1,250}$', self.name):
logger.error('Matching name 2x: %s', self.name)
raise InvalidInput('Invalid TSIG name: "%s"' % self.name)
if len(self.secret) > 250:
raise InvalidInput('TSIG secret too long')
if self.algorithm not in self.ALGORITHMS:
raise InvalidInput('Invalid TSIG algorithm: "%s". Must be one of: %s' % (self.algorithm, self.ALGORITHMS))
return True
def __str__(self):
return '(%s:%s:%s)' % (self.algorithm, self.name, self.secret)
def __unicode__(self):
return self.__str__()
def to_str(self):
return '%s:%s:%s' % (self.algorithm, self.name, self.secret)
# def save(self, *args, **kwargs):
# logger.info('Saving tsigkey entry "%s" with algorithm "%s" and content "%s"',
# self.name, self.algorithm, self.secret)
# return super(TsigKey, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
for domain in self.get_linked_axfr_domains():
self.unlink_axfr_domain(domain)
logger.info('Deleting tsigkey entry "%s" with algorithm "%s" and content "%s"',
self.name, self.algorithm, self.secret)
return super(TsigKey, self).delete(*args, **kwargs)
@staticmethod
def get_linked_axfr_keys(domain):
"""
Returns list of TSIG keys set for specified domain.
:param domain: Domain object
:return: list of TsigKey objects
"""
return [TsigKey.objects.get(name=key_name.content) for key_name in
DomainMetadata.objects.filter(kind='TSIG-ALLOW-AXFR', domain_id=domain.id)]
def get_linked_axfr_domains(self):
"""
Returns list of domains which use this TSIG key.
:return: list of Domain objects
"""
return [entry.domain for entry in DomainMetadata.objects.filter(kind='TSIG-ALLOW-AXFR', content=self.name)]
def link_to_axfr_domain(self, domain):
"""
Sets TSIG-ALLOW-AXFR for domain.id to tsigkey.name.
:param domain: Domain object
:return: django exception on error, else nothing
"""
try:
DomainMetadata.objects.get(kind='TSIG-ALLOW-AXFR', domain_id=domain.id, content=self.name)
# key already exists, do nothing
return
except DomainMetadata.DoesNotExist:
DomainMetadata(kind='TSIG-ALLOW-AXFR', domain_id=domain.id, content=self.name).save()
def unlink_axfr_domain(self, domain):
"""
Removes entry TSIG-ALLOW-AXFR for domain.id pointing to self.name.
:param domain: Domain object
:return: django exception on error, else nothing
"""
try:
link = DomainMetadata.objects.get(kind='TSIG-ALLOW-AXFR', domain_id=domain.id, content=self.name)
link.delete()
if len(self.get_linked_axfr_domains()) == 0:
# no domains use this key anymore... we can delete it
logger.info('The TSIG key "%s" is no longer in use. Deleting it.' % self.name)
self.delete()
except DomainMetadata.DoesNotExist:
# nothing to delete
return
@staticmethod
def parse_tsig_string(text):
"""
Converts string to TsigKey object.
:param text: "algorithm:name:key" or "name:key" (default algorithm is TsigKey.ALGORITHM_DEFAULT)
:return: new TsigKey object or None on error
"""
try:
key = text.split(':')
if len(key) == 2:
return TsigKey(algorithm=TsigKey.ALGORITHM_DEFAULT, name=key[0], secret=key[1])
elif len(key) == 1:
# this creates an invalid TsigKey, but the name can also be filled in later to make the object valid
return TsigKey(algorithm=TsigKey.ALGORITHM_DEFAULT, name='', secret=key[0])
else:
return TsigKey(algorithm=key[0], name=key[1], secret=key[2])
except IndexError:
# invalid TSIG string format
return None
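# Illustrative example (values are made up): parse_tsig_string('hmac-sha512:mykey1:c2VjcmV0')
# yields TsigKey(algorithm='hmac-sha512', name='mykey1', secret='c2VjcmV0'), while
# parse_tsig_string('mykey1:c2VjcmV0') falls back to ALGORITHM_DEFAULT ('hmac-sha256').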
|
11494017
|
from pypy.annotation import model as annmodel
from pypy.tool.pairtype import pair, pairtype
from pypy.jit.hintannotator.bookkeeper import getbookkeeper
from pypy.rpython.lltypesystem import lltype, lloperation
from pypy.rpython.ootypesystem import ootype
from pypy.translator.simplify import get_funcobj, get_functype
UNARY_OPERATIONS = """same_as hint getfield setfield getsubstruct getarraysize
getinteriorfield getinteriorarraysize setinteriorfield
cast_pointer
direct_call
indirect_call
int_is_true int_neg int_abs int_invert bool_not
int_neg_ovf int_abs_ovf
uint_is_true
cast_int_to_char
cast_int_to_uint
cast_uint_to_int
cast_char_to_int
cast_bool_to_int
cast_ptr_to_int
ptr_nonzero
ptr_iszero
is_early_constant
oogetfield
oosetfield
oononnull
ooupcast
oodowncast
oois
subclassof
instanceof
oostring
""".split()
BINARY_OPERATIONS = """int_add int_sub int_mul int_mod int_and int_rshift
int_lshift int_floordiv int_xor int_or
int_add_ovf int_sub_ovf int_mul_ovf int_mod_ovf
int_floordiv_ovf int_lshift_ovf int_add_nonneg_ovf
uint_add uint_sub uint_mul uint_mod uint_and
uint_lshift uint_rshift uint_floordiv
char_gt char_lt char_le char_ge char_eq char_ne
int_gt int_lt int_le int_ge int_eq int_ne
uint_gt uint_lt uint_le uint_ge uint_eq uint_ne
getarrayitem setarrayitem
getarraysubstruct
ptr_eq ptr_ne""".split()
class HintError(Exception):
pass
class OriginFlags(object):
fixed = False
read_positions = None
greenargs = False
def __init__(self, bookkeeper=None, spaceop=None):
self.bookkeeper = bookkeeper
self.spaceop = spaceop
def __repr__(self):
return '<%s %s>' % (getattr(self.spaceop, 'result', '?'),
self.reprstate())
def reprstate(self):
if self.fixed:
s = "fixed "
elif self.greenargs:
s = "green"
else:
s = ""
return "%sorigin" % (s,)
def read_fixed(self):
if self.read_positions is None:
self.read_positions = {}
self.read_positions[getbookkeeper().position_key] = True
return self.fixed
def set_fixed(self):
if not self.fixed:
self.fixed = True
if self.read_positions:
annotator = getbookkeeper().annotator
for p in self.read_positions:
annotator.reflowfromposition(p)
def record_dependencies(self, greenorigindependencies,
callreturndependencies):
deps = greenorigindependencies.setdefault(self, [])
deps.extend(self.spaceop.args)
class CallOpOriginFlags(OriginFlags):
def record_dependencies(self, greenorigindependencies,
callreturndependencies):
bk = self.bookkeeper
if self.spaceop.opname in ('direct_call', 'ts_metacall'):
args = self.spaceop.args[1:]
elif self.spaceop.opname == 'indirect_call':
args = self.spaceop.args[1:-1]
# indirect_call with a red callable must return a red
# (see test_indirect_yellow_call)
v_callable = self.spaceop.args[0]
retdeps = greenorigindependencies.setdefault(self, [])
retdeps.append(v_callable)
elif self.spaceop.opname == 'oosend':
args = self.spaceop.args[1:]
methname = self.spaceop.args[0].value
TYPE = self.spaceop.args[1].concretetype
graphs = TYPE._lookup_graphs(methname)
if len(graphs) > 1:
v_self = self.spaceop.args[1]
retdeps = greenorigindependencies.setdefault(self, [])
retdeps.append(v_self)
else:
raise AssertionError(self.spaceop.opname)
graph = self.any_called_graph
call_families = bk.tsgraph_maximal_call_families
_, repgraph, callfamily = call_families.find(graph)
# record the argument and return value dependencies
retdeps = callreturndependencies.setdefault(self, [])
for graph in callfamily.tsgraphs:
retdeps.append(graph)
for i, v in enumerate(args):
argorigin = bk.myinputargorigin(graph, i)
deps = greenorigindependencies.setdefault(argorigin, [])
deps.append(v)
class InputArgOriginFlags(OriginFlags):
def __init__(self, bookkeeper, graph, i):
OriginFlags.__init__(self, bookkeeper)
self.graph = graph
self.i = i
def getarg(self):
return self.graph.getargs()[self.i]
def __repr__(self):
return '<%s %s>' % (self.getarg(), self.reprstate())
def record_dependencies(self, greenorigindependencies,
callreturndependencies):
bk = self.bookkeeper
call_families = bk.tsgraph_maximal_call_families
_, repgraph, callfamily = call_families.find(self.graph)
# record the fact that each graph's input args should be as red
# as each other's
if self.graph is repgraph:
deps = greenorigindependencies.setdefault(self, [])
v = self.getarg()
for othergraph in callfamily.tsgraphs:
if othergraph is not repgraph:
deps.append(othergraph.getargs()[self.i])
otherorigin = bk.myinputargorigin(othergraph, self.i)
otherdeps = greenorigindependencies.setdefault(otherorigin,
[])
otherdeps.append(v)
# ____________________________________________________________
class SomeLLAbstractValue(annmodel.SomeObject):
def __init__(self, T, deepfrozen=False):
self.concretetype = T
assert self.__class__ != SomeLLAbstractValue
self.deepfrozen = deepfrozen
def is_green(self):
return False
def clone(self):
c = object.__new__(self.__class__)
c.__dict__.update(self.__dict__)
return c
class SomeLLAbstractConstant(SomeLLAbstractValue):
" color: dont know yet.. "
def __init__(self, T, origins, eager_concrete=False, myorigin=None,
deepfrozen=False):
SomeLLAbstractValue.__init__(self, T, deepfrozen)
self.origins = origins
self.eager_concrete = eager_concrete
self.myorigin = myorigin
def fmt_origins(self, origins):
counts = {}
for o in origins:
x = o.reprstate()
counts[x] = counts.get(x, 0) + 1
items = counts.items()
items.sort()
lst = []
for key, count in items:
s = ''
if count > 1:
s += '%d*' % count
s += key
lst.append(s)
return '<%s>' % (', '.join(lst),)
def fmt_myorigin(self, myorigin):
if myorigin is None:
return None
else:
return repr(myorigin)
def is_fixed(self):
for o in self.origins:
if not o.fixed:
return False
return self.concretetype is not lltype.Void
def is_green(self):
return (self.concretetype is lltype.Void or
self.is_fixed() or self.eager_concrete or
(self.myorigin is not None and self.myorigin.greenargs))
def annotationcolor(self):
"""Compute the color of the variables with this annotation
for the pygame viewer
"""
try:
if self.concretetype is lltype.Void:
return annmodel.s_ImpossibleValue.annotationcolor
elif self.eager_concrete:
return (0,100,0) # green
elif self.is_green():
return (50,140,0) # green-dark-cyan
else:
return None
except KeyError: # can occur in is_green() if annotation crashed
return (0,200,200)
annotationcolor = property(annotationcolor)
class SomeLLAbstractVariable(SomeLLAbstractValue):
" color: hopelessly red"
def __init__(self, T, deepfrozen=False):
SomeLLAbstractValue.__init__(self, T, deepfrozen)
assert T is not lltype.Void # use bookkeeper.valueoftype()
def variableoftype(TYPE, deepfrozen=False):
# the union of all annotations of the given TYPE - that's a
# SomeLLAbstractVariable, unless TYPE is Void
if TYPE is lltype.Void:
return s_void
else:
return SomeLLAbstractVariable(TYPE, deepfrozen=deepfrozen)
class SomeLLAbstractContainer(SomeLLAbstractValue):
deepfrozen = False # XXX for now
def __init__(self, contentdef):
self.contentdef = contentdef
T = contentdef.T
if isinstance(T, ootype.OOType):
self.concretetype = T
else:
self.concretetype = lltype.Ptr(T)
def annotationcolor(self):
"""Compute the color of the variables with this annotation
for the pygame viewer
"""
if getattr(self.contentdef, 'degenerated', False):
return None
else:
return (0,60,160) # blue
annotationcolor = property(annotationcolor)
s_void = SomeLLAbstractConstant(lltype.Void, {})
setunion = annmodel.setunion
def setadd(set, newitem):
if newitem not in set:
set = set.copy()
set[newitem] = True
return set
def newset(set, *sets):
set = set.copy()
for s2 in sets:
set.update(s2)
return set
def reorigin(hs_v1, *deps_hs):
"""Make a copy of hs_v1 with its origins removed and replaced by myorigin().
Optionally, the origins of other annotations can also be added.
"""
if isinstance(hs_v1, SomeLLAbstractConstant):
deps_origins = [hs_dep.origins for hs_dep in deps_hs
if isinstance(hs_dep, SomeLLAbstractConstant)]
d = newset({getbookkeeper().myorigin(): True},
*deps_origins)
return SomeLLAbstractConstant(hs_v1.concretetype, d,
eager_concrete=hs_v1.eager_concrete,
deepfrozen=hs_v1.deepfrozen)
else:
return hs_v1
def originalconcretetype(hs):
if isinstance(hs, annmodel.SomeImpossibleValue):
return lltype.Void
else:
return hs.concretetype
def deepunfreeze(hs):
if hs.deepfrozen:
hs = hs.clone()
hs.deepfrozen = False
return hs
# ____________________________________________________________
# operations
class __extend__(SomeLLAbstractValue):
def same_as(hs_v1):
return hs_v1
def hint(hs_v1, hs_flags):
if hs_flags.const.get('variable', False): # only for testing purposes!!!
return SomeLLAbstractVariable(hs_v1.concretetype)
if hs_flags.const.get('forget', False):
# turn a variable to a constant
origin = getbookkeeper().myorigin()
return SomeLLAbstractConstant(hs_v1.concretetype, {origin: True})
if hs_flags.const.get('promote', False):
hs_concrete = SomeLLAbstractConstant(hs_v1.concretetype, {})
#hs_concrete.eager_concrete = True
return hs_concrete
if hs_flags.const.get('deepfreeze', False):
hs_clone = hs_v1.clone()
hs_clone.deepfrozen = True
return hs_clone
for name in ["reverse_split_queue", "global_merge_point",
"access_directly"]:
if hs_flags.const.get(name, False):
return
raise HintError("hint %s makes no sense on %r" % (hs_flags.const,
hs_v1))
def is_early_constant(hs_v1):
return SomeLLAbstractConstant(lltype.Bool, {})
def getfield(hs_v1, hs_fieldname):
S = hs_v1.concretetype.TO
FIELD_TYPE = getattr(S, hs_fieldname.const)
return variableoftype(FIELD_TYPE, hs_v1.deepfrozen)
def oogetfield(hs_v1, hs_fieldname):
_, FIELD_TYPE = hs_v1.concretetype._lookup_field(hs_fieldname.const)
return variableoftype(FIELD_TYPE, hs_v1.deepfrozen)
def setfield(hs_v1, hs_fieldname, hs_value):
pass
def oosetfield(hs_v1, hs_fieldname, hs_value):
pass
def getsubstruct(hs_v1, hs_fieldname):
S = hs_v1.concretetype.TO
FIELD_TYPE = getattr(S, hs_fieldname.const)
return SomeLLAbstractVariable(lltype.Ptr(FIELD_TYPE), hs_v1.deepfrozen)
def _getinterior(hs_v1, *offsets_hs):
hs_container = hs_v1
for hs_offset in offsets_hs:
if hs_offset.concretetype is lltype.Signed:
hs_container = pair(hs_container,hs_offset).getarraysubstruct()
else:
hs_container = hs_container.getsubstruct(hs_offset)
return hs_container
def getinteriorfield(hs_v1, *offsets_hs):
hs_container = hs_v1._getinterior(*offsets_hs[:-1])
hs_lastofs = offsets_hs[-1]
if hs_lastofs.concretetype is lltype.Signed:
return pair(hs_container, hs_lastofs).getarrayitem()
else:
return hs_container.getfield(hs_lastofs)
def getinteriorarraysize(hs_v1, *offsets_hs):
return hs_v1._getinterior(*offsets_hs).getarraysize()
def setinteriorfield(hs_v1, *offsets_and_val_hs):
hs_inner = hs_v1._getinterior(*offsets_and_val_hs[:-2])
hs_lastofs = offsets_and_val_hs[-2]
hs_value = offsets_and_val_hs[-1]
if hs_lastofs.concretetype is lltype.Signed:
pair(hs_inner, hs_lastofs).setarrayitem(hs_value)
else:
hs_inner.setfield(hs_lastofs, hs_value)
def cast_pointer(hs_v1):
RESTYPE = getbookkeeper().current_op_concretetype()
return SomeLLAbstractVariable(RESTYPE, hs_v1.deepfrozen)
ooupcast = cast_pointer
oodowncast = cast_pointer
def indirect_call(hs_v1, *args_hs):
hs_graph_list = args_hs[-1]
args_hs = args_hs[:-1]
assert hs_graph_list.is_constant()
graph_list = hs_graph_list.const
FUNC = get_functype(hs_v1.concretetype)
return hs_v1._call_multiple_graphs(graph_list, FUNC.RESULT, *args_hs)
def _call_multiple_graphs(hs_v1, graph_list, RESULT, *args_hs):
if graph_list is None:
# cannot follow indirect calls to unknown targets
return variableoftype(RESULT)
bookkeeper = getbookkeeper()
myorigin = bookkeeper.myorigin()
myorigin.__class__ = CallOpOriginFlags # thud
fixed = myorigin.read_fixed()
tsgraphs_accum = []
hs_res = bookkeeper.graph_family_call(graph_list, fixed, args_hs,
tsgraphs_accum, hs_v1)
myorigin.any_called_graph = tsgraphs_accum[0]
if isinstance(hs_res, SomeLLAbstractConstant):
hs_res.myorigin = myorigin
# we need to make sure that hs_res does not become temporarily less
# general as a result of calling another specialized version of the
# function
return annmodel.unionof(hs_res, bookkeeper.current_op_binding())
def _call_single_graph(hs_f1, graph, RESULT, *args_hs):
bookkeeper = getbookkeeper()
if not bookkeeper.annotator.policy.look_inside_graph(graph):
return cannot_follow_call(bookkeeper, graph, args_hs, RESULT)
# recursive call from the entry point to itself: ignore them and
# just hope the annotations are correct
if (bookkeeper.getdesc(graph)._cache.get(None, None) is
bookkeeper.annotator.translator.graphs[0]):
return variableoftype(RESULT)
myorigin = bookkeeper.myorigin()
myorigin.__class__ = CallOpOriginFlags # thud
fixed = myorigin.read_fixed()
tsgraphs_accum = []
hs_res = bookkeeper.graph_call(graph, fixed, args_hs,
tsgraphs_accum)
myorigin.any_called_graph = tsgraphs_accum[0]
if isinstance(hs_res, SomeLLAbstractConstant):
hs_res.myorigin = myorigin
# we need to make sure that hs_res does not become temporarily less
# general as a result of calling another specialized version of the
# function
return annmodel.unionof(hs_res, bookkeeper.current_op_binding())
def oosend(hs_c1, hs_name, *args_hs):
TYPE = hs_c1.concretetype
name = hs_name.const
_, meth = TYPE._lookup(name)
METH = lltype.typeOf(meth)
graph_list = TYPE._lookup_graphs(name)
if not graph_list:
# it's a graphless method of a BuiltinADTType
bk = getbookkeeper()
return handle_highlevel_operation_novirtual(bk, True, TYPE.immutable, hs_c1, *args_hs)
elif len(graph_list) == 1:
# like a direct_call
graph = graph_list.pop()
return hs_c1._call_single_graph(graph, METH.RESULT, hs_c1, *args_hs) # prepend hs_c1 to the args
else:
# like an indirect_call
return hs_c1._call_multiple_graphs(graph_list, METH.RESULT, hs_c1, *args_hs) # prepend hs_c1 to the args
class __extend__(SomeLLAbstractConstant):
def same_as(hs_c1):
# this is here to prevent setup() below from adding a different
# version of same_as()
return hs_c1
def hint(hs_c1, hs_flags):
if hs_flags.const.get('concrete', False):
for o in hs_c1.origins:
o.set_fixed()
hs_concrete = reorigin(hs_c1)
hs_concrete.eager_concrete = True
return hs_concrete
if hs_flags.const.get('forget', False):
assert isinstance(hs_c1, SomeLLAbstractConstant)
return reorigin(hs_c1)
return SomeLLAbstractValue.hint(hs_c1, hs_flags)
def direct_call(hs_f1, *args_hs):
bookkeeper = getbookkeeper()
fnobj = get_funcobj(hs_f1.const)
if (bookkeeper.annotator.policy.oopspec and
hasattr(fnobj._callable, 'oopspec')):
# try to handle the call as a high-level operation
try:
return handle_highlevel_operation(bookkeeper, fnobj._callable,
*args_hs)
except NotImplementedError:
pass
# normal call
if not hasattr(fnobj, 'graph'):
raise NotImplementedError("XXX call to externals or primitives")
return hs_f1._call_single_graph(fnobj.graph, lltype.typeOf(fnobj).RESULT, *args_hs)
def getfield(hs_c1, hs_fieldname):
S = hs_c1.concretetype.TO
FIELD_TYPE = getattr(S, hs_fieldname.const)
return hs_c1.getfield_impl(S, FIELD_TYPE)
def oogetfield(hs_c1, hs_fieldname):
S = hs_c1.concretetype
_, FIELD_TYPE = S._lookup_field(hs_fieldname.const)
return hs_c1.getfield_impl(S, FIELD_TYPE)
def getfield_impl(hs_c1, S, FIELD_TYPE):
if S._hints.get('immutable', False) or hs_c1.deepfrozen:
origin = getbookkeeper().myorigin()
d = setadd(hs_c1.origins, origin)
return SomeLLAbstractConstant(FIELD_TYPE, d,
eager_concrete=hs_c1.eager_concrete,
myorigin=origin,
deepfrozen=hs_c1.deepfrozen)
else:
return variableoftype(FIELD_TYPE)
def getsubstruct(hs_c1, hs_fieldname):
S = hs_c1.concretetype.TO
SUB_TYPE = getattr(S, hs_fieldname.const)
origin = getbookkeeper().myorigin()
d = setadd(hs_c1.origins, origin)
return SomeLLAbstractConstant(lltype.Ptr(SUB_TYPE), d,
myorigin=origin,
deepfrozen=hs_c1.deepfrozen)
def cast_pointer(hs_c1):
bk = getbookkeeper()
origin = bk.myorigin()
d = setadd(hs_c1.origins, origin)
RESTYPE = bk.current_op_concretetype()
return SomeLLAbstractConstant(RESTYPE, d,
eager_concrete = hs_c1.eager_concrete,
myorigin = origin,
deepfrozen = hs_c1.deepfrozen)
ooupcast = cast_pointer
oodowncast = cast_pointer
class __extend__(SomeLLAbstractContainer):
def setfield(hs_s1, hs_fieldname, hs_value):
hs_s1.contentdef.generalize_field(hs_fieldname.const, hs_value)
def getfield(hs_s1, hs_fieldname):
return hs_s1.contentdef.read_field(hs_fieldname.const)
getsubstruct = getfield
def setarrayitem(hs_a1, hs_index, hs_value):
hs_a1.contentdef.generalize_item(hs_value)
def getarraysize(hs_a1):
origin = getbookkeeper().myorigin()
return SomeLLAbstractConstant(lltype.Signed, {origin: True})
def cast_pointer(hs_s1):
TO = getbookkeeper().current_op_concretetype()
res_vstruct =hs_s1.contentdef.cast(TO)
return SomeLLAbstractContainer(res_vstruct)
def ptr_nonzero(hs_s1):
return getbookkeeper().immutablevalue(True)
def ptr_iszero(hs_s1):
return getbookkeeper().immutablevalue(False)
# ____________________________________________________________
# binary
class __extend__(pairtype(SomeLLAbstractValue, SomeLLAbstractValue)):
def getarrayitem((hs_v1, hs_v2)):
return variableoftype(hs_v1.concretetype.TO.OF, hs_v1.deepfrozen)
def setarrayitem((hs_v1, hs_v2), hs_v3):
pass
def getarraysubstruct((hs_v1, hs_v2)):
return SomeLLAbstractVariable(lltype.Ptr(hs_v1.concretetype.TO.OF),
hs_v1.deepfrozen)
def union((hs_v1, hs_v2)):
if hs_v1.deepfrozen != hs_v2.deepfrozen:
hs_v1 = deepunfreeze(hs_v1)
hs_v2 = deepunfreeze(hs_v2)
if hs_v1 == hs_v2:
return hs_v1
return pair(hs_v1, hs_v2).union_frozen_equal()
def invalid_union((hs_v1, hs_v2)):
raise annmodel.UnionError("%s %s don't mix" % (hs_v1, hs_v2))
union_frozen_equal = invalid_union
class __extend__(pairtype(SomeLLAbstractVariable, SomeLLAbstractConstant),
pairtype(SomeLLAbstractConstant, SomeLLAbstractVariable)):
def union_frozen_equal((hs_v1, hs_v2)):
assert hs_v1.concretetype == hs_v2.concretetype
if (getattr(hs_v1, 'eager_concrete', False) or
getattr(hs_v2, 'eager_concrete', False)):
pair(hs_v1, hs_v2).invalid_union()
return variableoftype(hs_v1.concretetype, hs_v1.deepfrozen)
class __extend__(pairtype(SomeLLAbstractConstant, SomeLLAbstractConstant)):
def union_frozen_equal((hs_c1, hs_c2)):
assert hs_c1.concretetype == hs_c2.concretetype
d = newset(hs_c1.origins, hs_c2.origins)
if hs_c1.myorigin is hs_c2.myorigin:
myorigin = hs_c1.myorigin
else:
myorigin = None
return SomeLLAbstractConstant(hs_c1.concretetype, d,
eager_concrete = hs_c1.eager_concrete and
hs_c2.eager_concrete,
myorigin = myorigin,
deepfrozen = hs_c1.deepfrozen)
def getarrayitem((hs_c1, hs_index)):
A = hs_c1.concretetype.TO
READ_TYPE = A.OF
if A._hints.get('immutable', False) or hs_c1.deepfrozen:
origin = getbookkeeper().myorigin()
d = newset(hs_c1.origins, hs_index.origins, {origin: True})
return SomeLLAbstractConstant(READ_TYPE, d,
eager_concrete=hs_c1.eager_concrete,
myorigin=origin,
deepfrozen=hs_c1.deepfrozen)
else:
return variableoftype(READ_TYPE)
def getarraysubstruct((hs_c1, hs_index)):
A = hs_c1.concretetype.TO
SUB_TYPE = A.OF
origin = getbookkeeper().myorigin()
d = newset(hs_c1.origins, hs_index.origins, {origin: True})
return SomeLLAbstractConstant(lltype.Ptr(SUB_TYPE), d,
myorigin=origin,
deepfrozen=hs_c1.deepfrozen)
class __extend__(pairtype(SomeLLAbstractContainer, SomeLLAbstractContainer)):
def union_frozen_equal((hs_cont1, hs_cont2)):
contentdef = hs_cont1.contentdef.union(hs_cont2.contentdef)
return SomeLLAbstractContainer(contentdef) # XXX deepfrozen?
def ptr_eq((hs_cont1, hs_cont2)):
return SomeLLAbstractConstant(lltype.Bool, {})
def ptr_ne((hs_cont1, hs_cont2)):
return SomeLLAbstractConstant(lltype.Bool, {})
class __extend__(pairtype(SomeLLAbstractContainer, SomeLLAbstractValue)):
def union_frozen_equal((hs_cont1, hs_val2)):
hs_cont1.contentdef.mark_degenerated()
assert hs_cont1.concretetype == hs_val2.concretetype
return SomeLLAbstractVariable(hs_cont1.concretetype) # XXX deepfrozen?
class __extend__(pairtype(SomeLLAbstractValue, SomeLLAbstractContainer)):
def union_frozen_equal((hs_val1, hs_cont2)):
return pair(hs_cont2, hs_val1).union_frozen_equal()
class __extend__(pairtype(SomeLLAbstractContainer, SomeLLAbstractValue),
pairtype(SomeLLAbstractValue, SomeLLAbstractContainer)):
def ptr_eq(_):
return getbookkeeper().immutablevalue(False)
def ptr_ne(_):
return getbookkeeper().immutablevalue(True)
class __extend__(pairtype(SomeLLAbstractContainer, SomeLLAbstractConstant)):
def getarrayitem((hs_a1, hs_index)):
hs_res = hs_a1.contentdef.read_item()
return reorigin(hs_res, hs_res, hs_index)
# ____________________________________________________________
def handle_highlevel_operation_novirtual(bookkeeper, ismethod, immutable, *args_hs):
RESULT = bookkeeper.current_op_concretetype()
deepfrozen = ismethod and args_hs[0].deepfrozen # if self is deepfrozen, the result is it too
if ismethod and (immutable or args_hs[0].deepfrozen):
for hs_v in args_hs:
if not isinstance(hs_v, SomeLLAbstractConstant):
break
else:
myorigin = bookkeeper.myorigin()
d = newset({myorigin: True}, *[hs_c.origins
for hs_c in args_hs])
return SomeLLAbstractConstant(RESULT, d,
eager_concrete = False, # probably
myorigin = myorigin,
deepfrozen=deepfrozen)
return variableoftype(RESULT, deepfrozen=deepfrozen)
def handle_highlevel_operation(bookkeeper, ll_func, *args_hs):
# parse the oopspec and fill in the arguments
operation_name, args = ll_func.oopspec.split('(', 1)
assert args.endswith(')')
args = args[:-1] + ',' # trailing comma to force tuple syntax
if args.strip() == ',':
args = '()'
argnames = ll_func.func_code.co_varnames[:len(args_hs)]
d = dict(zip(argnames, args_hs))
argtuple = eval(args, d)
args_hs = []
for hs in argtuple:
if not isinstance(hs, SomeLLAbstractValue):
hs = bookkeeper.immutablevalue(hs)
args_hs.append(hs)
# end of rather XXX'edly hackish parsing
if bookkeeper.annotator.policy.novirtualcontainer:
# "blue variables" disabled, we just return a red var all the time.
# Exception: an operation on a frozen container is constant-foldable.
ismethod = '.' in operation_name
return handle_highlevel_operation_novirtual(bookkeeper, ismethod, False, *args_hs)
# --- the code below is not used any more except by test_annotator.py ---
if operation_name == 'newlist':
from pypy.jit.hintannotator.vlist import oop_newlist
handler = oop_newlist
else:
# dispatch on the 'self' argument if it is virtual
hs_self = args_hs[0]
args_hs = args_hs[1:]
type_name, operation_name = operation_name.split('.')
if not isinstance(hs_self, SomeLLAbstractContainer):
raise NotImplementedError
if getattr(hs_self.contentdef, 'type_name', None) != type_name:
raise NotImplementedError
try:
handler = getattr(hs_self.contentdef, 'oop_' + operation_name)
except AttributeError:
bookkeeper.warning('missing handler: oop_%s' % (operation_name,))
raise NotImplementedError
hs_result = handler(*args_hs) # which may raise NotImplementedError
return hs_result
def cannot_follow_call(bookkeeper, graph, args_hs, RESTYPE):
# the policy prevents us from following the call
pure_call = bookkeeper.is_pure_graph(graph)
# when calling pure graphs, consider the call as an operation.
for hs in args_hs:
if not isinstance(hs, SomeLLAbstractConstant):
pure_call = False
break
if pure_call:
# if all arguments are SomeLLAbstractConstant, so can the result be.
myorigin = bookkeeper.myorigin()
d = newset({myorigin: True}, *[hs_c.origins for hs_c in args_hs])
h_res = SomeLLAbstractConstant(RESTYPE, d,
eager_concrete = False, # probably
myorigin = myorigin)
else:
h_res = variableoftype(RESTYPE)
return h_res
# ____________________________________________________________
#
# Register automatically simple operations
def var_unary(hs_v, *rest_hs):
RESTYPE = getbookkeeper().current_op_concretetype()
return SomeLLAbstractVariable(RESTYPE)
def var_binary((hs_v1, hs_v2), *rest_hs):
RESTYPE = getbookkeeper().current_op_concretetype()
return SomeLLAbstractVariable(RESTYPE)
def const_unary(llop, hs_c1):
#XXX unsure hacks
bk = getbookkeeper()
origin = bk.myorigin()
d = setadd(hs_c1.origins, origin)
RESTYPE = bk.current_op_concretetype()
hs_res = SomeLLAbstractConstant(RESTYPE, d,
eager_concrete = hs_c1.eager_concrete,
myorigin = origin)
if hs_c1.is_constant():
try:
hs_res.const = llop(RESTYPE, hs_c1.const)
except Exception: # XXX not too nice
pass
return hs_res
def const_binary(llop, (hs_c1, hs_c2)):
#XXX unsure hacks
bk = getbookkeeper()
origin = bk.myorigin()
d = newset(hs_c1.origins, hs_c2.origins, {origin: True})
RESTYPE = bk.current_op_concretetype()
hs_res = SomeLLAbstractConstant(RESTYPE, d,
eager_concrete = hs_c1.eager_concrete or
hs_c2.eager_concrete,
myorigin = origin)
if hs_c1.is_constant() and hs_c2.is_constant():
try:
hs_res.const = llop(RESTYPE, hs_c1.const, hs_c2.const)
except Exception: # XXX not too nice
pass
return hs_res
def setup(oplist, ValueCls, var_fn, ConstantCls, const_fn):
for name in oplist:
llop = getattr(lloperation.llop, name)
if not llop.sideeffects or llop.tryfold:
if name not in ValueCls.__dict__:
setattr(ValueCls, name, var_fn)
if llop.canfold or llop.tryfold:
if name not in ConstantCls.__dict__:
setattr(ConstantCls, name,
lambda s, llop=llop: const_fn(llop, s))
setup(UNARY_OPERATIONS,
SomeLLAbstractValue, var_unary,
SomeLLAbstractConstant, const_unary)
setup(BINARY_OPERATIONS,
pairtype(SomeLLAbstractValue, SomeLLAbstractValue), var_binary,
pairtype(SomeLLAbstractConstant, SomeLLAbstractConstant), const_binary)
del setup
|
11494024
|
import os
import pickle
import sys
from gensim.models.word2vec import Word2Vec
import numpy
from sklearn.cluster import KMeans
from customlogging import logger
import conf
if __name__ == '__main__':
w2vmodelfile = os.path.join(conf.W2V_DIR, 'model')
if not os.path.exists(w2vmodelfile):
print('Word2vec model file "%s" not found.' % w2vmodelfile)
print('Did you run train_word2vec.py?')
sys.exit(1)
logger.debug('Loading word embedding')
emb = Word2Vec.load(w2vmodelfile)
vocab = emb.index2word
wordvecs = emb.wv.syn0
# add UNK word at origin of embedding space
vocab.append('UNK')
wordvecs = numpy.vstack((wordvecs, numpy.zeros(wordvecs.shape[1])))
clusterer = KMeans(n_clusters=conf.BOCID_NCLUSTERS, random_state=conf.SEED,
max_iter=conf.BOCID_CLUSTITER, n_jobs=-1)
logger.debug('Starting clustering')
VC = clusterer.fit_predict(wordvecs)
logger.debug('Matching words to cluster IDs')
word2cid = { vocab[k]: VC[k] for k in range(len(vocab)) }
pickle.dump(word2cid, open(conf.BOCID_CLUSTFILE, 'wb'))
logger.debug('Wrote word-to-cluster-ID mapping to "%s"',
conf.BOCID_CLUSTFILE)
|
11494039
|
import json
import os
from newrelic.agent import NewRelicContextFormatter
# Set DEBUG = True to enable debugging application.
DEBUG = os.environ.get("ITS_DEBUG", "false").lower() == "true"
# We don't want to enforce type checks in production environments (probably)
ENFORCE_TYPE_CHECKS = (
os.environ.get("ITS_ENFORCE_TYPE_CHECKS", "false").lower() == "true"
)
MIME_TYPES = {
"PNG": "image/png",
"JPEG": "image/jpeg",
"JPG": "image/jpeg",
"WEBP": "image/webp",
"SVG": "image/svg+xml",
}
PNGQUANT_PATH = os.environ.get("ITS_PNGQUANT_PATH", "pngquant")
DEFAULT_JPEG_QUALITY = int(os.environ.get("ITS_DEFAULT_JPEG_QUALITY", "95"))
DEFAULT_NAMESPACES = json.dumps(
{
"default": {"loader": "http", "prefixes": [""]},
"overlay": {"loader": "file_system", "prefixes": ["test/overlay"]},
"folders": {"loader": "file_system", "prefixes": [""]},
"tests": {"loader": "file_system", "folders": ["tests/images"]},
"merlin": {
"loader": "http",
"prefixes": ["s3.amazonaws.com", "pbs.merlin.cdn.prod"],
},
"station-images": {
"redirect": True,
"url": "https://station-service.example.com/station/image-redirects/",
"query-param": "url",
},
}
)
NAMESPACES = json.JSONDecoder().decode(
s=os.environ.get("ITS_BACKENDS", DEFAULT_NAMESPACES)
)
S3_FALLBACK = json.JSONDecoder().decode(
os.environ.get("ITS_S3_FALLBACK", '{}'))
AUTH_CREDENTIALS = json.JSONDecoder().decode(
os.environ.get("ITS_CREDENTIALS", '{}'))
DEFAULT_OVERLAYS = json.dumps({"passport": "tests/images/logo.png"})
DEFAULT_OVERLAY_POSITIONINGS = json.dumps(
{"passport": {"top": 0.05, "left": 0.05}})
DEFAULT_OVERLAY_PROPORTIONS = json.dumps({"passport": 0.2})
OVERLAYS = json.JSONDecoder().decode(
s=os.environ.get("ITS_OVERLAYS", DEFAULT_OVERLAYS))
OVERLAY_POSITIONINGS = json.JSONDecoder().decode(
s=os.environ.get("ITS_OVERLAY_POSITIONINGS", DEFAULT_OVERLAY_POSITIONINGS))
OVERLAY_PROPORTIONS = json.JSONDecoder().decode(
s=os.environ.get("ITS_OVERLAY_PROPORTIONS", DEFAULT_OVERLAY_PROPORTIONS))
# the keyword used to recognize focal point args in filenames
FOCUS_KEYWORD = os.environ.get("ITS_FOCUS_KEYWORD", "focus-")
DELIMITERS_RE = os.environ.get("ITS_DELIMITERS_RE", "[x_,]")
SENTRY_DSN = os.environ.get("ITS_SENTRY_DSN")
# for each domain in that list, ITS will respond to GET and HEAD requests with CORS headers
CORS_ORIGINS = [
'http://localhost:1234',
'http://localhost:8080',
'cordova://kartkingdom-app',
r'(.*)?\.?pbslearningmedia\.org/?',
r'(.*)?\.?pbslm\.org/?',
r'(.*)?\.?pbskids\.org/?',
r'(.*)?\.?pbs\.org/?',
]
# Logging dictionary to be used for dictConfig
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '[%(asctime)s] %(levelname)s - %(name)s - %(message)s'
},
'newrelic': {
'()': NewRelicContextFormatter
},
},
'root': {
'level': 'DEBUG',
'handlers': ['console', 'newrelic'],
},
'handlers': {
'console': {
'level': 'DEBUG',
'formatter': 'verbose',
'class': 'logging.StreamHandler',
},
'newrelic': {
'level': 'DEBUG',
'formatter': 'newrelic',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'newrelic': {
'propagate': True,
'level': 'ERROR',
},
}
}
|
11494089
|
from typing import Tuple, Union, List, Optional, Any, Callable
from pydantic import BaseModel, StrictInt, StrictBool
from torch.utils import data
from tensorfn.config import Config
class DataLoader(Config):
batch_size: StrictInt = 1
shuffle: StrictBool = False
num_workers: StrictInt = 0
pin_memory: StrictBool = False
drop_last: StrictBool = False
timeout: StrictInt = 0
def make(
self,
dataset,
sampler=None,
batch_sampler=None,
collate_fn=None,
worker_init_fn=None,
multiprocessing_context=None,
):
return data.DataLoader(
dataset,
self.batch_size,
self.shuffle,
sampler,
batch_sampler,
self.num_workers,
collate_fn,
self.pin_memory,
self.drop_last,
self.timeout,
worker_init_fn,
multiprocessing_context,
)
def make_dataloader(
config,
dataset,
sampler=None,
batch_sampler=None,
collate_fn=None,
worker_init_fn=None,
multiprocessing_context=None,
):
return config.make(
dataset,
sampler,
batch_sampler,
collate_fn,
worker_init_fn,
multiprocessing_context,
)
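# Minimal usage sketch (the dataset and values below are assumptions, not part of this module):
# conf = DataLoader(batch_size=32, shuffle=True, num_workers=4)
# loader = make_dataloader(conf, my_dataset)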
|
11494145
|
class ListNode:
def __init__(self, data, next=None):
self.val = data
self.next = next
def make_list(elements):
head = ListNode(elements[0])
for element in elements[1:]:
ptr = head
while ptr.next:
ptr = ptr.next
ptr.next = ListNode(element)
return head
def print_list(head):
ptr = head
print("[", end="")
while ptr:
print(ptr.val, end=", ")
ptr = ptr.next
print("]")
class Heap:
def __init__(self):
self.arr = []
def print_heap(self):
res = " "
for i in self.arr:
res += str(i.val) + " "
print(res)
def getVal(self, i):
return self.arr[i].val
def parent(self, i):
return (i - 1) // 2
def left(self, i):
return 2 * i + 1
def right(self, i):
return 2 * i + 2
def insert(self, value):
self.arr.append(value)
n = len(self.arr) - 1
i = n
while i != 0 and self.arr[i].val < self.arr[self.parent(i)].val:
self.arr[i], self.arr[self.parent(i)] = (
self.arr[self.parent(i)],
self.arr[i],
)
i = self.parent(i)
def heapify(self, i):
left = self.left(i)
right = self.right(i)
smallest = i
n = len(self.arr)
if left < n and self.getVal(left) < self.getVal(smallest):
smallest = left
if right < n and self.getVal(right) < self.getVal(smallest):
smallest = right
if smallest != i:
self.arr[i], self.arr[smallest] = self.arr[smallest], self.arr[i]
self.heapify(smallest)
def extractMin(self):
n = len(self.arr)
if n == 0:
return "#"
if n == 1:
temp = self.arr[0]
self.arr.pop()
return temp
root = self.arr[0]
self.arr[0] = self.arr[-1]
self.arr.pop()
self.heapify(0)
return root
class Solution(object):
def mergeKLists(self, lists):
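        # Min-heap keyed on node value: pop the smallest head, append it to
        # the result list, and push its successor. O(N log k) for N total
        # nodes across k lists.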
heap = Heap()
for i in lists:
if i:
heap.insert(i)
res = None
res_next = None
while True:
temp = heap.extractMin()
if temp == "#":
return res
if not res:
res = temp
res_next = temp
temp = temp.next
if temp:
heap.insert(temp)
res.next = None
else:
res_next.next = temp
temp = temp.next
res_next = res_next.next
if temp:
heap.insert(temp)
res_next.next = None
if __name__ == "__main__":
"""
from timeit import timeit
ob = Solution()
lists = [[1, 4, 5], [1, 3, 4], [2, 6]]
lls = []
for ll in lists:
l = make_list(ll)
lls.append(l)
print(timeit(lambda: print_list(ob.mergeKLists(lls)), number=10000)) # 0.11806906099809567
"""
|
11494162
|
from qtpy import QtCore, QtGui, QtWidgets
from perforce.AppInterop import interop
from perforce import PerforceUtils
def displayErrorUI(e):
    error_ui = QtWidgets.QMessageBox()
    # WA_DeleteOnClose is a widget attribute, not a window flag,
    # so it must be set with setAttribute().
    error_ui.setAttribute(QtCore.Qt.WA_DeleteOnClose)
    eMsg, err_type = PerforceUtils.parsePerforceError(e)
    if err_type == "warning":
        error_ui.warning(interop.main_parent_window(), "Perforce Warning", eMsg)
    elif err_type == "error":
        error_ui.critical(interop.main_parent_window(), "Perforce Error", eMsg)
    else:
        error_ui.information(interop.main_parent_window(), "Perforce Error", eMsg)
    error_ui.deleteLater()
|
11494203
|
import os
import pytest
import pandas as pd
import numpy as np
@pytest.fixture()
def gdp_data(request):
df = pd.read_csv(os.path.join(os.path.dirname(request.module.__file__), 'data', 'gdp.csv'),
parse_dates=['DATE'])
df['gdp'] = (np.log(df.GDP) - np.log(df.GDP.shift(1))) * 100.
return df.set_index('DATE')
@pytest.fixture()
def pay_data(request):
df = pd.read_csv(os.path.join(os.path.dirname(request.module.__file__), 'data', 'pay.csv'),
parse_dates=['DATE'])
df['pay'] = (np.log(df.PAY) - np.log(df.PAY.shift(1))) * 100.
return df.set_index('DATE')
|
11494204
|
from enum import Enum, auto
class HorizontalLabelAlignment(Enum):
LeftOuter = auto()
Left = auto()
Center = auto()
Right = auto()
RightOuter = auto()
class VerticalLabelAlignment(Enum):
Top = auto()
Center = auto()
Bottom = auto()
class FlowPattern(Enum):
CoCurrent = auto()
CounterCurrent = auto()
|
11494206
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
from observations.r.boundsdata import boundsdata
def test_boundsdata():
"""Test module boundsdata.py by downloading
boundsdata.csv and testing shape of
extracted data has 1000 rows and 7 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = boundsdata(test_path)
    try:
        assert x_train.shape == (1000, 7)
    except AssertionError:
        shutil.rmtree(test_path)
        raise
|
11494271
|
import pytest
import numpy as np
import tensorflow as tf
from kerod.core import sampling_ops
@pytest.mark.parametrize(
"np_indicator,num_samples,expected_num_samples",
[
[[True, False, True, False, True, True, False], 3, 3],
        # indicator with fewer True elements than num_samples
[[True, False, True, False, True, True, False], 5, 4]
])
def test_subsample_indicator(np_indicator, num_samples, expected_num_samples):
indicator = tf.constant(np_indicator)
samples = sampling_ops.subsample_indicator(indicator, num_samples)
assert np.sum(samples) == expected_num_samples
np.testing.assert_array_equal(samples, np.logical_and(samples, np_indicator))
def test_subsample_indicator_when_num_samples_is_zero():
np_indicator = [True, False, True, False, True, True, False]
indicator = tf.constant(np_indicator)
samples_none = sampling_ops.subsample_indicator(indicator, 0)
np.testing.assert_array_equal(np.zeros_like(samples_none, dtype=bool), samples_none)
def test_subsample_indicator_when_indicator_all_false():
indicator_empty = tf.zeros([0], dtype=tf.bool)
samples_empty = sampling_ops.subsample_indicator(indicator_empty, 4)
assert samples_empty.numpy().size == 0
def test_subsample_all_examples():
numpy_labels = np.random.permutation(300)
indicator = tf.constant(np.ones(300) == 1)
numpy_labels = (numpy_labels - 200) > 0
labels = tf.constant(numpy_labels)
is_sampled = sampling_ops.sample_balanced_positive_negative(indicator, 64, labels)
assert np.sum(is_sampled) == 64
assert np.sum(np.logical_and(numpy_labels, is_sampled)) == 32
assert np.sum(np.logical_and(np.logical_not(numpy_labels), is_sampled)) == 32
def test_sample_balanced_positive_negative():
# Test random sampling when only some examples can be sampled:
# 100 samples, 20 positives, 10 positives cannot be sampled
numpy_labels = np.arange(100)
numpy_indicator = numpy_labels < 90
indicator = tf.constant(numpy_indicator)
numpy_labels = (numpy_labels - 80) >= 0
labels = tf.constant(numpy_labels)
is_sampled = sampling_ops.sample_balanced_positive_negative(indicator, 64, labels)
assert np.sum(is_sampled) == 64
assert np.sum(np.logical_and(numpy_labels, is_sampled)) == 10
assert np.sum(np.logical_and(np.logical_not(numpy_labels), is_sampled)) == 54
np.testing.assert_array_equal(is_sampled, np.logical_and(is_sampled, numpy_indicator))
def test_sample_balance_positive_negative_selection_larger_sample_size():
# Test random sampling when total number of examples that can be sampled are
# less than batch size:
# 100 samples, 50 positives, 40 positives cannot be sampled, batch size 64.
numpy_labels = np.arange(100)
numpy_indicator = numpy_labels < 60
indicator = tf.constant(numpy_indicator)
numpy_labels = (numpy_labels - 50) >= 0
labels = tf.constant(numpy_labels)
is_sampled = sampling_ops.sample_balanced_positive_negative(indicator, 64, labels)
assert np.sum(is_sampled) == 60
assert np.sum(np.logical_and(numpy_labels, is_sampled)) == 10
assert np.sum(np.logical_and(np.logical_not(numpy_labels), is_sampled)) == 50
np.testing.assert_array_equal(is_sampled, np.logical_and(is_sampled, numpy_indicator))
def test_sample_balance_positive_negative_selection_no_sample_size():
# Test random sampling when only some examples can be sampled:
# 1000 samples, 6 positives (5 can be sampled).
numpy_labels = np.arange(1000)
numpy_indicator = numpy_labels < 999
indicator = tf.constant(numpy_indicator)
numpy_labels = (numpy_labels - 994) >= 0
labels = tf.constant(numpy_labels)
is_sampled = sampling_ops.sample_balanced_positive_negative(indicator,
None,
labels,
positive_fraction=0.01)
assert np.sum(is_sampled) == 500
assert np.sum(np.logical_and(numpy_labels, is_sampled)) == 5
assert np.sum(np.logical_and(np.logical_not(numpy_labels), is_sampled)) == 495
np.testing.assert_array_equal(is_sampled, np.logical_and(is_sampled, numpy_indicator))
def test_batch_sample_balanced_positive_negative():
numpy_labels = np.stack([np.arange(300) for _ in range(2)], axis=0)
indicator = tf.constant(np.ones((2, 300)) == 1)
numpy_labels = (numpy_labels - 200) > 0
labels = tf.constant(numpy_labels)
samples = sampling_ops.batch_sample_balanced_positive_negative(indicator, 64, labels)
assert np.sum(samples) == 128
assert samples.shape == (2, 300)
|
11494324
|
class Solution:
def minIncrementForUnique(self, A: 'List[int]') -> 'int':
A.sort()
num = 0
for i in range(1, len(A)):
if A[i] <= A[i - 1]:
num += A[i - 1] + 1 - A[i]
A[i] = A[i - 1] + 1
return num
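if __name__ == "__main__":
    # Hypothetical quick check: [3, 2, 1, 2, 1, 7] sorts to [1, 1, 2, 2, 3, 7];
    # bumping duplicates upward costs 6 increments in total.
    print(Solution().minIncrementForUnique([3, 2, 1, 2, 1, 7]))  # 6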
|
11494390
|
import sys
import wx
from pubsub import pub
import matplotlib
if 'linux' not in sys.platform:
matplotlib.use("WXAgg")
try:
import seaborn
seaborn.set()
except ImportError:
pass
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx as NavigationToolbar
from matplotlib.figure import Figure
try:
# local import
from components import create_bitmap_dropdown_menu
except (ModuleNotFoundError, ImportError):
# Package import
from dshelper.components import create_bitmap_dropdown_menu
class HistPanel(wx.Panel):
"""
A panel displays the histogram plot for any given column
Args:
df --> pandas dataframe: passed internally for plotting
Returns: None
"""
def __init__(self, parent, df=None):
wx.Panel.__init__(self, parent)
self.df = df
self.available_columns = list(self.df.columns)
self.figure = Figure()
self.axes = self.figure.add_subplot(111)
self.canvas = FigureCanvas(self, -1, self.figure)
self.toolbar = NavigationToolbar(self.canvas)
self.dropdown_menu = create_bitmap_dropdown_menu(
self, self.available_columns, self.df
)
self.Bind(wx.EVT_COMBOBOX, self.column_selected)
toolbar_sizer = wx.BoxSizer(wx.HORIZONTAL)
toolbar_sizer.Add(self.dropdown_menu, 0, wx.ALL | wx.ALIGN_CENTER, 5)
toolbar_sizer.Add(self.toolbar, 0, wx.ALL, 5)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
sizer.Add(toolbar_sizer)
self.SetSizer(sizer)
self.Fit()
pub.subscribe(self.update_available_column, "UPDATE_DISPLAYED_COLUMNS")
def column_selected(self, event):
"""
        Handles column selection from the dropdown menu.
"""
selected_column = self.dropdown_menu.GetStringSelection()
self.draw_hist(selected_column, self.df[selected_column])
def draw_hist(self, column_name, data):
"""
Function that draws plot in the panel.
Args:
column_name --> string: the name of the column that needs to
be drawn
data --> 1D dataframe: dataframe column extracted from df
(i.e. data = df[column_name])
Returns: None
"""
# Reset plot first
self.axes.clear()
try:
# Check data type
if data.dtype == "object":
# Different drawing method for strings
value_count = data.value_counts().sort_index()
value_count.plot(kind="bar", ax=self.axes)
else:
self.axes.hist(data.dropna(), bins=100)
except ValueError as e:
# log Error
_log_message = "\nHistogram plot failed due to error:\n--> {}".format(e)
pub.sendMessage("LOG_MESSAGE", log_message=_log_message)
# Set plot info
self.axes.set_title("Histogram Plot for %s" % column_name)
self.axes.set_ylabel("Value Count")
self.canvas.draw()
def update_available_column(self, available_columns):
"""
Update dataframe used for plotting.
Args:
available_columns --> list: a list of available column headers
Returns: None
"""
self.available_columns = available_columns
self.dropdown_menu.Clear()
for column in self.available_columns:
self.dropdown_menu.Append(column)
if __name__ == "__main__":
# Test for individual panel layout
app = wx.App(0)
frame = wx.Frame(None, wx.ID_ANY)
fa = HistPanel(frame)
frame.Show()
app.MainLoop()
|
11494403
|
import os
from time import sleep
import requests
from flask import abort, Flask, jsonify, request
from zappa.asynchronous import task  # renamed from zappa.async; 'async' is reserved in Python 3.7+
app = Flask(__name__)
def is_request_valid(request):
is_token_valid = request.form['token'] == os.environ['SLACK_VERIFICATION_TOKEN']
is_team_id_valid = request.form['team_id'] == os.environ['SLACK_TEAM_ID']
return is_token_valid and is_team_id_valid
@task
def hello_there_task(response_url):
sleep(5)
data = {
'response_type': 'in_channel',
'text': 'You _are_ a bold one.',
}
requests.post(response_url, json=data)
@app.route('/hello-there', methods=['POST'])
def hello_there():
if not is_request_valid(request):
abort(400)
hello_there_task(request.form['response_url'])
return jsonify(
response_type='in_channel',
text='<https://youtu.be/frszEJb0aOo|General Kenobi!>',
)
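# Slack expects slash commands to respond within ~3 seconds; the immediate
# jsonify() reply satisfies that, while the @task-decorated function runs
# asynchronously (a separate Lambda invocation under Zappa) and posts the
# delayed follow-up message to response_url.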
|
11494417
|
import os.path
from amitools.binfmt.BinFmt import BinFmt
from amitools.binfmt.Relocate import Relocate
from amitools.vamos.label import LabelSegment
from amitools.vamos.log import log_segload
from .seglist import SegList
class SegLoadInfo(object):
def __init__(self, seglist, bin_img=None, sys_file=None, ami_file=None):
self.seglist = seglist
self.bin_img = bin_img
self.sys_file = sys_file
self.ami_file = ami_file
def __str__(self):
return "[SegLoad:%s,sys=%s,ami=%s]" % (
self.seglist,
self.sys_file,
self.ami_file,
)
class SegmentLoader(object):
def __init__(self, alloc, path_mgr=None):
self.alloc = alloc
self.path_mgr = path_mgr
self.mem = alloc.get_mem()
self.binfmt = BinFmt()
# map seglist baddr to bin_img
self.infos = {}
def load_sys_seglist(self, sys_bin_file):
"""load seglist, register it, and return seglist baddr or 0"""
info = self.int_load_sys_seglist(sys_bin_file)
if info:
baddr = info.seglist.get_baddr()
self.infos[baddr] = info
log_segload.info("loaded sys seglist: %s", info)
return baddr
else:
log_segload.info("can't load sys seglist: %s", sys_bin_file)
return 0
def load_ami_seglist(self, ami_bin_file, lock=None):
"""load seglist, register it, and return seglist baddr or 0"""
info = self.int_load_ami_seglist(ami_bin_file, lock)
if info:
baddr = info.seglist.get_baddr()
self.infos[baddr] = info
log_segload.info("loaded ami seglist: %s", info)
return baddr
else:
log_segload.info("can't load ami seglist: %s", ami_bin_file)
return 0
def unload_seglist(self, seglist_baddr):
"""unregister given seglist baddr and free seglist.
return True if seglist was unloaded"""
if seglist_baddr not in self.infos:
log_segload.error("unknown seglist at @%06x", seglist_baddr)
return False
info = self.infos[seglist_baddr]
log_segload.info("unload seglist: %s", info)
del self.infos[seglist_baddr]
info.seglist.free()
return True
def get_info(self, seglist_baddr):
"""return associated bin_img for given registered seglist baddr or None"""
if seglist_baddr in self.infos:
return self.infos[seglist_baddr]
def register_seglist(self, baddr):
"""register custom seglist"""
info = SegLoadInfo(SegList(self.alloc, baddr))
log_segload.info("register seglist: %s", info)
self.infos[baddr] = info
def unregister_seglist(self, baddr):
"""remove custom seglist"""
info = self.infos[baddr]
log_segload.info("unregister seglist: %s", info)
del self.infos[baddr]
def shutdown(self):
"""check orphan seglists on shutdown and return number of orphans"""
log_segload.info("shutdown")
for baddr in self.infos:
info = self.infos[baddr]
log_segload.warning("orphaned seglist: %s", info)
# try to free list
info.seglist.free()
return len(self.infos)
def int_load_ami_seglist(self, ami_bin_file, lock=None):
"""load seglist given by ami binary path and return SegLoadInfo"""
if self.path_mgr is None:
return None
# try to map path
sys_path = self.path_mgr.ami_to_sys_path(lock, ami_bin_file, mustExist=True)
if sys_path:
            info = self.int_load_sys_seglist(sys_path)
            if info:  # loading may fail and return None
                info.ami_file = ami_bin_file
            return info
def int_load_sys_seglist(self, sys_bin_file):
"""load seglist given by sys binary path and return SegLoadInfo"""
base_name = os.path.basename(sys_bin_file)
# does file exist?
if not os.path.isfile(sys_bin_file):
log_segload.debug("no file: %s", sys_bin_file)
return None
# try to load bin image in supported format (e.g. HUNK or ELF)
bin_img = self.binfmt.load_image(sys_bin_file)
if bin_img is None:
log_segload.debug("load_image failed: %s", sys_bin_file)
return None
# create relocator
relocator = Relocate(bin_img)
# get info about segments to allocate
sizes = relocator.get_sizes()
names = bin_img.get_segment_names()
bin_img_segs = bin_img.get_segments()
# build label names
if self.alloc.label_mgr:
labels = []
for i in range(len(sizes)):
name = "%s_%d:%s" % (base_name, i, names[i].lower())
labels.append(name)
else:
labels = None
# allocate seg list
seg_list = SegList.alloc(self.alloc, sizes, labels, bin_img_segs)
# retrieve addr
addrs = seg_list.get_all_addrs()
# relocate to addresses and return data
datas = relocator.relocate(addrs)
# write contents to allocated memory
for i in range(len(sizes)):
# write data to segments
self.mem.w_block(addrs[i], datas[i])
return SegLoadInfo(seg_list, bin_img, sys_bin_file)
|
11494440
|
import tensorflow as tf
import cv2
import time
import argparse
from posenet.posenet_factory import load_model
from posenet.utils import draw_skel_and_kp
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='resnet50') # mobilenet resnet50
parser.add_argument('--stride', type=int, default=16) # 8, 16, 32 (max 16 for mobilenet)
parser.add_argument('--quant_bytes', type=int, default=4) # 4 = float
parser.add_argument('--multiplier', type=float, default=1.0) # only for mobilenet
parser.add_argument('--scale_factor', type=float, default=0.7125)
parser.add_argument('--input_file', type=str, help="Give the video file location")
parser.add_argument('--output_file', type=str, help="Give the video file location")
args = parser.parse_args()
def main():
print('Tensorflow version: %s' % tf.__version__)
assert tf.__version__.startswith('2.'), "Tensorflow version 2.x must be used!"
model = args.model # mobilenet resnet50
stride = args.stride # 8, 16, 32 (max 16 for mobilenet, min 16 for resnet50)
quant_bytes = args.quant_bytes # float
multiplier = args.multiplier # only for mobilenet
posenet = load_model(model, stride, quant_bytes, multiplier)
# for inspiration, see: https://www.programcreek.com/python/example/72134/cv2.VideoWriter
    if args.input_file is not None:
        cap = cv2.VideoCapture(args.input_file)
    else:
        raise IOError("no input video file specified (use --input_file)")
fps = cap.get(cv2.CAP_PROP_FPS)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
video_writer = cv2.VideoWriter(args.output_file, fourcc, fps, (width, height))
max_pose_detections = 20
    # Scaling the input image reduces the quality of the pose detections,
    # but the speed gain is roughly the square of the scale factor.
    posenet_input_height = 540  # target height if downscaling is enabled
    posenet_input_scale = 1.0   # e.g. posenet_input_height / height to enable downscaling
    posenet_input_width = int(width * posenet_input_scale)
    print("posenet_input_scale: %3.4f" % posenet_input_scale)
start = time.time()
frame_count = 0
ret, frame = cap.read()
while ret:
if posenet_input_scale == 1.0:
frame_rescaled = frame # no scaling
else:
frame_rescaled = \
cv2.resize(frame, (posenet_input_width, posenet_input_height), interpolation=cv2.INTER_LINEAR)
pose_scores, keypoint_scores, keypoint_coords = posenet.estimate_multiple_poses(frame_rescaled, max_pose_detections)
keypoint_coords_upscaled = keypoint_coords / posenet_input_scale
overlay_frame = draw_skel_and_kp(
frame, pose_scores, keypoint_scores, keypoint_coords_upscaled,
min_pose_score=0.15, min_part_score=0.1)
frame_count += 1
# This is uncompressed video. cv2 has no way to write compressed videos, so we'll have to use ffmpeg to
# compress it afterwards! See:
# https://stackoverflow.com/questions/25998799/specify-compression-quality-in-python-for-opencv-video-object
video_writer.write(overlay_frame)
ret, frame = cap.read()
print('Average FPS: ', frame_count / (time.time() - start))
video_writer.release()
cap.release()
if __name__ == "__main__":
main()
|
11494444
|
import face_recognition
import numpy as np
import pickle
from config import ConfigFacialId
from config import ConfigVideoFrame
class FacialIdDataset():
def __init__(self):
self.known_face_names = []
self.known_face_encodings = []
self.all_face_encodings = {}
self.realtime_face_encodings = []
self.realtime_face_names = []
    def load(self):
        try:
            with open(ConfigFacialId.DATASET_FILENAME, 'rb') as f:
                self.all_face_encodings = pickle.load(f)
            self.update()
        except FileNotFoundError:
            print(f'{ConfigFacialId.DATASET_FILENAME} file not found!')
def add(self, name):
self.load()
face_saved = False
while not face_saved:
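            # NOTE: appears to rely on another thread refreshing
            # realtime_face_encodings / realtime_face_names while this loop
            # polls; otherwise it would spin forever.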
if len(self.realtime_face_encodings) > 0:
if self.realtime_face_names[0] == ConfigVideoFrame.UNKNOW_FACE_TEXT:
self.all_face_encodings[name] = self.realtime_face_encodings[0]
self.save()
face_saved = True
def addFromFile(self, name, image_path):
loaded_image = face_recognition.load_image_file(image_path)
face_encoding = face_recognition.face_encodings(loaded_image)[0]
self.all_face_encodings[name] = face_encoding
self.save()
def remove(self, to_remove):
self.load()
self.all_face_encodings.pop(to_remove, None)
self.save()
def update(self):
self.known_face_names = list(
self.all_face_encodings.keys())
self.known_face_encodings = np.array(
list(self.all_face_encodings.values()))
def save(self):
self.update()
with open(ConfigFacialId.DATASET_FILENAME, 'wb') as f:
pickle.dump(self.all_face_encodings, f)
def print_names(self):
print(self.known_face_names)
|
11494477
|
import collections
kaldi_base = "/scail/group/deeplearning/speech/awni/kaldi-stanford/kaldi-trunk/egs/swbd/s5b/"
# Symbols
laugh = '[laughter]'
noise = '[noise]'
voc_noise = '[vocalized-noise]'
space = '[space]'
# Spell out integers
integers = ['zero','one','two','three','four','five','six','seven','eight','nine']
def unique_tokens():
"""
Reads swbd transcripts and stores unique tokens.
"""
with open('data/train/text','r') as fid:
lines = [l.strip().split()[1:] for l in fid.readlines()]
tokens = collections.defaultdict(int)
for i,line in enumerate(lines):
for l in line:
if l == laugh or l==noise or l==voc_noise:
tokens[l] += 1
else:
for t in list(l):
if t=='_':
continue
try:
int(t)
except ValueError:
# Ignore integers
tokens[t] += 1
tokens[space] += 1
print "Parsed %d lines."%i
fid = open('ctc-utils/chars.txt','w')
for i,k in enumerate(tokens.keys()):
fid.write(k+' '+str(i+1)+'\n')
fid.close()
return tokens
def tokenize(labels,file='data/train/text_ctc'):
"""
    Reads swbd transcripts and builds a mapping from utterance key
    to a list of integer labels.
"""
with open(file,'r') as fid:
lines = [l.strip().split() for l in fid.readlines()]
data = dict((l[0],l[1:]) for l in lines)
int_labels = [[labels[l] for l in list(i)] for i in integers]
# for every utterance
    for k, line in data.items():
newline = []
# for every word in transcription
for i,word in enumerate(line):
# for [noise] etc
if word in labels.keys():
newline.append(labels[word])
else:
# for every char in word
for j,char in enumerate(list(word)):
try:
newline.append(labels[char])
except KeyError:
# Add spelled out integer followed by space
newline += int_labels[int(char)]
if j < len(list(word)) - 1:
newline.append(labels[space])
# Add a space inbetween every word
if i < len(line) -1:
newline.append(labels[space])
data[k] = newline
return data
def write_alis(utts,file=kaldi_base+'exp/train_ctc',numfiles=20):
"""
Takes utterance to alignment mapping and splits it up
into alignment files according to file structure of
training set.
"""
for f in range(1,numfiles+1):
print "writing file %d..."%f
with open(file+'/keys%d.txt'%f,'r') as fid:
keys = [l.strip().split()[0] for l in fid.readlines()]
with open(file+'/alis%d.txt'%f,'w') as fid:
for k in keys:
fid.write(k+" "+" ".join(utts[k])+'\n')
def load_labels():
"""
Loads file with label to integer mapping. Use
unique_tokens to create file.
"""
with open('ctc-utils/chars.txt','r') as fid:
labels = dict(tuple(l.strip().split()) for l in fid.readlines())
return labels
def compute_bigrams():
"""
Compute bigrams with smoothing. Save in bigrams.bin.
"""
    import pickle
    import numpy as np
    fid_bg = open(kaldi_base+'exp/train_ctc/bigrams.bin','wb')
labels = load_labels()
numLabels = len(labels.keys())
bigrams = np.ones((numLabels,numLabels))
numfiles = 384
for f in range(1,numfiles+1):
print "Reading alis %d."%f
with open('exp/train_ctc/alis%d.txt'%f,'r') as fid:
alis = [l.strip().split()[1:] for l in fid.readlines()]
for v in alis:
for i,j in zip(v[1:],v[:-1]):
bigrams[int(i)-1,int(j)-1] += 1
bigrams = bigrams/np.sum(bigrams,axis=0)
pickle.dump(bigrams,fid_bg)
return bigrams
if __name__=='__main__':
# unique_tokens()
labelset = load_labels()
data = [('train',384),('dev',20)]
for name,num in data:
utts = tokenize(labelset, file=kaldi_base+'data/%s/text_ctc'%name)
write_alis(utts, file=kaldi_base+'exp/%s_ctc'%name,numfiles=num)
|
11494523
|
from PyQt5.QtWidgets import QToolButton, QLineEdit, QFileDialog, QDialog, QRadioButton, QPushButton
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import Qt
from PyQt5 import uic
import os
import configparser
from shutil import copytree, rmtree
from pulse.utils import get_new_path
from data.user_input.project.printMessageInput import PrintMessageInput
window_title_1 = "ERROR MESSAGE"
window_title_2 = "WARNING MESSAGE"
class SetProjectAttributesInput(QDialog):
def __init__(self, project, opv, *args, **kwargs):
super().__init__(*args, **kwargs)
uic.loadUi('data/user_input/ui/Project/setProjectAttributesInput.ui', self)
        icons_path = os.path.join('pulse', 'data', 'icons')
        self.icon = QIcon(os.path.join(icons_path, 'add.png'))
self.setWindowIcon(self.icon)
self.project = project
self.opv = opv
self.opv.setInputObject(self)
self.setWindowFlags(Qt.WindowStaysOnTopHint)
self.setWindowModality(Qt.WindowModal)
self.create = False
self.stop = False
self.currentTab = 0
self.userPath = os.path.expanduser('~')
self.current_project_file_path = self.project.file._project_path
self.project_directory = os.path.dirname(self.current_project_file_path)
self.project_name = self.project.file._project_name
self.project_ini = self.project.file._project_base_name
self.current_geometry_path = self.project.file._geometry_path
self.current_material_list_path = self.project.file._material_list_path
self.current_fluid_list_path = self.project.file._fluid_list_path
self.current_conn_path = self.project.file._conn_path
        self.current_coord_path = self.project.file._coord_path
self.import_type = self.project.file._import_type
self.lineEdit_current_project_name = self.findChild(QLineEdit, 'lineEdit_current_project_name')
self.lineEdit_current_project_folder = self.findChild(QLineEdit, 'lineEdit_current_project_folder')
self.lineEdit_new_project_name = self.findChild(QLineEdit, 'lineEdit_new_project_name')
self.lineEdit_new_project_folder = self.findChild(QLineEdit, 'lineEdit_new_project_folder')
self.lineEdit_current_project_name.setText(self.project_name)
self.lineEdit_current_project_folder.setText(self.project_directory)
self.toolButton_clean_project_name = self.findChild(QToolButton, 'toolButton_clean_project_name')
self.toolButton_clean_project_name.clicked.connect(self.clean_project_name)
self.toolButton_search_project_folder = self.findChild(QToolButton, 'toolButton_search_project_folder')
self.toolButton_search_project_folder.clicked.connect(self.search_project_folder)
self.radioButton_projectName = self.findChild(QRadioButton, 'radioButton_projectName')
self.radioButton_projectDirectory = self.findChild(QRadioButton, 'radioButton_projectDirectory')
self.radioButton_projectNameDirectory = self.findChild(QRadioButton, 'radioButton_projectNameDirectory')
self.radioButton_projectName.toggled.connect(self.update_texts_and_controls)
self.radioButton_projectDirectory.toggled.connect(self.update_texts_and_controls)
self.radioButton_projectNameDirectory.toggled.connect(self.update_texts_and_controls)
self.radioButton_maintain_current_project_folder = self.findChild(QRadioButton, 'radioButton_maintain_current_project_folder')
self.radioButton_remove_current_project_folder = self.findChild(QRadioButton, 'radioButton_remove_current_project_folder')
self.pushButton_confirm = self.findChild(QPushButton, 'pushButton_confirm')
self.pushButton_confirm.clicked.connect(self.check_entries_and_confirm)
self.pushButton_cancel = self.findChild(QPushButton, 'pushButton_cancel')
self.pushButton_cancel.clicked.connect(self.cancel_and_close)
if self.radioButton_projectName.isChecked():
self.lineEdit_new_project_folder.setText(self.project_directory)
self.lineEdit_new_project_folder.setDisabled(True)
self.toolButton_search_project_folder.setDisabled(True)
self.exec_()
def update_texts_and_controls(self):
if self.radioButton_projectName.isChecked():
self.lineEdit_new_project_folder.setText(self.project_directory)
self.lineEdit_new_project_name.setText("")
self.lineEdit_new_project_name.setDisabled(False)
self.lineEdit_new_project_folder.setDisabled(True)
self.toolButton_clean_project_name.setDisabled(False)
self.toolButton_search_project_folder.setDisabled(True)
elif self.radioButton_projectDirectory.isChecked():
self.lineEdit_new_project_folder.setText("")
self.lineEdit_new_project_name.setText(self.project_name)
self.lineEdit_new_project_name.setDisabled(True)
self.lineEdit_new_project_folder.setDisabled(False)
self.toolButton_clean_project_name.setDisabled(True)
self.toolButton_search_project_folder.setDisabled(False)
elif self.radioButton_projectNameDirectory.isChecked():
if self.lineEdit_new_project_name.text() == self.lineEdit_current_project_name.text():
self.lineEdit_new_project_name.setText("")
if self.lineEdit_new_project_folder.text() == self.lineEdit_current_project_folder.text():
self.lineEdit_new_project_folder.setText("")
self.lineEdit_new_project_name.setDisabled(False)
self.toolButton_clean_project_name.setDisabled(False)
self.lineEdit_new_project_folder.setDisabled(False)
self.toolButton_search_project_folder.setDisabled(False)
def cancel_and_close(self):
self.close()
def clean_project_name(self):
self.lineEdit_new_project_name.setText("")
def check_entries_and_confirm(self):
self.check_modification_type()
if self.lineEdit_new_project_name.text() != "":
if self.lineEdit_new_project_folder.text() != "":
self.copyTreeProjectFiles()
self.update_all_file_paths()
self.update_project_ini_name()
if self.radioButton_remove_current_project_folder.isChecked():
rmtree(self.current_project_file_path)
self.close()
else:
self.search_project_folder()
return self.check_entries_and_confirm()
else:
message_title = "Empty project name"
message = "Please, inform a valid project name at 'New project name' input field to continue."
PrintMessageInput([message_title, message, window_title_2])
def search_project_folder(self):
self.new_project_directory = QFileDialog.getExistingDirectory(None, 'Choose a new folder to save the project files', self.userPath)
self.lineEdit_new_project_folder.setText(str(self.new_project_directory))
def copyTreeProjectFiles(self):
self.new_project_folder_path = get_new_path(self.new_project_folder, self.new_project_name)
copytree(self.current_project_file_path, self.new_project_folder_path)
def update_all_file_paths(self):
new_geometry_path = get_new_path(self.new_project_folder_path, os.path.basename(self.current_geometry_path))
new_material_list_path = get_new_path(self.new_project_folder_path, os.path.basename(self.current_material_list_path))
new_fluid_list_path = get_new_path(self.new_project_folder_path, os.path.basename(self.current_fluid_list_path))
if self.import_type == 0:
self.project.copy_project( self.new_project_folder_path,
self.new_project_name,
new_material_list_path,
new_fluid_list_path,
geometry_path = new_geometry_path)
elif self.import_type == 1:
pass
def check_modification_type(self):
if self.radioButton_projectName.isChecked():
self.new_project_folder = self.lineEdit_current_project_folder.text()
self.new_project_name = self.lineEdit_new_project_name.text()
self.current_project_folder = self.lineEdit_current_project_folder.text()
self.current_project_name = self.lineEdit_current_project_name.text()
elif self.radioButton_projectDirectory.isChecked():
self.new_project_folder = self.lineEdit_new_project_folder.text()
self.new_project_name = self.lineEdit_current_project_name.text()
self.current_project_folder = self.lineEdit_current_project_folder.text()
self.current_project_name = self.lineEdit_current_project_name.text()
elif self.radioButton_projectNameDirectory.isChecked():
self.new_project_folder = self.lineEdit_new_project_folder.text()
self.new_project_name = self.lineEdit_new_project_name.text()
self.current_project_folder = self.lineEdit_current_project_folder.text()
self.current_project_name = self.lineEdit_current_project_name.text()
def update_project_ini_name(self):
project_ini_file_path = get_new_path(self.new_project_folder_path, self.project_ini)
config = configparser.ConfigParser()
config.read(project_ini_file_path)
config['PROJECT']['Name'] = self.new_project_name
with open(project_ini_file_path, 'w') as config_file:
config.write(config_file)
|
11494561
|
from pypy.jit.codegen.i386.ri386 import *
from pypy.jit.codegen.i386.codebuf import MachineCodeBlock, memhandler
def test_alloc_free():
map_size = 65536
data = memhandler.alloc(map_size)
for i in range(0, map_size, 171):
data[i] = chr(i & 0xff)
for i in range(0, map_size, 171):
assert data[i] == chr(i & 0xff)
memhandler.free(data, map_size)
def test_machinecodeblock():
mc = MachineCodeBlock(4096)
mc.MOV(eax, mem(esp, 4))
mc.SUB(eax, mem(esp, 8))
mc.RET()
res = mc.execute(44, 2)
assert res == 42
return res
def test_compile():
from pypy.translator.c.test.test_genc import compile
fn = compile(test_machinecodeblock, [])
res = fn()
assert res == 42
|
11494589
|
from typing import Tuple
from nutshell.preprocessing.cleaner import BaseCleaner, NLTKCleaner
from nutshell.preprocessing.tokenizer import BaseTokenizer, Token, NLTKTokenizer
class TextPreProcessor:
def __init__(self, tokenizer: BaseTokenizer = NLTKTokenizer(), cleaner: BaseCleaner = NLTKCleaner()):
"""
TextPreprocessor class is responsible performing tokenization and apply transforms using the cleaner.
:param tokenizer: Tokenizer object which preforms text tokenization. By default NLTKTokenizer is used.
:param cleaner: Cleaner object which performs cleaning methods on the tokens. By default NLTKCleaner() is used.
"""
self.__cleaner = cleaner
self.__tokenizer = tokenizer
def __repr__(self):
return f"""TextPreProcessor(cleaner={self.__cleaner}, tokenizer={self.__tokenizer})"""
def preprocess(self, corpus) -> Tuple[Token, Token]:
"""
Preprocesses the corpus by invoking the tokenize and clean methods
:return: Tokens and cleaned tokens
"""
original_tokens = self.__tokenizer.tokenize(corpus)
return original_tokens, self.__cleaner.clean(original_tokens)
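if __name__ == '__main__':
    # Minimal usage sketch; assumes the NLTK data required by the default
    # tokenizer/cleaner (e.g. punkt, stopwords) has been downloaded.
    preprocessor = TextPreProcessor()
    original, cleaned = preprocessor.preprocess("The quick brown fox jumps over the lazy dog.")
    print(original)
    print(cleaned)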
|
11494599
|
from dataclasses import dataclass
@dataclass
class Guardian:
""" A guardian or parent in the league """
first_name: str
last_name: str
|
11494705
|
from descontos import (
DescontoCincoItens,
DescontoMaisDeQuinhentosReais,
SemDesconto,
)
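# The discount rules form a chain of responsibility: each rule either applies
# or delegates to the next one, terminating in SemDesconto (no discount).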
class CalculadorDescontos:
def calcula(self, orcamento):
return DescontoCincoItens(
DescontoMaisDeQuinhentosReais(SemDesconto())
).calcula(orcamento)
if __name__ == '__main__':
from orcamento import Orcamento, Item
orcamento = Orcamento()
orcamento.adiciona_item(Item('item 0', 100.0))
orcamento.adiciona_item(Item('item 1', 100.0))
orcamento.adiciona_item(Item('item 2', 100.0))
orcamento.adiciona_item(Item('item 3', 100.0))
orcamento.adiciona_item(Item('item 4', 100.0))
orcamento.adiciona_item(Item('item 5', 100.0))
orcamento.adiciona_item(Item('item 6', 100.0))
orcamento.adiciona_item(Item('item 7', 100.0))
orcamento.adiciona_item(Item('item 8', 100.0))
orcamento.adiciona_item(Item('item 9', 100.0))
print(orcamento.valor)
calculator = CalculadorDescontos()
desconto = calculator.calcula(orcamento)
print(f'Desconto calculado {desconto}')
|
11494760
|
from __future__ import absolute_import
from past.builtins import unicode
import argparse
import logging
import re
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io.iobase import Write
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from sparkles import sparkles
from sparkles.sink import Utf8TextSink
class WordExtractingDoFn(beam.DoFn):
"""Parse each line of input text into words."""
def process(self, element):
"""Returns an iterator over the words of this element.
The element is a line of text. If the line is blank, note that, too.
Args:
element: the element being processed
Returns:
The processed element.
"""
return re.findall(r'[\w\']+', element, re.UNICODE)
def format_output(w, c):
# USING A CUSTOM LIBRARY
output = sparkles.add_sparkles(w, c)
logging.info('OUTPUT %s', output)
return output
def run(argv=None, save_main_session=True):
parser = argparse.ArgumentParser()
parser.add_argument(
'--input',
dest='input',
default='gs://dataflow-samples/shakespeare/kinglear.txt',
help='Input file to process.')
parser.add_argument(
'--output',
dest='output',
required=True,
help='Output file to write results to.')
known_args, pipeline_args = parser.parse_known_args(argv)
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = save_main_session
with beam.Pipeline(options=pipeline_options) as p:
# Read the text file.
lines = p | 'Read' >> ReadFromText(known_args.input)
output = (
lines
| 'Split' >> (beam.ParDo(WordExtractingDoFn()).with_output_types(unicode))
| 'PairWIthOne' >> beam.Map(lambda x: (x, 1))
| 'GroupAndSum' >> beam.CombinePerKey(sum)
# For Logging Purposes
| 'Format' >> beam.MapTuple(format_output))
# A custom text sink so it displays nicely in GCS :(
output | 'Write' >> Write(Utf8TextSink(known_args.output))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
|
11494781
|
import subprocess
from subprocess import Popen
import signal
import time
import os
import sys
process = None
def start(topics, filename, folder='~/bags/'):
global process
folder = os.path.expanduser(folder)
if not os.path.isdir(folder):
os.mkdir(folder)
os.chdir(os.path.expanduser(folder))
command = ['rosbag', 'record', '-O', filename]
command.extend(topics)
process = Popen('exec ' + ' '.join(command), stdin=subprocess.PIPE, stdout=subprocess.PIPE, preexec_fn=os.setsid, shell=True)
def stop():
global process
if process is not None:
print("KILLING PROCESS")
os.killpg(os.getpgid(process.pid), signal.SIGINT)
time.sleep(0.1)
def sigint_handler(*args):
print("Stopping rosbag")
stop()
sys.exit(0)
signal.signal(signal.SIGINT, sigint_handler)
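if __name__ == '__main__':
    # Minimal usage sketch (topic names are hypothetical; assumes a running
    # roscore and publishers on the listed topics):
    start(['/odom', '/scan'], 'example.bag')
    time.sleep(5)
    stop()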
|
11494787
|
from ..factory import Type
class messagePassportDataSent(Type):
types = None # type: "vector<PassportElementType>"
|
11494803
|
import ui, scene
img_label = 'dog'
def set_dog(sender):
global img_label
img_label = 'dog'
def set_cat(sender):
global img_label
img_label = 'cat'
class MyScene(scene.Scene):
def setup(self):
print(self.size)
print(self.bounds)
print(self.view.frame)
center = self.size/2
print(center)
self.background_color = 'gray'
self.sprite_label = 'dog'
self.sprite = scene.SpriteNode('Dog_Face',
position=(0, 400),
anchor_point=(0, 1),
#position=(0, 300),
#anchor_point=(0, 0),
#position=(400, 400),
#anchor_point=(1, 1),
##position=(400, 200),
#anchor_point=(1, 0),
parent=self)
print(self.sprite.position)
self.label_node = scene.LabelNode('Hello World',
#position=(0, 400),
#anchor_point=(0, 1),
position=(0, 300),
anchor_point=(0, 0),
#position=(400, 400),
#anchor_point=(1, 1),
##position=(400, 200),
#anchor_point=(1, 0),
parent=self)
def update(self):
global img_label
if self.sprite_label != img_label:
if img_label == 'dog':
self.sprite_label = 'dog'
self.sprite.texture = scene.Texture('Dog_Face')
else:
self.sprite_label = 'cat'
self.sprite.texture = scene.Texture('Cat_Face')
v = ui.load_view()
frame = v.frame
scene_view = scene.SceneView()
#scene_view.flex= 'WH'
scene_view.width = frame.w
scene_view.height = frame.h
scene_view.scene = MyScene()
v['view1'].add_subview(scene_view)
print('view1 frame:', v['view1'].frame)
v.present('sheet')
|
11494816
|
import os
import logging
from logging import Logger
import hashlib
from pathlib import Path
import _pickle as cPickle
from collections import defaultdict
from typing import List, Dict, Optional, NamedTuple, Union
import numpy as np
from tqdm import tqdm
from torch.utils.data import Dataset
from transformers import BertTokenizer
from kyoto_reader import KyotoReader, Document, ALL_EXOPHORS
from .read_example import PasExample
from utils.constants import TASK_ID
class InputFeatures(NamedTuple):
input_ids: List[int]
input_mask: List[bool]
segment_ids: List[int]
arguments_set: List[List[List[int]]]
overt_mask: List[List[List[int]]]
ng_token_mask: List[List[List[bool]]]
deps: List[List[int]]
class PASDataset(Dataset):
def __init__(self,
path: Union[str, Path],
cases: List[str],
exophors: List[str],
coreference: bool,
bridging: bool,
max_seq_length: int,
bert_path: Union[str, Path],
training: bool,
kc: bool,
train_targets: List[str],
pas_targets: List[str],
n_jobs: int = -1,
logger=None,
gold_path: Optional[str] = None,
) -> None:
self.path = Path(path)
self.reader = KyotoReader(self.path, extract_nes=False, n_jobs=n_jobs)
self.target_cases: List[str] = [c for c in cases if c in self.reader.target_cases and c != 'ノ']
self.target_exophors: List[str] = [e for e in exophors if e in ALL_EXOPHORS]
self.coreference: bool = coreference
self.bridging: bool = bridging
self.relations = self.target_cases + ['ノ'] * bridging + ['='] * coreference
self.kc: bool = kc
self.train_targets: List[str] = [t if t != 'case' else 'dep' for t in train_targets] # backward compatibility
self.pas_targets: List[str] = pas_targets
self.logger: Logger = logger or logging.getLogger(__file__)
special_tokens = self.target_exophors + ['NULL'] + (['NA'] if coreference else [])
self.special_to_index: Dict[str, int] = {token: max_seq_length - i - 1 for i, token
in enumerate(reversed(special_tokens))}
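        # The special tokens (exophors, NULL, NA) are pinned to the final
        # positions of the sequence, so their indices do not depend on the
        # input length.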
self.tokenizer = BertTokenizer.from_pretrained(bert_path, do_lower_case=False, tokenize_chinese_chars=False)
self.max_seq_length: int = max_seq_length
self.bert_path: Path = Path(bert_path)
        documents = list(self.reader.process_all_documents())
        # Define these attributes unconditionally so that stat() and
        # evaluation code can safely check them even in training mode.
        self.documents: Optional[List[Document]] = None
        self.gold_documents: Optional[List[Document]] = None
        if not training:
            self.documents = documents
            if gold_path is not None:
                reader = KyotoReader(Path(gold_path), extract_nes=False, n_jobs=n_jobs)
                self.gold_documents = list(reader.process_all_documents())
self.examples = self._load(documents, str(path))
def _load(self, documents: List[Document], path: str) -> List[PasExample]:
examples: List[PasExample] = []
load_cache: bool = ('BPA_DISABLE_CACHE' not in os.environ and 'BPA_OVERWRITE_CACHE' not in os.environ)
save_cache: bool = ('BPA_DISABLE_CACHE' not in os.environ)
bpa_cache_dir: Path = Path(os.environ.get('BPA_CACHE_DIR', f'/tmp/{os.environ["USER"]}/bpa_cache'))
for document in tqdm(documents, desc='processing documents'):
hash_ = self._hash(document, path, self.relations, self.target_exophors, self.kc, self.pas_targets,
self.train_targets, str(self.bert_path))
example_cache_path = bpa_cache_dir / hash_ / f'{document.doc_id}.pkl'
if example_cache_path.exists() and load_cache:
with example_cache_path.open('rb') as f:
example = cPickle.load(f)
else:
example = PasExample()
example.load(document,
cases=self.target_cases,
exophors=self.target_exophors,
coreference=self.coreference,
bridging=self.bridging,
relations=self.relations,
kc=self.kc,
pas_targets=self.pas_targets,
tokenizer=self.tokenizer)
if save_cache:
example_cache_path.parent.mkdir(exist_ok=True, parents=True)
with example_cache_path.open('wb') as f:
cPickle.dump(example, f)
            # skip documents that are too long to fit within max_seq_length
if len(example.tokens) > self.max_seq_length - len(self.special_to_index):
continue
examples.append(example)
if len(examples) == 0:
self.logger.error('No examples to process. '
f'Make sure there exist any documents in {self.path} and they are not too long.')
return examples
@staticmethod
def _hash(document, *args) -> str:
attrs = ('cases', 'corefs', 'relax_cases', 'extract_nes', 'use_pas_tag')
assert set(attrs) <= set(vars(document).keys())
vars_document = {k: v for k, v in vars(document).items() if k in attrs}
string = repr(sorted(vars_document)) + ''.join(repr(a) for a in args)
return hashlib.md5(string.encode()).hexdigest()
def _convert_example_to_feature(self,
example: PasExample,
) -> InputFeatures:
"""Loads a data file into a list of `InputBatch`s."""
vocab_size = self.tokenizer.vocab_size
max_seq_length = self.max_seq_length
num_special_tokens = len(self.special_to_index)
num_relations = len(self.relations)
tokens = example.tokens
tok_to_orig_index = example.tok_to_orig_index
orig_to_tok_index = example.orig_to_tok_index
arguments_set: List[List[List[int]]] = []
candidates_set: List[List[List[int]]] = []
overts_set: List[List[List[int]]] = []
deps: List[List[int]] = []
# subword loop
for token, orig_index in zip(tokens, tok_to_orig_index):
if orig_index is None:
deps.append([0] * max_seq_length)
else:
                ddep = example.ddeps[orig_index]  # dtid of the phrase that orig_index depends on
                # mark 1 for every token in the base phrase that orig_index depends on
deps.append([(0 if idx is None or ddep != example.dtids[idx] else 1) for idx in tok_to_orig_index])
deps[-1] += [0] * (max_seq_length - len(tok_to_orig_index))
# subsequent subword or [CLS] token or [SEP] token
if token.startswith("##") or orig_index is None:
arguments_set.append([[] for _ in range(num_relations)])
overts_set.append([[] for _ in range(num_relations)])
candidates_set.append([[] for _ in range(num_relations)])
continue
arguments: List[List[int]] = [[] for _ in range(num_relations)]
overts: List[List[int]] = [[] for _ in range(num_relations)]
for i, (rel, arg_strings) in enumerate(example.arguments_set[orig_index].items()):
for arg_string in arg_strings:
                    # arg_string examples: 著者 (an exophor), 8%C, 15%O, 2, NULL, ...
flag = None
if arg_string[-2:] in ('%C', '%N', '%O'):
flag = arg_string[-1]
arg_string = arg_string[:-2]
if arg_string in self.special_to_index:
tok_index = self.special_to_index[arg_string]
else:
tok_index = orig_to_tok_index[int(arg_string)]
if rel in self.target_cases:
if arg_string in self.target_exophors and 'zero' not in self.train_targets:
continue
if flag == 'C':
overts[i].append(tok_index)
if (flag == 'C' and 'overt' not in self.train_targets) or \
(flag == 'N' and 'dep' not in self.train_targets) or \
(flag == 'O' and 'zero' not in self.train_targets):
continue
arguments[i].append(tok_index)
arguments_set.append(arguments)
overts_set.append(overts)
            # add the special tokens as candidates even for particles and the like
candidates: List[List[int]] = []
for rel in self.relations:
if rel != '=':
cands = [orig_to_tok_index[dmid] for dmid in example.arg_candidates_set[orig_index]]
specials = self.target_exophors + ['NULL']
else:
cands = [orig_to_tok_index[dmid] for dmid in example.ment_candidates_set[orig_index]]
specials = self.target_exophors + ['NA']
cands += [self.special_to_index[special] for special in specials]
candidates.append(cands)
candidates_set.append(candidates)
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to.
input_mask = [True] * len(input_ids)
# Zero-pad up to the sequence length
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(False)
arguments_set.append([[]] * num_relations)
overts_set.append([[]] * num_relations)
candidates_set.append([[]] * num_relations)
deps.append([0] * max_seq_length)
# special tokens
for i in range(num_special_tokens):
pos = max_seq_length - num_special_tokens + i
input_ids[pos] = vocab_size + i
input_mask[pos] = True
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(arguments_set) == max_seq_length
assert len(overts_set) == max_seq_length
assert len(candidates_set) == max_seq_length
assert len(deps) == max_seq_length
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=[0] * max_seq_length,
arguments_set=[[[int(x in args) for x in range(max_seq_length)] for args in arguments]
for arguments in arguments_set],
overt_mask=[[[(x in overt) for x in range(max_seq_length)] for overt in overts]
for overts in overts_set],
ng_token_mask=[[[(x in cands) for x in range(max_seq_length)] for cands in candidates]
for candidates in candidates_set], # False -> mask, True -> keep
deps=deps,
)
return feature
def stat(self) -> dict:
n_mentions = 0
pa: Dict[str, Union[int, dict]] = defaultdict(int)
bar: Dict[str, Union[int, dict]] = defaultdict(int)
cr: Dict[str, Union[int, dict]] = defaultdict(int)
n_args_bar = defaultdict(int)
n_args_pa = defaultdict(lambda: defaultdict(int))
for arguments in (x for example in self.examples for x in example.arguments_set):
for case, args in arguments.items():
if not args:
continue
arg: str = args[0]
if case == '=':
if arg == 'NA':
cr['na'] += 1
continue
n_mentions += 1
if arg in self.target_exophors:
cr['exo'] += 1
else:
cr['ana'] += 1
else:
n_args = n_args_bar if case == 'ノ' else n_args_pa[case]
if arg == 'NULL':
n_args['null'] += 1
continue
n_args['all'] += 1
if arg in self.target_exophors:
n_args['exo'] += 1
elif '%C' in arg:
n_args['overt'] += 1
elif '%N' in arg:
n_args['dep'] += 1
elif '%O' in arg:
n_args['zero'] += 1
arguments_: List[List[str]] = list(arguments.values())
if self.coreference:
if arguments_[-1]:
cr['mentions_all'] += 1
if [arg for arg in arguments_[-1] if arg != 'NA']:
cr['mentions_tagged'] += 1
arguments_ = arguments_[:-1]
if self.bridging:
if arguments_[-1]:
bar['preds_all'] += 1
if [arg for arg in arguments_[-1] if arg != 'NULL']:
bar['preds_tagged'] += 1
arguments_ = arguments_[:-1]
if any(arguments_):
pa['preds_all'] += 1
if [arg for args in arguments_ for arg in args if arg != 'NULL']:
pa['preds_tagged'] += 1
n_args_pa_all = defaultdict(int)
for case, ans in n_args_pa.items():
for anal, num in ans.items():
n_args_pa_all[anal] += num
n_args_pa['all'] = n_args_pa_all
pa['args'] = n_args_pa
bar['args'] = n_args_bar
cr['mentions'] = n_mentions
return {'examples': len(self.examples),
'pas': pa,
'bridging': bar,
'coreference': cr,
'sentences': sum(len(doc) for doc in self.gold_documents) if self.gold_documents else None,
'bps': sum(len(doc.bp_list()) for doc in self.gold_documents) if self.gold_documents else None,
'tokens': sum(len(example.tokens) - 2 for example in self.examples),
}
def __len__(self) -> int:
return len(self.examples)
def __getitem__(self, idx) -> tuple:
feature = self._convert_example_to_feature(self.examples[idx])
input_ids = np.array(feature.input_ids) # (seq)
attention_mask = np.array(feature.input_mask) # (seq)
segment_ids = np.array(feature.segment_ids) # (seq)
arguments_ids = np.array(feature.arguments_set) # (seq, case, seq)
overt_mask = np.array(feature.overt_mask) # (seq, case, seq)
ng_token_mask = np.array(feature.ng_token_mask) # (seq, case, seq)
deps = np.array(feature.deps) # (seq, seq)
task = np.array(TASK_ID['pa']) # ()
return input_ids, attention_mask, segment_ids, ng_token_mask, arguments_ids, deps, task, overt_mask
|
11494823
|
from .helper import PillowTestCase, hopper
import datetime
from PIL import Image, ImageMode
from io import BytesIO
import os
try:
from PIL import ImageCms
from PIL.ImageCms import ImageCmsProfile
ImageCms.core.profile_open
except ImportError:
# Skipped via setUp()
pass
SRGB = "Tests/icc/sRGB_IEC61966-2-1_black_scaled.icc"
HAVE_PROFILE = os.path.exists(SRGB)
class TestImageCms(PillowTestCase):
def setUp(self):
try:
from PIL import ImageCms
# need to hit getattr to trigger the delayed import error
ImageCms.core.profile_open
except ImportError as v:
self.skipTest(v)
def skip_missing(self):
if not HAVE_PROFILE:
self.skipTest("SRGB profile not available")
def test_sanity(self):
# basic smoke test.
# this mostly follows the cms_test outline.
v = ImageCms.versions() # should return four strings
self.assertEqual(v[0], '1.0.0 pil')
self.assertEqual(list(map(type, v)), [str, str, str, str])
# internal version number
self.assertRegex(ImageCms.core.littlecms_version, r"\d+\.\d+$")
self.skip_missing()
i = ImageCms.profileToProfile(hopper(), SRGB, SRGB)
self.assert_image(i, "RGB", (128, 128))
i = hopper()
ImageCms.profileToProfile(i, SRGB, SRGB, inPlace=True)
self.assert_image(i, "RGB", (128, 128))
t = ImageCms.buildTransform(SRGB, SRGB, "RGB", "RGB")
i = ImageCms.applyTransform(hopper(), t)
self.assert_image(i, "RGB", (128, 128))
i = hopper()
t = ImageCms.buildTransform(SRGB, SRGB, "RGB", "RGB")
ImageCms.applyTransform(hopper(), t, inPlace=True)
self.assert_image(i, "RGB", (128, 128))
p = ImageCms.createProfile("sRGB")
o = ImageCms.getOpenProfile(SRGB)
t = ImageCms.buildTransformFromOpenProfiles(p, o, "RGB", "RGB")
i = ImageCms.applyTransform(hopper(), t)
self.assert_image(i, "RGB", (128, 128))
t = ImageCms.buildProofTransform(SRGB, SRGB, SRGB, "RGB", "RGB")
self.assertEqual(t.inputMode, "RGB")
self.assertEqual(t.outputMode, "RGB")
i = ImageCms.applyTransform(hopper(), t)
self.assert_image(i, "RGB", (128, 128))
# test PointTransform convenience API
hopper().point(t)
def test_name(self):
self.skip_missing()
# get profile information for file
self.assertEqual(
ImageCms.getProfileName(SRGB).strip(),
'IEC 61966-2-1 Default RGB Colour Space - sRGB')
def test_info(self):
self.skip_missing()
self.assertEqual(
ImageCms.getProfileInfo(SRGB).splitlines(), [
'sRGB IEC61966-2-1 black scaled', '',
'Copyright International Color Consortium, 2009', ''])
def test_copyright(self):
self.skip_missing()
self.assertEqual(
ImageCms.getProfileCopyright(SRGB).strip(),
'Copyright International Color Consortium, 2009')
def test_manufacturer(self):
self.skip_missing()
self.assertEqual(
ImageCms.getProfileManufacturer(SRGB).strip(),
'')
def test_model(self):
self.skip_missing()
self.assertEqual(
ImageCms.getProfileModel(SRGB).strip(),
'IEC 61966-2-1 Default RGB Colour Space - sRGB')
def test_description(self):
self.skip_missing()
self.assertEqual(
ImageCms.getProfileDescription(SRGB).strip(),
'sRGB IEC61966-2-1 black scaled')
def test_intent(self):
self.skip_missing()
self.assertEqual(ImageCms.getDefaultIntent(SRGB), 0)
self.assertEqual(ImageCms.isIntentSupported(
SRGB, ImageCms.INTENT_ABSOLUTE_COLORIMETRIC,
ImageCms.DIRECTION_INPUT), 1)
def test_profile_object(self):
# same, using profile object
p = ImageCms.createProfile("sRGB")
# self.assertEqual(ImageCms.getProfileName(p).strip(),
# 'sRGB built-in - (lcms internal)')
# self.assertEqual(ImageCms.getProfileInfo(p).splitlines(),
# ['sRGB built-in', '', 'WhitePoint : D65 (daylight)', '', ''])
self.assertEqual(ImageCms.getDefaultIntent(p), 0)
self.assertEqual(ImageCms.isIntentSupported(
p, ImageCms.INTENT_ABSOLUTE_COLORIMETRIC,
ImageCms.DIRECTION_INPUT), 1)
def test_extensions(self):
# extensions
i = Image.open("Tests/images/rgb.jpg")
p = ImageCms.getOpenProfile(BytesIO(i.info["icc_profile"]))
self.assertEqual(
ImageCms.getProfileName(p).strip(),
'IEC 61966-2.1 Default RGB colour space - sRGB')
def test_exceptions(self):
# Test mode mismatch
psRGB = ImageCms.createProfile("sRGB")
pLab = ImageCms.createProfile("LAB")
t = ImageCms.buildTransform(pLab, psRGB, "LAB", "RGB")
self.assertRaises(ValueError, t.apply_in_place, hopper("RGBA"))
# the procedural pyCMS API uses PyCMSError for all sorts of errors
self.assertRaises(
ImageCms.PyCMSError,
ImageCms.profileToProfile, hopper(), "foo", "bar")
self.assertRaises(
ImageCms.PyCMSError,
ImageCms.buildTransform, "foo", "bar", "RGB", "RGB")
self.assertRaises(
ImageCms.PyCMSError,
ImageCms.getProfileName, None)
self.skip_missing()
self.assertRaises(
ImageCms.PyCMSError,
ImageCms.isIntentSupported, SRGB, None, None)
def test_display_profile(self):
# try fetching the profile for the current display device
ImageCms.get_display_profile()
def test_lab_color_profile(self):
ImageCms.createProfile("LAB", 5000)
ImageCms.createProfile("LAB", 6500)
def test_unsupported_color_space(self):
self.assertRaises(ImageCms.PyCMSError,
ImageCms.createProfile, "unsupported")
def test_invalid_color_temperature(self):
self.assertRaises(ImageCms.PyCMSError,
ImageCms.createProfile, "LAB", "invalid")
def test_simple_lab(self):
i = Image.new('RGB', (10, 10), (128, 128, 128))
psRGB = ImageCms.createProfile("sRGB")
pLab = ImageCms.createProfile("LAB")
t = ImageCms.buildTransform(psRGB, pLab, "RGB", "LAB")
i_lab = ImageCms.applyTransform(i, t)
self.assertEqual(i_lab.mode, 'LAB')
k = i_lab.getpixel((0, 0))
# not a linear luminance map. so L != 128:
self.assertEqual(k, (137, 128, 128))
l_data = i_lab.getdata(0)
a_data = i_lab.getdata(1)
b_data = i_lab.getdata(2)
self.assertEqual(list(l_data), [137] * 100)
self.assertEqual(list(a_data), [128] * 100)
self.assertEqual(list(b_data), [128] * 100)
def test_lab_color(self):
psRGB = ImageCms.createProfile("sRGB")
pLab = ImageCms.createProfile("LAB")
t = ImageCms.buildTransform(psRGB, pLab, "RGB", "LAB")
# Need to add a type mapping for some PIL type to TYPE_Lab_8 in
# findLCMSType, and have that mapping work back to a PIL mode
# (likely RGB).
i = ImageCms.applyTransform(hopper(), t)
self.assert_image(i, "LAB", (128, 128))
# i.save('temp.lab.tif') # visually verified vs PS.
target = Image.open('Tests/images/hopper.Lab.tif')
self.assert_image_similar(i, target, 3.5)
def test_lab_srgb(self):
psRGB = ImageCms.createProfile("sRGB")
pLab = ImageCms.createProfile("LAB")
t = ImageCms.buildTransform(pLab, psRGB, "LAB", "RGB")
img = Image.open('Tests/images/hopper.Lab.tif')
img_srgb = ImageCms.applyTransform(img, t)
# img_srgb.save('temp.srgb.tif') # visually verified vs ps.
self.assert_image_similar(hopper(), img_srgb, 30)
self.assertTrue(img_srgb.info['icc_profile'])
profile = ImageCmsProfile(BytesIO(img_srgb.info['icc_profile']))
self.assertIn('sRGB', ImageCms.getProfileDescription(profile))
def test_lab_roundtrip(self):
# check to see if we're at least internally consistent.
psRGB = ImageCms.createProfile("sRGB")
pLab = ImageCms.createProfile("LAB")
t = ImageCms.buildTransform(psRGB, pLab, "RGB", "LAB")
t2 = ImageCms.buildTransform(pLab, psRGB, "LAB", "RGB")
i = ImageCms.applyTransform(hopper(), t)
self.assertEqual(i.info['icc_profile'],
ImageCmsProfile(pLab).tobytes())
out = ImageCms.applyTransform(i, t2)
self.assert_image_similar(hopper(), out, 2)
def test_profile_tobytes(self):
i = Image.open("Tests/images/rgb.jpg")
p = ImageCms.getOpenProfile(BytesIO(i.info["icc_profile"]))
p2 = ImageCms.getOpenProfile(BytesIO(p.tobytes()))
# not the same bytes as the original icc_profile,
# but it does roundtrip
self.assertEqual(p.tobytes(), p2.tobytes())
self.assertEqual(ImageCms.getProfileName(p),
ImageCms.getProfileName(p2))
self.assertEqual(ImageCms.getProfileDescription(p),
ImageCms.getProfileDescription(p2))
def test_extended_information(self):
self.skip_missing()
o = ImageCms.getOpenProfile(SRGB)
p = o.profile
def assert_truncated_tuple_equal(tup1, tup2, digits=10):
# Helper function to reduce precision of tuples of floats
# recursively and then check equality.
power = 10 ** digits
def truncate_tuple(tuple_or_float):
return tuple(
truncate_tuple(val) if isinstance(val, tuple)
else int(val * power) / power for val in tuple_or_float)
self.assertEqual(truncate_tuple(tup1), truncate_tuple(tup2))
self.assertEqual(p.attributes, 4294967296)
assert_truncated_tuple_equal(
p.blue_colorant,
((0.14306640625, 0.06060791015625, 0.7140960693359375),
(0.1558847490315394, 0.06603820639433387, 0.06060791015625)))
assert_truncated_tuple_equal(
p.blue_primary,
((0.14306641366715667, 0.06060790921083026, 0.7140960805782015),
(0.15588475410450106, 0.06603820408959558, 0.06060790921083026)))
assert_truncated_tuple_equal(
p.chromatic_adaptation,
(((1.04791259765625, 0.0229339599609375, -0.050201416015625),
(0.02960205078125, 0.9904632568359375, -0.0170745849609375),
(-0.009246826171875, 0.0150604248046875, 0.7517852783203125)),
((1.0267159024652783, 0.022470062342089134, 0.0229339599609375),
(0.02951378324103937, 0.9875098886387147, 0.9904632568359375),
(-0.012205438066465256, 0.01987915407854985, 0.0150604248046875))))
self.assertIsNone(p.chromaticity)
self.assertEqual(p.clut, {
0: (False, False, True),
1: (False, False, True),
2: (False, False, True),
3: (False, False, True)
})
self.assertIsNone(p.colorant_table)
self.assertIsNone(p.colorant_table_out)
self.assertIsNone(p.colorimetric_intent)
self.assertEqual(p.connection_space, 'XYZ ')
self.assertEqual(p.copyright,
'Copyright International Color Consortium, 2009')
self.assertEqual(p.creation_date,
datetime.datetime(2009, 2, 27, 21, 36, 31))
self.assertEqual(p.device_class, 'mntr')
assert_truncated_tuple_equal(
p.green_colorant,
((0.3851470947265625, 0.7168731689453125, 0.097076416015625),
(0.32119769927720654, 0.5978443449048152, 0.7168731689453125)))
assert_truncated_tuple_equal(
p.green_primary,
((0.3851470888162112, 0.7168731974161346, 0.09707641738998518),
(0.32119768793686687, 0.5978443567149709, 0.7168731974161346)))
self.assertEqual(p.header_flags, 0)
self.assertEqual(p.header_manufacturer, '\x00\x00\x00\x00')
self.assertEqual(p.header_model, '\x00\x00\x00\x00')
self.assertEqual(p.icc_measurement_condition, {
'backing': (0.0, 0.0, 0.0),
'flare': 0.0,
'geo': 'unknown',
'observer': 1,
'illuminant_type': 'D65'
})
self.assertEqual(p.icc_version, 33554432)
self.assertIsNone(p.icc_viewing_condition)
self.assertEqual(p.intent_supported, {
0: (True, True, True),
1: (True, True, True),
2: (True, True, True),
3: (True, True, True)
})
self.assertTrue(p.is_matrix_shaper)
self.assertEqual(p.luminance, ((0.0, 80.0, 0.0), (0.0, 1.0, 80.0)))
self.assertIsNone(p.manufacturer)
assert_truncated_tuple_equal(
p.media_black_point,
((0.012054443359375, 0.0124969482421875, 0.01031494140625),
(0.34573304157549234, 0.35842450765864337, 0.0124969482421875)))
assert_truncated_tuple_equal(
p.media_white_point,
((0.964202880859375, 1.0, 0.8249053955078125),
(0.3457029219802284, 0.3585375327567059, 1.0)))
assert_truncated_tuple_equal(
(p.media_white_point_temperature,),
(5000.722328847392,))
self.assertEqual(p.model,
'IEC 61966-2-1 Default RGB Colour Space - sRGB')
self.assertIsNone(p.perceptual_rendering_intent_gamut)
self.assertEqual(
p.profile_description, 'sRGB IEC61966-2-1 black scaled')
self.assertEqual(
p.profile_id, b')\xf8=\xde\xaf\xf2U\xaexB\xfa\xe4\xca\x839\r')
assert_truncated_tuple_equal(
p.red_colorant,
((0.436065673828125, 0.2224884033203125, 0.013916015625),
(0.6484536316398539, 0.3308524880306778, 0.2224884033203125)))
assert_truncated_tuple_equal(
p.red_primary,
((0.43606566581047446, 0.22248840582960838, 0.013916015621759925),
(0.6484536250319214, 0.3308524944738204, 0.22248840582960838)))
self.assertEqual(p.rendering_intent, 0)
self.assertIsNone(p.saturation_rendering_intent_gamut)
self.assertIsNone(p.screening_description)
self.assertIsNone(p.target)
self.assertEqual(p.technology, 'CRT ')
self.assertEqual(p.version, 2.0)
self.assertEqual(p.viewing_condition,
'Reference Viewing Condition in IEC 61966-2-1')
self.assertEqual(p.xcolor_space, 'RGB ')
def test_deprecations(self):
self.skip_missing()
o = ImageCms.getOpenProfile(SRGB)
p = o.profile
def helper_deprecated(attr, expected):
result = self.assert_warning(DeprecationWarning, getattr, p, attr)
self.assertEqual(result, expected)
# p.color_space
helper_deprecated("color_space", "RGB")
# p.pcs
helper_deprecated("pcs", "XYZ")
# p.product_copyright
helper_deprecated(
"product_copyright", "Copyright International Color Consortium, 2009"
)
# p.product_desc
helper_deprecated("product_desc", "sRGB IEC61966-2-1 black scaled")
# p.product_description
helper_deprecated("product_description", "sRGB IEC61966-2-1 black scaled")
# p.product_manufacturer
helper_deprecated("product_manufacturer", "")
# p.product_model
helper_deprecated(
"product_model", "IEC 61966-2-1 Default RGB Colour Space - sRGB"
)
def test_profile_typesafety(self):
""" Profile init type safety
        Pre-patch, these constructors would segfault; post-patch they should raise a TypeError.
"""
with self.assertRaises(TypeError):
ImageCms.ImageCmsProfile(0).tobytes()
with self.assertRaises(TypeError):
ImageCms.ImageCmsProfile(1).tobytes()
def assert_aux_channel_preserved(self, mode,
transform_in_place, preserved_channel):
def create_test_image():
# set up test image with something interesting in the tested aux
# channel.
nine_grid_deltas = [ # noqa: E128
(-1, -1), (-1, 0), (-1, 1),
(0, -1), (0, 0), (0, 1),
(1, -1), (1, 0), (1, 1),
]
chans = []
bands = ImageMode.getmode(mode).bands
for band_ndx in range(len(bands)):
channel_type = 'L' # 8-bit unorm
channel_pattern = hopper(channel_type)
# paste pattern with varying offsets to avoid correlation
# potentially hiding some bugs (like channels getting mixed).
paste_offset = (
int(band_ndx / float(len(bands)) * channel_pattern.size[0]),
int(band_ndx / float(len(bands) * 2) * channel_pattern.size[1])
)
channel_data = Image.new(channel_type, channel_pattern.size)
for delta in nine_grid_deltas:
channel_data.paste(
channel_pattern,
tuple(paste_offset[c] + delta[c] * channel_pattern.size[c]
for c in range(2)),
)
chans.append(channel_data)
return Image.merge(mode, chans)
source_image = create_test_image()
source_image_aux = source_image.getchannel(preserved_channel)
# create some transform, it doesn't matter which one
source_profile = ImageCms.createProfile("sRGB")
destination_profile = ImageCms.createProfile("sRGB")
t = ImageCms.buildTransform(
source_profile, destination_profile, inMode=mode, outMode=mode)
# apply transform
if transform_in_place:
ImageCms.applyTransform(source_image, t, inPlace=True)
result_image = source_image
else:
result_image = ImageCms.applyTransform(
source_image, t, inPlace=False)
result_image_aux = result_image.getchannel(preserved_channel)
self.assert_image_equal(source_image_aux, result_image_aux)
def test_preserve_auxiliary_channels_rgba(self):
self.assert_aux_channel_preserved(
mode='RGBA', transform_in_place=False, preserved_channel='A')
def test_preserve_auxiliary_channels_rgba_in_place(self):
self.assert_aux_channel_preserved(
mode='RGBA', transform_in_place=True, preserved_channel='A')
def test_preserve_auxiliary_channels_rgbx(self):
self.assert_aux_channel_preserved(
mode='RGBX', transform_in_place=False, preserved_channel='X')
def test_preserve_auxiliary_channels_rgbx_in_place(self):
self.assert_aux_channel_preserved(
mode='RGBX', transform_in_place=True, preserved_channel='X')
def test_auxiliary_channels_isolated(self):
# test data in aux channels does not affect non-aux channels
aux_channel_formats = [
# format, profile, color-only format, source test image
('RGBA', 'sRGB', 'RGB', hopper('RGBA')),
('RGBX', 'sRGB', 'RGB', hopper('RGBX')),
('LAB', 'LAB', 'LAB', Image.open('Tests/images/hopper.Lab.tif')),
]
for src_format in aux_channel_formats:
for dst_format in aux_channel_formats:
for transform_in_place in [True, False]:
# inplace only if format doesn't change
if transform_in_place and src_format[0] != dst_format[0]:
continue
# convert with and without AUX data, test colors are equal
source_profile = ImageCms.createProfile(src_format[1])
destination_profile = ImageCms.createProfile(dst_format[1])
source_image = src_format[3]
test_transform = ImageCms.buildTransform(
source_profile, destination_profile,
inMode=src_format[0], outMode=dst_format[0])
# test conversion from aux-ful source
if transform_in_place:
test_image = source_image.copy()
ImageCms.applyTransform(
test_image, test_transform, inPlace=True)
else:
test_image = ImageCms.applyTransform(
source_image, test_transform, inPlace=False)
# reference conversion from aux-less source
reference_transform = ImageCms.buildTransform(
source_profile, destination_profile,
inMode=src_format[2], outMode=dst_format[2])
reference_image = ImageCms.applyTransform(
source_image.convert(src_format[2]),
reference_transform)
self.assert_image_equal(test_image.convert(dst_format[2]),
reference_image)
|
11494836
|
from src.datasets.toxic_spans_tokens import *
from src.datasets.toxic_spans_spans import *
from src.datasets.toxic_spans_tokens_spans import *
from src.datasets.toxic_spans_multi_spans import *
from src.datasets.toxic_spans_crf_tokens import *
|
11494862
|
if sm.getRandomIntBelow(2) == 0:
sm.teleportToPortal(10) # Final portal
else:
sm.warpInstanceOut(993000601, 0) # Hidden Street : Secluded Forest
|
11494881
|
import json
import os
import socket
from resotolib.baseresources import BaseResource
from resotolib.config import Config
from resotolib.graph import Graph
from resotolib.utils import RWLock
import resotolib.logger
from typing import Iterable, List, Union, Callable, Any, Dict
from googleapiclient import discovery
from googleapiclient.errors import HttpError as GoogleApiClientHttpError
from googleapiclient.discovery_cache.base import Cache as GoogleApiClientCache
from google.oauth2 import service_account
from datetime import datetime
from retrying import retry
from tenacity import Retrying, stop_after_attempt, retry_if_exception_type
# from google.oauth2.credentials import UserAccessTokenCredentials
log = resotolib.logger.getLogger("resoto." + __name__)
resotolib.logger.getLogger("googleapiclient").setLevel(resotolib.logger.ERROR)
SCOPES = ["https://www.googleapis.com/auth/cloud-platform"]
def retry_on_error(e):
if isinstance(e, socket.timeout):
log.debug("Got socket timeout - retrying")
return True
return False
class MemoryCache(GoogleApiClientCache):
_cache = {}
def get(self, url):
return MemoryCache._cache.get(url)
def set(self, url, content):
MemoryCache._cache[url] = content
class Credentials:
_credentials = {}
_initialized = False
_lock = RWLock()
@staticmethod
def load():
with Credentials._lock.write_access:
if not Credentials._initialized:
for sa_data in Config.gcp.service_account:
credentials = load_credentials(sa_data)
for project in list_credential_projects(credentials):
Credentials._credentials[project["id"]] = credentials
Credentials._initialized = True
@staticmethod
def get(project_id: str):
Credentials.load()
with Credentials._lock.read_access:
return Credentials._credentials.get(project_id)
@staticmethod
def all() -> Dict:
Credentials.load()
with Credentials._lock.read_access:
return dict(Credentials._credentials)
@staticmethod
def reload():
with Credentials._lock.write_access:
Credentials._initialized = False
Credentials.load()
def load_credentials(sa_data: str):
if len(sa_data) == 0:
return None
if os.path.isfile(sa_data):
return service_account.Credentials.from_service_account_file(
sa_data, scopes=SCOPES
)
else:
return service_account.Credentials.from_service_account_info(
json.loads(sa_data), scopes=SCOPES
)
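# Hedged example (the path and inline JSON below are placeholders, not values from the
# original configuration): the helper accepts either a path to a service-account file
# or the JSON content itself.
#   creds = load_credentials("/path/to/service_account.json")
#   creds = load_credentials('{"type": "service_account", "project_id": "example", ...}')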
@retry(
stop_max_attempt_number=10,
wait_exponential_multiplier=3000,
wait_exponential_max=300000,
retry_on_exception=retry_on_error,
)
def gcp_client(service: str, version: str, credentials: str):
client = discovery.build(
service, version, credentials=credentials, cache=MemoryCache()
)
return client
def list_credential_projects(credentials) -> List:
ret = []
try:
client = gcp_client("cloudresourcemanager", "v1", credentials=credentials)
projects = client.projects()
for project in paginate(projects, "list", "projects"):
ctime = project.get("createTime")
if ctime is not None:
ctime = iso2datetime(ctime)
project_name = project.get("name")
project_id = project.get("projectId")
p = {
"id": project_id,
"name": project_name,
"ctime": ctime,
}
ret.append(p)
except GoogleApiClientHttpError:
log.error(
(
"Unable to load projects from cloudresourcemanager"
" - falling back to local credentials information"
)
)
p = {
"id": credentials.project_id,
"name": credentials.project_id,
}
ret.append(p)
return ret
def iso2datetime(ts: str) -> datetime:
if ts is None:
return
if ts.endswith("Z"):
ts = ts[:-1] + "+00:00"
if ts is not None:
return datetime.fromisoformat(ts)
def paginate(
gcp_resource: Callable,
method_name: str,
items_name: str,
subitems_name: str = None,
exclude_region_resources: bool = False,
**kwargs,
) -> Iterable:
"""Paginate GCP API list and aggregatedList results.
Args:
gcp_resource: GCP resource on which we do our paging
method_name: list method to call. Usually `list` or `aggregatedList`
items_name: Name of the key in our result that contains the list of items.
Usually `items`
subitems_name: When using aggregatedList this contains the actual items.
Usually the same as the gcp_resource name. E.g. `disks` when requesting
disks, `instances` when fetching instances, etc.
exclude_region_resources: Regional resources have their own API and can be
excluded from aggregatedList calls if so desired
"""
next_method_name = method_name + "_next"
method = getattr(gcp_resource, method_name)
request = method(**kwargs)
while request is not None:
for attempt in Retrying(
reraise=True,
stop=stop_after_attempt(10),
retry=retry_if_exception_type(socket.timeout),
):
with attempt:
result = request.execute()
if items_name in result:
items = result[items_name]
if isinstance(items, dict):
for location, item in items.items():
if (
method_name == "aggregatedList"
and exclude_region_resources
and str(location).startswith("regions/")
):
continue
if subitems_name in item:
yield from item[subitems_name]
else:
yield from items
if hasattr(gcp_resource, next_method_name):
method = getattr(gcp_resource, next_method_name)
request = method(request, result)
else:
request = None
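# Hedged usage sketch (the project id and service names are placeholders; assumes valid
# credentials were loaded via Credentials). Defined here for illustration only and never
# called in this module.
def _example_paginate_usage(project_id: str) -> None:
    client = gcp_client("compute", "v1", credentials=Credentials.get(project_id))
    for disk in paginate(
        client.disks(),
        "aggregatedList",
        "items",
        subitems_name="disks",
        project=project_id,
    ):
        log.debug(disk.get("name"))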
def get_result_data(result: Dict, value: Union[str, Callable]) -> Any:
"""Returns data from a GCP API call result dict.
Args:
result: Dict containing the result or a GCP API execute() call.
value: Either directly the name of a key found in result or
a callable like a lambda that finds the relevant data withing
result.
"""
data = None
if callable(value):
try:
data = value(result)
except Exception:
log.exception(f"Exception while trying to fetch data calling {value}")
elif value in result:
data = result[value]
return data
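# Hedged examples (the result dicts are illustrative, not real API responses):
#   get_result_data({"name": "disk-1"}, "name")                                -> "disk-1"
#   get_result_data({"labels": {"env": "dev"}}, lambda r: r["labels"]["env"])  -> "dev"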
def common_resource_kwargs(resource: BaseResource) -> Dict:
common_kwargs = {}
if resource.account().id != "undefined" and "project" in resource.resource_args:
common_kwargs["project"] = resource.account().id
if resource.zone().name != "undefined" and "zone" in resource.resource_args:
common_kwargs["zone"] = resource.zone().name
elif resource.region().name != "undefined" and "region" in resource.resource_args:
common_kwargs["region"] = resource.region().name
return common_kwargs
def delete_resource(resource: BaseResource) -> bool:
delete_kwargs = {str(resource._delete_identifier): resource.name}
common_kwargs = common_resource_kwargs(resource)
delete_kwargs.update(common_kwargs)
gr = gcp_resource(resource)
request = gr.delete(**delete_kwargs)
request.execute()
return True
def update_label(resource: BaseResource, key: str, value: str) -> bool:
get_kwargs = {str(resource._get_identifier): resource.name}
set_labels_kwargs = {str(resource._set_label_identifier): resource.name}
common_kwargs = common_resource_kwargs(resource)
get_kwargs.update(common_kwargs)
set_labels_kwargs.update(common_kwargs)
labels = dict(resource.tags)
if value is None:
if key in labels:
del labels[key]
else:
return False
else:
labels.update({key: value})
body = {"labels": labels, "labelFingerprint": resource.label_fingerprint}
set_labels_kwargs["body"] = body
gr = gcp_resource(resource)
request = gr.setLabels(**set_labels_kwargs)
response = request.execute()
# Update label_fingerprint
request = gr.get(**get_kwargs)
response = request.execute()
resource.label_fingerprint = response.get("labelFingerprint")
return True
def gcp_service(resource: BaseResource, graph: Graph = None):
service_kwargs = {}
if resource.account().id != "undefined":
service_kwargs["credentials"] = Credentials.get(resource.account(graph).id)
return gcp_client(resource.client, resource.api_version, **service_kwargs)
def gcp_resource(resource: BaseResource, graph: Graph = None):
service = gcp_service(resource, graph)
gr = getattr(service, resource._client_method)
return gr()
|
11494907
|
import scripts.clausecat.clausecat_component
import scripts.clausecat.clause_segmentation
import scripts.clausecat.clausecat_reader
import scripts.clausecat.clausecat_model
import scripts.clausecat.clause_aggregation
import benepar
|
11494919
|
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column
from sqlalchemy.types import String, Integer
Base = declarative_base()
class World(Base):
__tablename__ = "world"
id = Column(Integer, primary_key = True)
randomNumber = Column(Integer)
def serialize(self):
return {
'id' : int(self.id),
'randomNumber' : int(self.randomNumber)
}
|
11494966
|
import matplotlib.pyplot as plt
import numpy as np
from slam.FactorGraphSimulator import read_factor_graph_from_file
from utils.Visualization import plot_2d_samples
from slam.Variables import Variable, VariableType
import os
from slam.RunBatch import group_nodes_factors_incrementally
from scipy import stats
import matplotlib
from factors.Factors import PriorFactor, SE2RelativeGaussianLikelihoodFactor
from geometry.TwoDimension import SE2Pose
matplotlib.rcParams.update({'font.size': 16})
if __name__ == '__main__':
if_side_plots = False
    side_plot_type = "kde"  # or "hist"
targ_var_name = "L1"
seed_dir = "res/seed0"
case_list = [seed_dir+'/'+dir for dir in os.listdir(seed_dir) if os.path.isdir(seed_dir+'/'+dir)]
plot_args = {'xlim': (-150, 400), 'ylim': (-150, 400), 'fig_size': (8, 8), 'truth_label_offset': (3, -3)}
incremental_step = 1
num_samples = 500
kde_bw = 'silverman'
for case_folder in case_list:
gtsam_folder = "run6"
fg_file = case_folder+"/factor_graph.fg"
gtsam_dir = f"{case_folder}/{gtsam_folder}"
nodes, truth, factors = read_factor_graph_from_file(fg_file)
plot_dir = f"{gtsam_dir}/traj_video"
        if not os.path.exists(plot_dir):
            os.mkdir(plot_dir)
nodes_factors_by_step = group_nodes_factors_incrementally(
nodes=nodes, factors=factors, incremental_step=incremental_step)
rbt_vars = []
var2pose = {}
odom_x = []
odom_y = []
for step in range(len(nodes_factors_by_step)):
step_nodes, step_factors = nodes_factors_by_step[step]
for f in step_factors:
if isinstance(f, PriorFactor):
rbt_vars.append(f.vars[0])
var2pose[f.vars[0]] = SE2Pose(*f.observation)
odom_y.append(var2pose[rbt_vars[-1]].y)
odom_x.append(var2pose[rbt_vars[-1]].x)
elif isinstance(f, SE2RelativeGaussianLikelihoodFactor):
if f.var1 == rbt_vars[-1]:
var2pose[f.var2] = var2pose[f.var1] * SE2Pose(*f.observation)
rbt_vars.append(f.var2)
odom_y.append(var2pose[rbt_vars[-1]].y)
odom_x.append(var2pose[rbt_vars[-1]].x)
cur_factors = []
for step in range(len(nodes_factors_by_step)):
step_file_prefix = f"{plot_dir}/step{step}"
cur_sample = None
step_nodes, step_factors = nodes_factors_by_step[step]
recent_rbt_vars = []
for var in step_nodes:
if var.type == VariableType.Pose:
recent_rbt_vars.append(var)
cur_factors += step_factors
if gtsam_folder[:3] == "cae":
sol_label = "mm-iSAM"
order_file = f"{gtsam_dir}/batch{step+1}.ordering"
sample_file = f"{gtsam_dir}/batch{step+1}"
elif gtsam_folder[:3] == "gts":
sol_label = "max-mixtures"
step_offset = 0
order_file = f"{gtsam_dir}/batch_{step}_ordering"
sample_file= f"{gtsam_dir}/batch{step}"
elif gtsam_folder[:3] == "dyn":
sample_file = f"{gtsam_dir}/step{step}.sample"
order_file = f"{gtsam_dir}/step{step}_ordering"
step_offset = 0
sol_label = "Nested sampling"
else:
order_file = f"{gtsam_dir}/step{step}_ordering"
sample_file = f"{gtsam_dir}/step{step}"
step_offset = 0
sol_label = "NF-iSAM"
if os.path.exists(sample_file):
cur_sample = np.loadtxt(fname=sample_file)
if cur_sample.shape[0] > num_samples:
cur_sample = cur_sample[np.random.choice(np.arange(len(cur_sample)), num_samples, False)]
order = Variable.file2vars(order_file=order_file)
if not if_side_plots:
fig, ax = plt.subplots(figsize=plot_args['fig_size'])
ax.plot(odom_x, odom_y, '-', c = '0.8')
plot_2d_samples(ax=ax, samples_array=cur_sample, variable_ordering=order,
show_plot=False, equal_axis=False,
# truth={variable: pose for variable, pose in
# truth.items() if variable in order},
# truth_factors={factor for factor in cur_factors},
truth=truth,
truth_factors=factors,
file_name=f"{step_file_prefix}.png", title=f'{sol_label} (step {step})',
plot_all_meas = False,
plot_meas_give_pose = recent_rbt_vars,
rbt_traj_no_samples = True,
truth_R2 = True,
truth_SE2 = False,
truth_odometry_color = 'k',
truth_landmark_markersize = 15,
**plot_args)
plt.close()
else:
# start with a square Figure
fig = plt.figure(figsize=plot_args['fig_size'])
fig.suptitle(f'{sol_label} (step {step})')
# Add a gridspec with two rows and two columns and a ratio of 2 to 7 between
# the size of the marginal axes and the main axes in both directions.
# Also adjust the subplot parameters for a square plot.
gs = fig.add_gridspec(2, 2, width_ratios=(5, 1), height_ratios=(1, 5),
left=0.15, right=0.95, bottom=0.1, top=0.9,
wspace=0.05, hspace=0.05)
ax = fig.add_subplot(gs[1, 0])
ax_histx = fig.add_subplot(gs[0, 0], sharex=ax)
ax_histy = fig.add_subplot(gs[1, 1], sharey=ax)
ax_histx.tick_params(axis="x", labelbottom=False)
ax_histy.tick_params(axis="y", labelleft=False)
plot_2d_samples(ax=ax, samples_array=cur_sample, variable_ordering=order,
show_plot=False, equal_axis=False,
truth={variable: pose for variable, pose in
truth.items() if variable in order},
truth_factors={factor for factor in cur_factors},
**plot_args)
# use the previously defined function
exist_names = [var.name for var in order]
if targ_var_name in set(exist_names):
targ_var = order[exist_names.index(targ_var_name)]
straight_x = np.linspace(truth[targ_var][1], plot_args['ylim'][1], 10)
straight_y = np.linspace(truth[targ_var][0], plot_args['xlim'][1], 10)
ax.plot(straight_y, truth[targ_var][1] * np.ones_like(straight_y), '--r')
ax.plot(truth[targ_var][0] * np.ones_like(straight_x), straight_x, '--r')
cur_dim = 0
for var in order:
if var.name == targ_var_name:
break
cur_dim += var.dim
x = cur_sample[:, cur_dim]
y = cur_sample[:, cur_dim+1]
if side_plot_type == "hist":
binwidth = 1.0
xymax = max(np.max(np.abs(x)), np.max(np.abs(y)))
lim = (int(xymax / binwidth) + 1) * binwidth
bins = np.arange(-lim, lim + binwidth, binwidth)
ax_histx.hist(x, bins=bins)
ax_histy.hist(y, bins=bins, orientation='horizontal')
else:
pts = np.linspace(plot_args['xlim'][0], plot_args['xlim'][1], 500)
x_kernel = stats.gaussian_kde(x, bw_method=kde_bw)
y_kernel = stats.gaussian_kde(y, bw_method=kde_bw)
ax_histx.plot(pts, x_kernel(pts), '-b', label=f'{targ_var_name}x')
ax_histx.legend(prop={'size': 9})
ax_histy.plot(y_kernel(pts), pts, '-b', label=f'{targ_var_name}y')
ax_histy.set_xlim([0, 1.1*max(y_kernel(pts))])
# ax_histy.invert_yaxis()
ax_histy.legend(prop={'size': 9})
else:
ax_histx.axis("off")
ax_histy.axis("off")
plt.savefig(f"{step_file_prefix}.png", dpi=300)
plt.show()
else:
if not if_side_plots:
plt.figure(figsize=plot_args['fig_size'])
plt.plot(0,0)
plt.xlim(plot_args['xlim'])
plt.ylim(plot_args['ylim'])
plt.title(f"{sol_label} (step {step})")
plt.xlabel('x(m)')
plt.ylabel('y(m)')
plt.savefig(f"{step_file_prefix}.png", dpi=300)
plt.show()
plt.close()
else:
fig = plt.figure(figsize=plot_args['fig_size'])
fig.suptitle(f"{sol_label} (step {step})")
# Add a gridspec with two rows and two columns and a ratio of 2 to 7 between
# the size of the marginal axes and the main axes in both directions.
# Also adjust the subplot parameters for a square plot.
gs = fig.add_gridspec(2, 2, width_ratios=(5, 1), height_ratios=(1, 5),
left=0.15, right=0.95, bottom=0.1, top=0.9,
wspace=0.05, hspace=0.05)
ax = fig.add_subplot(gs[1, 0])
ax_histx = fig.add_subplot(gs[0, 0], sharex=ax)
ax_histy = fig.add_subplot(gs[1, 1], sharey=ax)
ax_histy.axis("off")
ax_histx.axis("off")
ax_histx.tick_params(axis="x", labelbottom=False)
ax_histy.tick_params(axis="y", labelleft=False)
ax.plot(0,0)
ax.set_xlim(plot_args['xlim'])
ax.set_ylim(plot_args['ylim'])
ax.set_xlabel('x (m)')
ax.set_ylabel('y (m)')
plt.savefig(f"{step_file_prefix}.png", dpi=300)
plt.show()
plt.close()
|
11494989
|
import os
from collections import OrderedDict
import torch
import torch.nn as nn
from .. import layer as vn_layer
from .brick import shufflenet as bsn
__all__ = ['shufflenetg2', 'shufflenetg3']
# default shufflenet g2
class Shufflenet(nn.Module):
def __init__(self, cfg):
super().__init__()
out_planes = cfg['out_planes']
num_blocks = cfg['num_blocks']
groups = cfg['groups']
# Network
layers_list = [
# Sequence 0 : input = image tensor
OrderedDict([
('stage3/convbatchrelu', vn_layer.Conv2dBatchReLU(3, 24, 3, 2)),
('stage3/max', nn.MaxPool2d(3, 2, 1)),
]),
OrderedDict([
('Stage4', bsn.Stage(24, out_planes[0], groups, num_blocks[0])),
]),
OrderedDict([
('Stage5', bsn.Stage(out_planes[0], out_planes[1], groups, num_blocks[1])),
]),
OrderedDict([
('Stage6', bsn.Stage(out_planes[1], out_planes[2], groups, num_blocks[2])),
# the following is extra
]),
]
self.layers = nn.ModuleList([nn.Sequential(layer_dict) for layer_dict in layers_list])
def forward(self, x):
stem = self.layers[0](x)
stage4 = self.layers[1](stem)
stage5 = self.layers[2](stage4)
stage6 = self.layers[3](stage5)
features = [stage6, stage5, stage4]
return features
def shufflenetg2():
cfg = {
'out_planes': [200, 400, 800],
'num_blocks': [4, 8, 4],
'groups': 2
}
return Shufflenet(cfg)
def shufflenetg3():
cfg = {
'out_planes': [240, 480, 960],
'num_blocks': [4, 8, 4],
'groups': 3
}
return Shufflenet(cfg)
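# Hedged usage sketch (the input resolution is illustrative): the backbone returns three
# feature maps ordered coarse-to-fine, i.e. [stage6, stage5, stage4]. Defined for
# illustration only and never called in this module.
def _example_backbone_usage():
    net = shufflenetg2()
    features = net(torch.randn(1, 3, 416, 416))
    return [f.shape for f in features]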
|
11495018
|
# Imports assumed for this excerpt (they are not shown in the original fragment): numpy
# for the arrays and trapezoidal integration, matplotlib for the plot further below.
import numpy as np
import matplotlib.pyplot as plt
def pressure(r, ro, g):
    """Hydrostatic pressure: P(r) is the integral of rho(r') * g(r') dr' from r to the surface."""
    p = np.zeros(len(r))
    r = r * 1000  # radii assumed to be given in km; convert to m so P comes out in Pa
    for i in range(0, len(r)):
        r1 = r[i:len(r)]
        ro1 = ro[i:len(r)]
        g1 = g[i:len(r)]
        y = ro1 * g1
        p1 = np.trapz(y, r1)
        p[i] = p1
    return p
p = pressure(r, ro, g) / 1e9  # expressed in GPa
z = np.linspace(6400, 1, 6400)
fig, ax = plt.subplots()
ax.plot(z,p)
ax.grid()
ax.set_ylabel('P [GPa]')
ax.set_xlabel('Depth z from the Earth Surface [km]')
|
11495019
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import sys
import time
import datetime
from src.EN import EntityNetwork
import numpy as np
import tensorflow as tf
import tflearn
import random
import docopt
import cPickle as pickle
import logging
import datetime
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
#plt.rc('text', usetex=True)
#plt.rc('font', family='Times-Roman')
sns.set_style(style='white')
def train(epoch,batch_size, data,par,dr, _test):
def val_test(d,ty):
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def viz(epoch):
s_s=[]
            ### mb_x1: input sentences
for m in mb_x1[0]:
t = []
for e in m:
if(e != 0):
t.append(data._data['vocab'][e-1])
if(len(t) > 0):
s_s.append(t)
s_s = [" ".join(sss) for sss in s_s]
q_q = []
for e2 in mb_x2[0][0]:
if(e2 != 0):
q_q.append(data._data['vocab'][e2-1])
q_q = " ".join(q_q)
k,o,s,q,l = sess.run([entity_net.keys,entity_net.out,entity_net.story_embeddings,entity_net.query_embedding,entity_net.length],feed_dict=dic)
gs=[]
for i in range(int(l)):
temp = np.split(o[0][i], len(k))
g =[]
for j in range(len(k)):
g.append(sigmoid(np.inner(s[0][i],temp[j])+np.inner(s[0][i],k[j])+np.inner(s[0][i],q[0][0])))
gs.append(g)
plt.figure(figsize=(15,7.5))
ax = sns.heatmap(np.transpose(np.array(gs)),cmap="YlGnBu",vmin=0, vmax=1)
ax.set_xticks([i for i in range(len(s_s))])
ax.set_xticklabels(s_s,rotation=45)
ax.set_yticklabels([ i+1 for i in range(len(k)) ],rotation=0 )
plt.title(q_q+"?")
plt.tight_layout()
plt.savefig('data/plot/ep%d.pdf'%int(epoch), format='pdf', dpi=300)
plt.close()
_loss_val, _acc_val, _counter = 0.0, 0.0, 0
for idx, (mb_x1, mb_x2, mb_y) in enumerate(d):
dic = {entity_net.S:mb_x1,entity_net.Q:mb_x2,
entity_net.A:mb_y, entity_net.keep_prob:1.0}
curr_loss_val, curr_acc_val = sess.run([entity_net.loss_val, entity_net.accuracy], feed_dict=dic)
_loss_val, _acc_val, _counter = _loss_val + curr_loss_val, _acc_val + curr_acc_val, _counter + 1
if(ty=='Validation' and e % 10 ==0 ):
logging.info("Start plotting")
viz(e)
logging.info("Epoch %d\t%s Loss: %.3f\t %s Accuracy: %.3f" % (e,ty[:4],_loss_val / float(_counter),ty, _acc_val/float(_counter)))
return _loss_val / float(_counter), _acc_val/float(_counter)
def tr(verbose):
loss, acc, counter = 0.0, 0.0, 0
np.random.shuffle(all_train)
for idx, (mb_x1, mb_x2, mb_y) in enumerate(all_train):
dic = {entity_net.S:mb_x1,entity_net.Q:mb_x2,
entity_net.A:mb_y, entity_net.keep_prob:dr}
curr_loss, curr_acc, _ = sess.run([entity_net.loss_val, entity_net.accuracy, entity_net.train_op],
feed_dict=dic)
loss, acc, counter = loss + curr_loss, acc + curr_acc, counter + 1
if counter % verbose == 0:
logging.info("Epoch %d\tTrain Loss: %.3f\tTrain Accuracy: %.3f" % (e, loss / float(counter), acc / float(counter)))
return loss / float(counter), acc / float(counter)
def init():
        # Initialize the per-epoch metric dictionaries
train_loss, train_acc, val_loss, val_acc, test_loss, test_acc = {}, {}, {}, {}, {}, {}
return train_loss, train_acc, val_loss, val_acc, test_loss, test_acc
tf.reset_default_graph()
with tf.Session() as sess:
entity_net = EntityNetwork(**par)
train_loss, train_acc, val_loss, val_acc, test_loss, test_acc = init()
logging.info('Initializing Variables!')
sess.run(tf.global_variables_initializer())
# Get Current Epoch
curr_epoch = sess.run(entity_net.epoch_step)
        logging.info('Training started')
all_train = data.gen_examples(batch_size,'train')
all_val = data.gen_examples(1,'val')
all_test = data.gen_examples(batch_size,'test')
best_val,patient= 0.0, 0.0
for e in range(curr_epoch,epoch):
train_loss[e], train_acc[e] = tr(20)
val_loss[e], val_acc[e] = val_test(all_val,'Validation')
if (_test):
test_loss[e], test_acc[e] = val_test(all_test,'Test')
# Update best_val
if val_acc[e] > best_val:
best_val, patient = val_acc[e], 0
            elif val_acc[e] == best_val:  # assumed intent: a tie with the best counts half toward patience
                patient += 0.5
else:
patient += 1.0
# Early Stopping Condition
if patient > 100.0:
break
sess.run(entity_net.epoch_increment)
return train_loss, train_acc, val_loss, val_acc, test_loss, test_acc
# for i, elem in enumerate(data.get_batch_train(batch_size,'train')):
# data.get_dic_train(entity_net.S,entity_net.Q,entity_net.A,entity_net.keep_prob,elem[0],elem[1],dr)
|
11495020
|
import numpy as np
import json
import requests
import sys
# python delete_custom_charges.py 0000000000000000000000000000000000000000000000000000000000000000
def load_custom_charges(custom_charges_file):
custom_charges_dtype = [("Id", "U36")]
custom_charges = np.loadtxt(custom_charges_file, dtype="U36", delimiter=",")
# print(custom_charges)
return custom_charges
def delete_custom_fixed_charge(env, admin_api_key,custom_charge_id, payer_account_name):
api_url = env + "api/billing.json/delete_custom_billing_charge"
chargeData = json.dumps({"Id": custom_charge_id, "use_account": payer_account_name})
r7 = requests.post(api_url, headers = {"Content-Type": "application/json", "access_key": admin_api_key}, data = chargeData)
print("Deleted the custom charge " + str(custom_charge_id))
print(r7.json())
def cycle_custom_charges(env, admin_api_key, custom_charges, payer_account_name):
charge_ids = np.array([])
for i in np.arange(0, np.shape(custom_charges)[0]):
charge_id = delete_custom_fixed_charge(env, admin_api_key,custom_charges[i], payer_account_name)
def main():
try:
admin_api_key = str(sys.argv[1])
except IndexError:
print("Must admin_api_key")
return
env = "https://api.cloudcheckr.com/"
payer_account_name = "Payer Master Account"
custom_charges_file = "added_charges.csv"
custom_charges = load_custom_charges(custom_charges_file)
charge_ids = cycle_custom_charges(env, admin_api_key, custom_charges, payer_account_name)
if __name__ == "__main__":
main()
|
11495069
|
from pyNastran.converters.cart3d.cart3d import Cart3D, read_cart3d
from pyNastran.converters.tecplot.tecplot import Tecplot, Zone
def cart3d_to_tecplot(cart3d_filename, tecplot_filename, log=None, debug=False):
"""
Converts Cart3d to Tecplot
"""
if isinstance(cart3d_filename, Cart3D):
model = cart3d_filename
else:
model = read_cart3d(cart3d_filename, log=log, debug=debug)
tecplot = Tecplot()
tecplot.log = model.log
zone = Zone(model.log)
zone.headers_dict['VARIABLES'] = ['X', 'Y', 'Z']
zone.xyz = model.points
zone.tri_elements = model.elements + 1
tecplot.zones = [zone]
tecplot.write_tecplot(tecplot_filename, adjust_nids=False)
return tecplot
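# Hedged usage example (file names are placeholders):
#   tecplot = cart3d_to_tecplot("model.tri", "model.plt")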
|
11495073
|
from functools import wraps
import logging
import os
from django.conf import settings
from django.db import ProgrammingError, connection
from django.utils import timezone
from django_q.models import Schedule
from django_q.tasks import schedule
from qatrack.qatrack_core.utils import today_start_end
logger = logging.getLogger('django-q')
def qatrack_task_wrapper(func):
@wraps(func)
def wrapped(*args, **kwargs):
if os.name.lower() == "nt":
try:
connection.cursor()
except ProgrammingError:
connection.connect()
return func(*args, **kwargs)
return wrapped
def _schedule_periodic_task(function, task_name, interval_min=None, next_run=None, schedule_type=Schedule.MINUTES):
"""Create a periodic schedule calling input function. Default interval is 15min"""
if schedule_type == Schedule.MINUTES and interval_min is None:
interval_min = 15
now = timezone.now()
if next_run is None:
        # set initial schedule to 7.5 minutes after the next quarter hour
# the schedule will then run every 15 minutes at HH:07:30, HH:22:30, HH:37:30, HH:52:30
next_run = now.replace(
minute=0, second=0, microsecond=0
) + timezone.timedelta(seconds=int(interval_min * 60 / 2.))
while next_run < now:
next_run += timezone.timedelta(minutes=interval_min)
try:
sch = Schedule.objects.get(name=task_name)
sch.func = function
sch.minutes = interval_min
sch.next_run = next_run
sch.save()
logger.info("%s next run updated to %s" % (function, next_run))
except Schedule.DoesNotExist:
schedule(
function,
name=task_name,
schedule_type=schedule_type,
minutes=interval_min,
repeats=-1,
next_run=next_run,
)
logger.info("%s schedule created and next run set to %s" % (function, next_run))
@qatrack_task_wrapper
def run_periodic_scheduler(model, log_name, handler, time_field="time", recurrence_field="recurrence"):
"""Check a model with a recurring schedule for instances that should be run in the next time period.
model: the Django model to check,
log_name: short description to include in log strings,
handler: a function that will be called when an instance should be run
in the current time period must accept an instance of model, and a
datetime when the task should be scheduled for. The handler function
should perform the actual scheduling of the task.
time_field: The name of the field that holds what time the task should be run
recurrence_field: The name of the field that holds the recurrence field
"""
start_today, end_today = today_start_end()
start_today = start_today.replace(tzinfo=None)
end_today = end_today.replace(tzinfo=None)
now = timezone.localtime(timezone.now()).replace(tzinfo=None)
start_time, end_time = (now, now + timezone.timedelta(minutes=15))
logger.info("Running %s task at %s for notices between %s and %s" % (log_name, now, start_time, end_time))
# first narrow down notices to those supposed to run in this 15 minute block
instances = model.objects.filter(
**{
"%s__gte" % time_field: start_time.strftime("%H:%M"),
"%s__lte" % time_field: end_time.strftime("%H:%M"),
}
)
if settings.DEBUG: # pragma: nocover
instances = model.objects.all()
    # now look at recurrences occurring today and see if they should be sent now
for instance in instances:
# since our recurrence can only occur with a maximum frequency of 1 /
# day, between will just return ~timezone.now() if the report is to be
# sent today. If we make reports available at a higher frequency this
# check will need to be adjusted.
occurences = getattr(instance, recurrence_field).between(start_today, end_today)
logger.info(
"Occurences for %s %s: %s (between %s & %s)" % (
model._meta.model_name,
instance.id,
occurences,
start_today,
end_today,
)
)
if occurences and start_time.time() <= getattr(instance, time_field) <= end_time.time():
tz = timezone.get_current_timezone()
send_time = tz.localize(timezone.datetime.combine(start_today, getattr(instance, time_field)))
handler(instance, send_time)
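# Hedged usage sketch (NoticeSchedule and send_notice are hypothetical names, not part of
# this module): the handler receives the matching model instance and the localized
# datetime the task should run at, and is responsible for the actual scheduling.
#
#   def handle_notice(instance, send_time):
#       schedule(
#           "myapp.tasks.send_notice",
#           instance.pk,
#           name="send-notice-%s" % instance.pk,
#           schedule_type=Schedule.ONCE,
#           next_run=send_time,
#       )
#
#   run_periodic_scheduler(NoticeSchedule, "notice", handle_notice)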
|
11495113
|
from pathlib import Path
import typer
from .shell import shell
CURRENT_PATH = Path(__file__)
ROOT_PATH = CURRENT_PATH.parents[1]
NOTEBOOKS_FOLDER = ROOT_PATH / "examples"
NOTEBOOKS_TO_SKIP = []
def run_notebooks():
for notebook_path in NOTEBOOKS_FOLDER.glob("*.ipynb"):
if notebook_path.name in NOTEBOOKS_TO_SKIP:
typer.echo(f"Skipping {notebook_path}")
continue
typer.echo(f"Running {notebook_path}")
shell(
f"poetry run python -m jupyter nbconvert --ExecutePreprocessor.kernel_name=python3 --to notebook --execute {notebook_path}"
)
if __name__ == "__main__":
run_notebooks()
|
11495149
|
import os
asciidirectory = os.getcwd() + '/pastes/asciipastes/'  # path of ASCII pastes under the current working directory
save_path = os.getcwd() + '/decodedexes/'  # path where decoded executables are stored
def writefile(filenm, stuff):
writefile = open(filenm,'w')
writefile.write(stuff)
writefile.close()
for filename in os.listdir(asciidirectory):
paste = os.path.join(asciidirectory, filename)
outputfile = save_path + filename
with open(paste, 'r') as f:
paste_data = f.read()
try:
paste_data_normalized = [int(i) for i in paste_data.split()]
decoded_paste = "".join([chr(c) for c in paste_data_normalized])
writefile(outputfile, decoded_paste) # write pe32exe
os.remove(paste)
except:
continue
f.close()
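# Illustration of the expected paste format (hypothetical content): a paste containing the
# text "77 90 144" decodes to the bytes "MZ\x90", i.e. the start of a PE header, which is
# then written out as an executable.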
|
11495167
|
import sys
import os
import pytest
PROJECT_ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__),
os.pardir)
)
sys.path.append(PROJECT_ROOT)
# Import the libraries
from RFEM.enums import *
from RFEM.initModel import Model, SetAddonStatus, CheckIfMethodOrTypeExists
from RFEM.TypesForSteelDesign.steelBoundaryConditions import *
if Model.clientModel is None:
Model()
@pytest.mark.skipif(CheckIfMethodOrTypeExists(Model.clientModel,'ns0:steel_boundary_conditions', True), reason="Type ns0:steel_boundary_conditions not in RFEM GM yet")
def test_steelEffectiveLengths():
Model.clientModel.service.begin_modification()
SetAddonStatus(Model.clientModel, AddOn.steel_design_active, True)
SteelBoundaryConditions(1, [True, "BCTEST_1"], '1', '', True, True, True,
nodal_supports= [
[None, SteelBoundaryConditionsSupportType.SUPPORT_TYPE_FIXED_IN_Y_AND_TORSION, False, True, False, True, False, False, False,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, SteelBoundaryConditionsEccentricityTypeZ.ECCENTRICITY_TYPE_USER_VALUE, 0.0, 0.0, 0.0, ""],
[None, SteelBoundaryConditionsSupportType.SUPPORT_TYPE_FIXED_IN_Y_AND_TORSION, False, True, False, True, False, False, False,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, SteelBoundaryConditionsEccentricityTypeZ.ECCENTRICITY_TYPE_USER_VALUE, 0.0, 0.0, 0.0, ""]
],
member_hinges=[
["Start", False, False, False, False, False, False, False, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ""],
["End", False, False, False, False, False, False, False, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ""]
]
)
SteelBoundaryConditions(2, [True, "BCTEST_2"], '', '', True, True, True,
nodal_supports= [
[None, SteelBoundaryConditionsSupportType.SUPPORT_TYPE_FIXED_IN_Y_AND_TORSION, False, True, False, True, False, False, False,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, SteelBoundaryConditionsEccentricityTypeZ.ECCENTRICITY_TYPE_USER_VALUE, 0.0, 0.0, 0.0, ""],
[None, SteelBoundaryConditionsSupportType.SUPPORT_TYPE_FIXED_IN_Y_AND_TORSION, False, True, False, True, False, False, False,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, SteelBoundaryConditionsEccentricityTypeZ.ECCENTRICITY_TYPE_USER_VALUE, 0.0, 0.0, 0.0, ""],
[None, SteelBoundaryConditionsSupportType.SUPPORT_TYPE_FIXED_IN_Y_AND_TORSION, False, True, False, True, False, False, False,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, SteelBoundaryConditionsEccentricityTypeZ.ECCENTRICITY_TYPE_USER_VALUE, 0.0, 0.0, 0.0, ""]
],
member_hinges=[
["Start", False, False, False, False, False, False, False, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ""],
["Inter.", False, False, False, False, False, False, False, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ""],
["End", True, False, False, True, False, False, True, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ""]
]
)
bc_1 = Model.clientModel.service.get_steel_boundary_conditions(1)
assert bc_1.no == 1
assert bc_1.nodal_supports[0][0][0] == 0
bc_2 = Model.clientModel.service.get_steel_boundary_conditions(2)
assert bc_2.member_hinges[0][1][1] == "Inter."
Model.clientModel.service.finish_modification()
|
11495171
|
import json
import os
from typing import List, Dict
import pymysql
import yaml
def main():
with open("./docker-compose.yaml", "r", encoding="utf8") as f:
compose = yaml.safe_load(f.read())
with open("./sql_script_load_order.txt", "r") as f:
sql_scripts = f.readlines()
sql_scripts = [line.rstrip() for line in sql_scripts]
container_config = compose["services"]["mysql"]["environment"]
check_sql_scripts(sql_scripts)
check_tables(container_config)
def check_sql_scripts(sql_scripts: List[str]):
configured_sql = set(
[os.path.abspath(line) for line in sql_scripts if line] # exclude empty lines
)
current_sql = set(get_all_sql_file_path())
if configured_sql - current_sql:
view = json.dumps(
[
"./" + os.path.relpath(x, os.getcwd()).replace("\\", "/")
for x in configured_sql - current_sql
],
indent=2,
)
raise ValueError(
f"sql script ordered in sql_script_load_order.txt but not found in sql/: {view}"
)
if current_sql - configured_sql:
view = json.dumps(
[
"./" + os.path.relpath(x, os.getcwd()).replace("\\", "/")
for x in current_sql - configured_sql
],
indent=2,
)
raise ValueError(
f"sql script exists in sql/ but not found in sql_script_load_order.txt: {view}"
)
def check_tables(container_config: Dict[str, str]):
Expected_Tables = {
"chii_apps",
"chii_characters",
"chii_crt_cast_index",
"chii_crt_comments",
"chii_crt_subject_index",
"chii_ep_comments",
"chii_ep_revisions",
"chii_episodes",
"chii_group_posts",
"chii_group_topics",
"chii_index",
"chii_index_collects",
"chii_index_comments",
"chii_index_related",
"chii_memberfields",
"chii_members",
"chii_oauth_access_tokens",
"chii_oauth_clients",
"chii_os_web_sessions",
"chii_person_alias",
"chii_person_collects",
"chii_person_cs_index",
"chii_person_fields",
"chii_person_relationship",
"chii_persons",
"chii_prsn_comments",
"chii_rev_history",
"chii_rev_text",
"chii_subject_alias",
"chii_subject_fields",
"chii_subject_interests",
"chii_subject_posts",
"chii_subject_relations",
"chii_subject_revisions",
"chii_subject_topics",
"chii_subjects",
"chii_timeline",
"chii_usergroup",
}
    # Open the database connection
db = pymysql.connect(
host="127.0.0.1",
database=container_config["MYSQL_DATABASE"],
user=container_config["MYSQL_USER"],
password=container_config["MYSQL_PASSWORD"],
)
    # Use the cursor() method to create a cursor object
with db.cursor() as cursor:
        # Use the execute() method to run a SQL query
cursor.execute("SELECT VERSION()")
        # Use the fetchone() method to fetch a single row.
data = cursor.fetchone()
assert data[0] == "5.7.33", type(data)
print("Database version : %s " % data)
with db.cursor() as cursor:
cursor.execute("select * from information_schema.tables")
data = cursor.fetchall()
tables = set()
for table in data:
if table[1] == container_config["MYSQL_DATABASE"]:
tables.add(table[2])
assert (
Expected_Tables == tables
), f"missing tables {Expected_Tables - tables}, extra tables {tables - Expected_Tables} in database"
db.close()
def get_all_sql_file_path() -> List[str]:
data = []
for root, _, files in os.walk("./sql/"):
for file in files:
data.append(os.path.abspath(os.path.join(root, file)))
return data
if __name__ == "__main__":
main()
|
11495207
|
import factory
from application.models import db, User, Role
from .role_factory import RoleFactory
from werkzeug.security import generate_password_hash, check_password_hash
class UserFactory(factory.alchemy.SQLAlchemyModelFactory):
class Meta:
model = User
sqlalchemy_session = db.session
sqlalchemy_session_persistence = 'commit'
name = factory.Faker("first_name")
# name = factory.Faker('name')
password ="<PASSWORD>"
email = factory.Faker('email')
active = True
class AdminUserFactory(UserFactory):
@factory.post_generation
def roles(self, create, extracted, **kwargs):
r = Role.find(name='admin')
if not r:
r = RoleFactory(name='admin')
self.roles.append(r)
db.session.commit()
class EditorUserFactory(UserFactory):
@factory.post_generation
def roles(self, create, extracted, **kwargs):
r = Role.find(name='editor')
if not r:
r = RoleFactory(name='editor')
self.roles.append(r)
db.session.commit()
class ApproverUserFactory(UserFactory):
@factory.post_generation
def roles(self, create, extracted, **kwargs):
r = Role.find(name='approver')
if not r:
r = RoleFactory(name='approver')
self.roles.append(r)
db.session.commit()
|
11495221
|
from collections import namedtuple
from .util import fatal
Env = namedtuple('Env', ['name', 'short', 'other'])
ENVIRONMENTS = [
Env('development', 'dev', ['debug']),
Env('staging', 'staging', ['stage']),
Env('production', 'prod', ['release']),
Env('testing', 'test', ['qa'])
]
STANDARD_ENV_NAMES = [env.name for env in ENVIRONMENTS]
SHORT_ENV_NAMES = [env.short for env in ENVIRONMENTS]
DEV_PROD_NAMES = ['dev', 'development', 'prod', 'production']
def get_env(name):
for env in ENVIRONMENTS:
if env.name == name or env.short == name or name in env.other:
return env
return None
def short_env_name(name):
env = get_env(name)
return env.short if env else name
def env_to_release_or_debug(name, other=False):
if name in ['release', 'production', 'prod']:
return 'release'
elif name in ['debug', 'development', 'dev']:
return 'debug'
elif other:
return name
fatal('Unsupport --env value. Please use --release/--debug or ' +
'one of the equivalent aliases.')
def is_env_release(name):
return env_to_release_or_debug(name) == 'release'
def all_env_names(name):
env = get_env(name)
if env:
return set([env.name, env.short, *env.other])
else:
return set([name])
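# Hedged examples of the helpers above:
#   short_env_name("production")  # -> 'prod'
#   all_env_names("dev")          # -> {'development', 'dev', 'debug'}
#   is_env_release("release")     # -> True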
|
11495225
|
from typing import Dict, List
def _merge(name, d, override_keys):
override = d.pop("override", None)
if override is not None:
if not isinstance(override, dict):
raise ValueError(
f"{name}.override: got {type(override)}, must be a dictionary"
)
for k in override_keys:
v = override.get(k, None)
if v is not None:
if not isinstance(v, dict):
raise ValueError(
f"{name}.override.{k}: got {type(v)}, must be a dictionary"
)
d.update(v)
for k, v in d.items():
if isinstance(v, dict):
_merge(f"{name}.{k}", v, override_keys)
elif isinstance(v, list):
for i in v:
if isinstance(i, dict):
_merge(f"{name}.{k}[{i}]", i, override_keys)
def apply_overrides(d: Dict, override_keys: List[str]):
"""
The idea is that any dictionary key can contain a key
called 'override', which will be processed as follows:
- override contents must be a dictionary
- key is a potential 'override key'
- value must be a dictionary
- If a key matches an override key, then the contained
dictionary will be shallow updated with the contents
of the dictionary
- This will recurse all structures and implement overrides
wherever dictionaries are found
Currently, if a list is overridden, the entire list is replaced. It may
    make sense to provide per-element merging for lists in the future.
"""
_merge("", d, override_keys)
def _main():
import argparse
import toml
parser = argparse.ArgumentParser()
parser.add_argument("fname")
parser.add_argument("key")
args = parser.parse_args()
with open(args.fname) as fp:
d = toml.load(fp)
apply_overrides(d, [args.key])
print(toml.dumps(d))
if __name__ == "__main__":
_main()
|
11495234
|
from feathr.anchor import FeatureAnchor
from feathr.source import Source
from feathr.feature_derivations import DerivedFeature
from feathr.feature import Feature
from feathr.transformation import Transformation
from typing import Set
class RepoDefinitions:
"""A list of shareable Feathr objects defined in the project."""
def __init__(self,
sources: Set[Source],
features: Set[Feature],
transformations: Set[Transformation],
feature_anchors: Set[FeatureAnchor],
derived_features: Set[DerivedFeature]) -> None:
self.sources = sources
self.features = features
self.transformations = transformations
self.feature_anchors = feature_anchors
self.derived_features = derived_features
|
11495239
|
import re
from share.transform.chain import *
EMAIL_RE = re.compile(r'\S+@\S+')
class WorkIdentifier(Parser):
uri = IRI(ctx)
class AgentIdentifier(Parser):
uri = IRI(ctx, urn_fallback=True)
class RelatedAgent(Parser):
schema = GuessAgentType(ctx, default='organization')
name = ctx
class IsAffiliatedWith(Parser):
related = Delegate(RelatedAgent, ctx)
class AbstractAgent(Parser):
identifiers = Map(
Delegate(AgentIdentifier),
Try(ctx.email),
RunPython('to_str', ctx.contributorId)
)
related_agents = Map(
Delegate(IsAffiliatedWith),
Try(ctx.affiliation.text),
RunPython('maybe_usgs', Try(ctx.usgs))
)
def to_str(self, obj):
return str(obj)
def maybe_usgs(self, obj):
if obj:
# How USGS references itself as a work publisher
return 'U.S. Geological Survey'
return None
class Organization(AbstractAgent):
schema = GuessAgentType(ctx.text, default='organization')
name = ctx.text
class Person(AbstractAgent):
given_name = Maybe(ctx, 'given')
family_name = Maybe(ctx, 'family')
class Creator(Parser):
order_cited = ctx('index')
cited_as = RunPython('strip_emails', ctx.text)
agent = Delegate(Person, ctx)
def strip_emails(self, obj):
return EMAIL_RE.sub('', obj)
class PublisherAgent(Parser):
schema = GuessAgentType(ctx.publisher, default='organization')
name = ctx.publisher
location = Try(ctx.publisherLocation)
class Publisher(Parser):
agent = Delegate(PublisherAgent, ctx)
class CreativeWork(Parser):
schema = RunPython('get_schema', ctx.publicationType.text)
title = ctx.title
description = Maybe(ctx, 'docAbstract')
date_updated = ParseDate(ctx.lastModifiedDate)
date_published = ParseDate(ctx.displayToPublicDate)
language = Maybe(ctx, 'language')
related_agents = Concat(
Map(
Delegate(Creator),
Filter(lambda a: not a['corporation'], Try(ctx.contributors.authors))
),
Map(
Delegate(Creator.using(agent=Delegate(Organization, ctx))),
Filter(lambda a: a['corporation'], Try(ctx.contributors.authors))
),
Try(Delegate(Publisher, ctx))
)
identifiers = Map(
Delegate(WorkIdentifier),
RunPython('format_usgs_id_as_url', ctx.indexId),
Try(ctx.doi)
)
class Extra:
additional_online_files = Maybe(ctx, 'additionalOnlineFiles')
country = Maybe(ctx, 'country')
defined_type = Maybe(ctx, 'defined_type')
end_page = Maybe(ctx, 'endPage')
geographic_extents = Maybe(ctx, 'geographicExtents')
index_id = Maybe(ctx, 'indexId')
ipds_id = Maybe(ctx, 'ipdsId')
issue = Maybe(ctx, 'issue')
links = Maybe(ctx, 'links')
online_only = Maybe(ctx, 'onlineOnly')
other_geospatial = Maybe(ctx, 'otherGeospatial')
publication_subtype = Maybe(ctx, 'publicationSubtype')
publication_year = Maybe(ctx, 'publicationYear')
start_page = Maybe(ctx, 'startPage')
state = Maybe(ctx, 'state')
type = Maybe(ctx, 'type')
volume = Maybe(ctx, 'volume')
def get_schema(self, publication_type):
return {
'Article': 'Article',
'Book': 'Book',
'Book chapter': 'Book',
'Conference Paper': 'ConferencePaper',
'Dataset': 'DataSet',
# 'Pamphlet':
# 'Patent':
'Report': 'Report',
'Speech': 'Presentation',
'Thesis': 'Thesis',
# 'Videorecording':
}.get(publication_type) or 'CreativeWork'
def format_usgs_id_as_url(self, id):
return 'https://pubs.er.usgs.gov/publication/{}'.format(id)
class USGSTransformer(ChainTransformer):
VERSION = 1
root_parser = CreativeWork
|
11495242
|
from attributes.unit_test.discoverer import TestDiscoverer
class CSharpTestDiscoverer(TestDiscoverer):
def __init__(self):
self.language = 'C#'
self.languages = ['C#']
self.extensions = ['*.cs']
self.frameworks = [
self.__nunit__,
self.__vs_unit_testing__,
self.__xunit__,
]
def __nunit__(self, path, sloc):
pattern = 'using NUnit.Framework;'
return self.measure(path, sloc, pattern)
def __vs_unit_testing__(self, path, sloc):
pattern = 'using Microsoft.VisualStudio.TestTools.UnitTesting;'
return self.measure(path, sloc, pattern)
def __xunit__(self, path, sloc):
pattern = 'using Xunit;'
return self.measure(path, sloc, pattern)
|
11495269
|
import bpy
from io_scene_vrm.common.human_bone import HumanBoneName
from io_scene_vrm.editor.vrm0.property_group import Vrm0HumanoidPropertyGroup
def test() -> None:
bpy.ops.icyp.make_basic_armature()
armatures = [obj for obj in bpy.data.objects if obj.type == "ARMATURE"]
assert len(armatures) == 1
armature = armatures[0]
human_bones = armature.data.vrm_addon_extension.vrm0.humanoid.human_bones
original = list(map(lambda b: (str(b.node.value), str(b.bone)), human_bones))
human_bone1 = human_bones.add()
human_bone1.bone = "NoHumanBone"
Vrm0HumanoidPropertyGroup.fixup_human_bones(armature)
assert original == list(
map(lambda b: (str(b.node.value), str(b.bone)), human_bones)
)
human_bone2 = human_bones.add()
human_bone2.bone = HumanBoneName.CHEST.value
Vrm0HumanoidPropertyGroup.fixup_human_bones(armature)
assert original == list(
map(lambda b: (str(b.node.value), str(b.bone)), human_bones)
)
human_bones.add()
human_bones.add()
human_bones.add()
Vrm0HumanoidPropertyGroup.fixup_human_bones(armature)
assert original == list(
map(lambda b: (str(b.node.value), str(b.bone)), human_bones)
)
chest_bone = list(
filter(lambda b: b.bone == HumanBoneName.CHEST.value, human_bones)
)[0]
spine_bone = list(
filter(lambda b: b.bone == HumanBoneName.SPINE.value, human_bones)
)[0]
chest_bone.node.value = HumanBoneName.SPINE.value
assert spine_bone.node.value == HumanBoneName.SPINE.value
assert chest_bone.node.value == HumanBoneName.SPINE.value
Vrm0HumanoidPropertyGroup.fixup_human_bones(armature)
assert spine_bone.node.value == HumanBoneName.SPINE.value
assert not chest_bone.node.value
chest_bone.node.value = HumanBoneName.CHEST.value
assert original == list(
map(lambda b: (str(b.node.value), str(b.bone)), human_bones)
)
hips_index = next(
i for i, b in enumerate(human_bones) if b.bone == HumanBoneName.HIPS.value
)
human_bones.remove(hips_index)
Vrm0HumanoidPropertyGroup.fixup_human_bones(armature)
hips_bone = list(filter(lambda b: b.bone == HumanBoneName.HIPS.value, human_bones))[
0
]
assert not hips_bone.node.value
hips_bone.node.value = "hips"
assert set(original) == set(
map(lambda b: (str(b.node.value), str(b.bone)), human_bones)
)
if __name__ == "__main__":
test()
|
11495275
|
import io
import sys
import unittest
from unittest.mock import patch, PropertyMock
from fzfaws.ec2.stop_instance import stop_instance
from fzfaws.ec2 import EC2
from fzfaws.utils import BaseSession
from botocore.stub import Stubber
import boto3
class TestEC2Stop(unittest.TestCase):
def setUp(self):
self.capturedOutput = io.StringIO()
sys.stdout = self.capturedOutput
def tearDown(self):
sys.stdout = sys.__stdout__
@patch("fzfaws.ec2.stop_instance.get_confirmation")
@patch.object(BaseSession, "client", new_callable=PropertyMock)
@patch.object(EC2, "print_instance_details")
@patch.object(EC2, "wait")
@patch.object(EC2, "set_ec2_instance")
def test_stop_instance(
self,
mocked_set_instance,
mocked_wait,
mocked_detail,
mocked_client,
mocked_confirmation,
):
# mock what needed to be mocked
ec2 = boto3.client("ec2")
stubber = Stubber(ec2)
response = {"StoppingInstances": []}
stubber.add_response("stop_instances", response)
stubber.activate()
mocked_client.return_value = ec2
mocked_confirmation.return_value = True
stop_instance()
mocked_detail.assert_called_once()
mocked_set_instance.assert_called_once()
mocked_wait.assert_not_called()
self.assertRegex(self.capturedOutput.getvalue(), r".*Instance stop initiated.*")
# mock what needed to be mocked
ec2 = boto3.client("ec2")
stubber = Stubber(ec2)
response = {"StoppingInstances": []}
stubber.add_response("stop_instances", response)
stubber.activate()
mocked_client.return_value = ec2
mocked_confirmation.return_value = True
mocked_detail.reset_mock()
mocked_set_instance.reset_mock()
stop_instance(False, False, True, True)
mocked_detail.assert_called_once()
mocked_set_instance.assert_called_once()
mocked_wait.assert_called_once()
self.assertRegex(self.capturedOutput.getvalue(), r".*Instance stop initiated.*")
self.assertRegex(self.capturedOutput.getvalue(), r".*Instance stopped.*")
|
11495314
|
from data_collector import DataCollector
from node import Node
from node_repository import NodeRepository
from stats_repository import StatsRepository
class SeamonServer(object):
@staticmethod
def add_node(name, ip_address, port):
NodeRepository.save(Node(name=name, ip_address=ip_address, port=port))
@staticmethod
def list_nodes():
return NodeRepository.all()
@staticmethod
def get_node_stats(node_name):
node = NodeRepository.by_name(node_name)
data = DataCollector.get_node_info(node)
StatsRepository.save_for_node(node, data)
@staticmethod
def node_by_name(name):
return NodeRepository.by_name(name)
|
11495443
|
import os
from os.path import join
import csv
import cv2, copy
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from PIL import Image
import torchaudio
import sys
from scipy.io import wavfile
import json
def read_sal_text(txt_file):
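# Each line of the annotation list file is expected to be "<video_name> <n_frames> <fps>".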
test_list = {'names': [], 'nframes': [], 'fps': []}
with open(txt_file,'r') as f:
for line in f:
word=line.strip().split()
test_list['names'].append(word[0])
test_list['nframes'].append(word[1])
test_list['fps'].append(word[2])
return test_list
def read_sal_text_dave(json_file):
test_list = {'names': [], 'nframes': [], 'fps': []}
with open(json_file,'r') as f:
_dic = json.load(f)
for name in _dic:
# word=line.strip().split()
test_list['names'].append(name)
test_list['nframes'].append(0)
test_list['fps'].append(float(_dic[name]))
return test_list
def make_dataset(annotation_path, audio_path, gt_path, json_file=None):
if json_file is None:
data = read_sal_text(annotation_path)
else:
data = read_sal_text_dave(json_file)
video_names = data['names']
video_nframes = data['nframes']
video_fps = data['fps']
dataset = []
audiodata= {}
for i in range(len(video_names)):
if i % 100 == 0:
print('dataset loading [{}/{}]'.format(i, len(video_names)))
n_frames = len(os.listdir(join(gt_path, video_names[i], 'maps')))
if n_frames <= 1:
print("Less frames")
continue
begin_t = 1
end_t = n_frames
audio_wav_path = os.path.join(audio_path,video_names[i],video_names[i]+'.wav')
if not os.path.exists(audio_wav_path):
print("Not exists", audio_wav_path)
continue
[audiowav,Fs] = torchaudio.load(audio_wav_path, normalization=False)
audiowav = audiowav * (2 ** -23)
n_samples = Fs/float(video_fps[i])
starts=np.zeros(n_frames+1, dtype=int)
ends=np.zeros(n_frames+1, dtype=int)
starts[0]=0
ends[0]=0
for videoframe in range(1,n_frames+1):
startemp=max(0,((videoframe-1)*(1.0/float(video_fps[i]))*Fs)-n_samples/2)
starts[videoframe] = int(startemp)
endtemp=min(audiowav.shape[1],abs(((videoframe-1)*(1.0/float(video_fps[i]))*Fs)+n_samples/2))
ends[videoframe] = int(endtemp)
audioinfo = {
'audiopath': audio_path,
'video_id': video_names[i],
'Fs' : Fs,
'wav' : audiowav,
'starts': starts,
'ends' : ends
}
audiodata[video_names[i]] = audioinfo
return audiodata
def get_audio_feature(audioind, audiodata, clip_size, start_idx):
len_snippet = clip_size
max_audio_Fs = 22050
min_video_fps = 10
max_audio_win = int(max_audio_Fs / min_video_fps * 32)
audioexcer = torch.zeros(1,max_audio_win)
valid = {}
valid['audio']=0
if audioind in audiodata:
excerptstart = audiodata[audioind]['starts'][start_idx+1]
if start_idx+len_snippet >= len(audiodata[audioind]['ends']):
print("Exceeds size", audioind)
sys.stdout.flush()
excerptend = audiodata[audioind]['ends'][-1]
else:
excerptend = audiodata[audioind]['ends'][start_idx+len_snippet]
try:
valid['audio'] = audiodata[audioind]['wav'][:, excerptstart:excerptend+1].shape[1]
except:
pass
audioexcer_tmp = audiodata[audioind]['wav'][:, excerptstart:excerptend+1]
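# Centre the Hann-windowed excerpt inside the fixed-length buffer; the even/odd
# branch below only decides whether the extra sample goes to the right half.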
if (valid['audio']%2)==0:
audioexcer[:,((audioexcer.shape[1]//2)-(valid['audio']//2)):((audioexcer.shape[1]//2)+(valid['audio']//2))] = \
torch.from_numpy(np.hanning(audioexcer_tmp.shape[1])).float() * audioexcer_tmp
else:
audioexcer[:,((audioexcer.shape[1]//2)-(valid['audio']//2)):((audioexcer.shape[1]//2)+(valid['audio']//2)+1)] = \
torch.from_numpy(np.hanning(audioexcer_tmp.shape[1])).float() * audioexcer_tmp
else:
print(audioind, "not present in data")
audio_feature = audioexcer.view(1,-1,1)
return audio_feature
class SoundDatasetLoader(Dataset):
def __init__(self, len_snippet, dataset_name='DIEM', split=1, mode='train', use_sound=False, use_vox=False):
''' mode: train, val, save '''
path_data = '/ssd_scratch/cvit/samyak/data/'
self.path_data = path_data
self.use_vox = use_vox
self.use_sound = use_sound
self.mode = mode
self.len_snippet = len_snippet
self.img_transform = transforms.Compose([
transforms.Resize((224, 384)),
transforms.ToTensor(),
transforms.Normalize(
[0.485, 0.456, 0.406],
[0.229, 0.224, 0.225]
)
])
self.list_num_frame = []
self.dataset_name = dataset_name
if dataset_name=='DIEM':
file_name = 'DIEM_list_{}_fps.txt'.format(mode)
else:
file_name = '{}_list_{}_{}_fps.txt'.format(dataset_name, mode, split)
self.list_indata = []
with open(join(self.path_data, 'fold_lists', file_name), 'r') as f:
# with open(join(self.path_data, 'fold_lists', file_name), 'r') as f:
for line in f.readlines():
name = line.split(' ')[0].strip()
self.list_indata.append(name)
self.list_indata.sort()
print(self.mode, len(self.list_indata))
if self.mode=='train':
self.list_num_frame = [len(os.listdir(os.path.join(path_data,'annotations', dataset_name, v, 'maps'))) for v in self.list_indata]
elif self.mode == 'test' or self.mode == 'val':
print("val set")
for v in self.list_indata:
frames = os.listdir(join(path_data, 'annotations', dataset_name, v, 'maps'))
frames.sort()
for i in range(0, len(frames)-self.len_snippet, 2*self.len_snippet):
if self.check_frame(join(path_data, 'annotations', dataset_name, v, 'maps', 'eyeMap_%05d.jpg'%(i+self.len_snippet))):
self.list_num_frame.append((v, i))
max_audio_Fs = 22050
min_video_fps = 10
self.max_audio_win = int(max_audio_Fs / min_video_fps * 32)
# assert use_sound ^ use_vox == True, (use_sound, use_vox)
if use_sound or use_vox:
if self.mode=='val':
file_name = file_name.replace('val', 'test')
json_file = '{}_fps_map.json'.format(self.dataset_name)
self.audiodata = make_dataset(
join(self.path_data, 'fold_lists', file_name),
join(self.path_data, 'video_audio', self.dataset_name),
join(self.path_data, 'annotations', self.dataset_name),
# vox=use_vox,
# json_file=join(self.path_data, 'DAVE_fold_lists', json_file)
)
def check_frame(self, path):
img = cv2.imread(path, 0)
return img.max()!=0
def __len__(self):
return len(self.list_num_frame)
def __getitem__(self, idx):
# print(self.mode)
if self.mode == "train":
video_name = self.list_indata[idx]
while 1:
start_idx = np.random.randint(0, self.list_num_frame[idx]-self.len_snippet+1)
if self.check_frame(join(self.path_data, 'annotations', self.dataset_name, video_name, 'maps', 'eyeMap_%05d.jpg'%(start_idx+self.len_snippet))):
break
else:
print("No saliency defined in train dataset")
sys.stdout.flush()
elif self.mode == "test" or self.mode == "val":
(video_name, start_idx) = self.list_num_frame[idx]
path_clip = os.path.join(self.path_data, 'video_frames', self.dataset_name, video_name)
path_annt = os.path.join(self.path_data, 'annotations', self.dataset_name, video_name, 'maps')
if self.use_sound:
audio_feature = get_audio_feature(video_name, self.audiodata, self.len_snippet, start_idx)
clip_img = []
for i in range(self.len_snippet):
img = Image.open(join(path_clip, 'img_%05d.jpg'%(start_idx+i+1))).convert('RGB')
sz = img.size
clip_img.append(self.img_transform(img))
clip_img = torch.FloatTensor(torch.stack(clip_img, dim=0))
gt = np.array(Image.open(join(path_annt, 'eyeMap_%05d.jpg'%(start_idx+self.len_snippet))).convert('L'))
gt = gt.astype('float')
if self.mode == "train":
gt = cv2.resize(gt, (384, 224))
if np.max(gt) > 1.0:
gt = gt / 255.0
assert gt.max()!=0, (start_idx, video_name)
if self.use_sound or self.use_vox:
return clip_img, gt, audio_feature
return clip_img, gt
class DHF1KDataset(Dataset):
def __init__(self, path_data, len_snippet, mode="train", multi_frame=0, alternate=1):
''' mode: train, val, save '''
self.path_data = path_data
self.len_snippet = len_snippet
self.mode = mode
self.multi_frame = multi_frame
self.alternate = alternate
self.img_transform = transforms.Compose([
transforms.Resize((224, 384)),
transforms.ToTensor(),
transforms.Normalize(
[0.485, 0.456, 0.406],
[0.229, 0.224, 0.225]
)
])
if self.mode == "train":
self.video_names = os.listdir(path_data)
self.list_num_frame = [len(os.listdir(os.path.join(path_data,d,'images'))) for d in self.video_names]
elif self.mode=="val":
self.list_num_frame = []
for v in os.listdir(path_data):
for i in range(0, len(os.listdir(os.path.join(path_data,v,'images')))- self.alternate * self.len_snippet, 4*self.len_snippet):
self.list_num_frame.append((v, i))
else:
self.list_num_frame = []
for v in os.listdir(path_data):
for i in range(0, len(os.listdir(os.path.join(path_data,v,'images')))-self.alternate * self.len_snippet, self.len_snippet):
self.list_num_frame.append((v, i))
self.list_num_frame.append((v, len(os.listdir(os.path.join(path_data,v,'images')))-self.len_snippet))
def __len__(self):
return len(self.list_num_frame)
def __getitem__(self, idx):
# print(self.mode)
if self.mode == "train":
file_name = self.video_names[idx]
start_idx = np.random.randint(0, self.list_num_frame[idx]-self.alternate * self.len_snippet+1)
elif self.mode == "val" or self.mode=="save":
(file_name, start_idx) = self.list_num_frame[idx]
path_clip = os.path.join(self.path_data, file_name, 'images')
path_annt = os.path.join(self.path_data, file_name, 'maps')
clip_img = []
clip_gt = []
for i in range(self.len_snippet):
img = Image.open(os.path.join(path_clip, '%04d.png'%(start_idx+self.alternate*i+1))).convert('RGB')
sz = img.size
if self.mode!="save":
gt = np.array(Image.open(os.path.join(path_annt, '%04d.png'%(start_idx+self.alternate*i+1))).convert('L'))
gt = gt.astype('float')
if self.mode == "train":
gt = cv2.resize(gt, (384, 224))
if np.max(gt) > 1.0:
gt = gt / 255.0
clip_gt.append(torch.FloatTensor(gt))
clip_img.append(self.img_transform(img))
clip_img = torch.FloatTensor(torch.stack(clip_img, dim=0))
if self.mode!="save":
clip_gt = torch.FloatTensor(torch.stack(clip_gt, dim=0))
if self.mode=="save":
return clip_img, start_idx, file_name, sz
else:
if self.multi_frame==0:
return clip_img, clip_gt[-1]
return clip_img, clip_gt
class Hollywood_UCFDataset(Dataset):
def __init__(self, path_data, len_snippet, mode="train", frame_no="last", multi_frame=0):
''' mode: train, val, perframe
frame_no: last, middle
'''
self.path_data = path_data
self.len_snippet = len_snippet
self.mode = mode
self.frame_no = frame_no
self.multi_frame = multi_frame
self.img_transform = transforms.Compose([
transforms.Resize((224, 384)),
transforms.ToTensor(),
transforms.Normalize(
[0.485, 0.456, 0.406],
[0.229, 0.224, 0.225]
)
])
if self.mode == "train":
self.video_names = os.listdir(path_data)
self.list_num_frame = [len(os.listdir(os.path.join(path_data,d,'images'))) for d in self.video_names]
elif self.mode=="val":
self.list_num_frame = []
for v in os.listdir(path_data):
for i in range(0, len(os.listdir(os.path.join(path_data,v,'images')))-self.len_snippet, self.len_snippet):
self.list_num_frame.append((v, i))
if len(os.listdir(os.path.join(path_data,v,'images')))<=self.len_snippet:
self.list_num_frame.append((v, 0))
def __len__(self):
return len(self.list_num_frame)
def __getitem__(self, idx):
if self.mode == "train":
file_name = self.video_names[idx]
start_idx = np.random.randint(0, max(1, self.list_num_frame[idx]-self.len_snippet+1))
elif self.mode == "val":
(file_name, start_idx) = self.list_num_frame[idx]
path_clip = os.path.join(self.path_data, file_name, 'images')
path_annt = os.path.join(self.path_data, file_name, 'maps')
clip_img = []
clip_gt = []
list_clips = os.listdir(path_clip)
list_clips.sort()
list_sal_clips = os.listdir(path_annt)
list_sal_clips.sort()
if len(list_sal_clips)<self.len_snippet:
temp = [list_clips[0] for _ in range(self.len_snippet-len(list_clips))]
temp.extend(list_clips)
list_clips = copy.deepcopy(temp)
temp = [list_sal_clips[0] for _ in range(self.len_snippet-len(list_sal_clips))]
temp.extend(list_sal_clips)
list_sal_clips = copy.deepcopy(temp)
assert len(list_sal_clips) == self.len_snippet and len(list_clips)==self.len_snippet
for i in range(self.len_snippet):
img = Image.open(os.path.join(path_clip, list_clips[start_idx+i])).convert('RGB')
clip_img.append(self.img_transform(img))
gt = np.array(Image.open(os.path.join(path_annt, list_sal_clips[start_idx+i])).convert('L'))
gt = gt.astype('float')
if self.mode == "train":
gt = cv2.resize(gt, (384, 224))
if np.max(gt) > 1.0:
gt = gt / 255.0
clip_gt.append(torch.FloatTensor(gt))
clip_img = torch.FloatTensor(torch.stack(clip_img, dim=0))
if self.multi_frame==0:
gt = clip_gt[-1]
else:
gt = torch.FloatTensor(torch.stack(clip_gt, dim=0))
return clip_img, gt
# class DHF1KDataset(Dataset):
# def __init__(self, path_data, len_snippet, mode="train", frame_no="last"):
# ''' mode: train, val, perframe
# frame_no: last, middle
# '''
# self.path_data = path_data
# self.len_snippet = len_snippet
# self.mode = mode
# self.frame_no = frame_no
# print(self.frame_no)
# self.img_transform = transforms.Compose([
# transforms.Resize((224, 384)),
# transforms.ToTensor(),
# transforms.Normalize(
# [0.485, 0.456, 0.406],
# [0.229, 0.224, 0.225]
# )
# ])
# if self.mode == "train":
# self.video_names = os.listdir(path_data)
# self.list_num_frame = [len(os.listdir(os.path.join(path_data,d,'images'))) for d in self.video_names]
# elif self.mode=="val":
# self.list_num_frame = []
# for v in os.listdir(path_data):
# for i in range(0, len(os.listdir(os.path.join(path_data,v,'images')))-self.len_snippet, self.len_snippet):
# self.list_num_frame.append((v, i))
# else:
# self.list_num_frame = []
# for v in os.listdir(path_data):
# for i in range(0, len(os.listdir(os.path.join(path_data,v,'images')))-self.len_snippet):
# self.list_num_frame.append((v, i, False))
# for i in range(0, len_snippet):
# self.list_num_frame.append((v, i+len_snippet-1, True))
# def __len__(self):
# return len(self.list_num_frame)
# def __getitem__(self, idx):
# isFlip = False
# # print(self.mode)
# if self.mode == "train":
# file_name = self.video_names[idx]
# start_idx = np.random.randint(0, self.list_num_frame[idx]-self.len_snippet+1)
# elif self.mode == "val":
# (file_name, start_idx) = self.list_num_frame[idx]
# else:
# (file_name, start_idx, isFlip) = self.list_num_frame[idx]
# path_clip = os.path.join(self.path_data, file_name, 'images')
# path_annt = os.path.join(self.path_data, file_name, 'maps')
# clip_img = []
# for i in range(self.len_snippet):
# if not isFlip:
# img = Image.open(os.path.join(path_clip, '%04d.png'%(start_idx+i+1))).convert('RGB')
# else:
# img = Image.open(os.path.join(path_clip, '%04d.png'%(start_idx-i+1))).convert('RGB')
# clip_img.append(self.img_transform(img))
# clip_img = torch.FloatTensor(torch.stack(clip_img, dim=0))
# if not isFlip:
# if self.frame_no=="middle":
# gt = np.array(Image.open(os.path.join(path_annt, '%04d.png'%(start_idx+(self.len_snippet)//2))).convert('L'))
# else:
# gt = np.array(Image.open(os.path.join(path_annt, '%04d.png'%(start_idx+self.len_snippet))).convert('L'))
# else:
# gt = np.array(Image.open(os.path.join(path_annt, '%04d.png'%(start_idx-self.len_snippet+2))).convert('L'))
# gt = gt.astype('float')
# if self.mode == "train":
# gt = cv2.resize(gt, (384, 224))
# if np.max(gt) > 1.0:
# gt = gt / 255.0
# return clip_img, torch.FloatTensor(gt)
def get_audio_feature_vox(audioind, audiodata, clip_size, start_idx):
len_snippet = clip_size
# max_audio_Fs = 22050
# min_video_fps = 10
max_audio_win = 48320
audio_feature = torch.zeros(max_audio_win)
# valid = {}
# valid['audio']=0
if audioind in audiodata:
excerptstart = audiodata[audioind]['starts'][start_idx+1]
if start_idx+len_snippet >= len(audiodata[audioind]['ends']):
print("Exceeds size", audioind)
sys.stdout.flush()
excerptend = audiodata[audioind]['ends'][-1]
else:
excerptend = audiodata[audioind]['ends'][start_idx+len_snippet]
# try:
# valid['audio'] = audiodata[audioind]['wav'][:, excerptstart:excerptend+1].shape[1]
# except:
# pass
audio_feature_tmp = audiodata[audioind]['wav'][:, excerptstart:excerptend+1]
if audio_feature_tmp.shape[1]<=audio_feature.shape[0]:
audio_feature[:audio_feature_tmp.shape[1]] = audio_feature_tmp
else:
print("Audio Length Bigger")
audio_feature = audio_feature_tmp[0,:].copy()
# print(audio_feature.shape)
audio_feature = preprocess(audio_feature.numpy()).astype(np.float32)
assert audio_feature.shape == (512,300), audio_feature.shape
audio_feature=np.expand_dims(audio_feature, 2)
return transforms.ToTensor()(audio_feature)
|
11495498
|
import sys
import os
from datetime import datetime, timedelta
input_file = sys.argv[1]
num_workers = 8
if len(sys.argv) > 2:
num_workers = int(sys.argv[2])
input_fd = open(input_file, 'r')
times = []
accuracys = []
time_start = 0
for line in input_fd:
strs = line.split()
if time_start == 0 and (len(strs) == 9 or len(strs) == 10) and strs[6] == 'loss':
time_tuple = datetime.strptime(strs[1], "%H:%M:%S.%f")
time_start = time_tuple.hour * 3600 + time_tuple.minute * 60 + time_tuple.second
if not len(strs) == 11:
continue
if not strs[8] == 'accuracy2' and not strs[8] == 'loss3/top-5':
continue
# times.append(float(strs[5][:-1]))
time_tuple = datetime.strptime(strs[1], "%H:%M:%S.%f")
times.append(time_tuple.hour * 3600 + time_tuple.minute * 60 + time_tuple.second)
accuracys.append(float(strs[10]))
n = len(accuracys)
count = 0
for i in range(n):
if times[i] < times[0]:
times[i] = times[i] + 86400
# print('%i,%i,%f' % (clocks[i], times[i] - times[0], losses[i]))
count = count + accuracys[i]
if (i + 1) % num_workers == 0:
print('%i,%f' % (times[i] - time_start, count / num_workers))
count = 0
|
11495527
|
import gym, snake_gym
import neat
import pickle
env = gym.make("snake-tiled-v0")
state = env.reset()
done = False
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
'./config')
genome = pickle.load(open("winner.pkl", "rb"))
net = neat.nn.FeedForwardNetwork.create(genome, config)
reward = 0
while not done:
state = state.flatten()
output = net.activate(state)
output = output.index(max(output))
s, reward, done, info = env.step(output)
state = s
env.close()
print("Length: {}".format(reward))
|
11495559
|
import base64
from flask import Flask, redirect, request, session, url_for, jsonify
from spotify import OAuth, Client
app = Flask(__name__)
app.config['SECRET_KEY'] = 'supersecret'
def get_auth(token=None):
auth = OAuth(
'CLIENT_ID', # Replace these with your client id and secret
'CLIENT_SECRET',
redirect_uri='http://127.0.0.1:5000/callback',
scopes=["user-read-private", "user-top-read",
"ugc-image-upload", "playlist-modify-public",
"playlist-modify-private", ]
)
auth.token = token
return auth
@app.route('/the_app')
def the_app():
token = session.get('spotify_token')
if not token:
return redirect(url_for('authorize'))
client = Client(get_auth(token))
return jsonify([
client.api.me(),
client.api.me_top('artists')
])
# For custom playlist cover endpoint
@app.route('/playlist')
def playlist():
token = session.get('spotify_token')
if not token:
return redirect(url_for('authorize'))
client = Client(get_auth(token))
# Expects a file called "1.jpg" in example directory
with open("1.jpg", "rb") as f:
image = base64.b64encode(f.read()) # encode the image to base64
try:
client.api.user_playlist_custom_cover("2vfGKaDXBH7ZSmGVXVeI5o", image)
return {'status': 'success'}
except Exception as e:
return {'error': str(e)}
@app.route('/authorize')
def authorize():
auth = get_auth()
return redirect(auth.authorize_url)
@app.route('/callback') # redirect uri should point to this
def callback():
auth = get_auth()
auth.request_token(request.url)
session['spotify_token'] = auth.token
return redirect(url_for('the_app'))
|
11495561
|
class Solution:
def largestPalindrome(self, n):
"""
:type n: int
:rtype: int
"""
ans = [9, 987, 123, 597, 677, 1218, 84, 475]
return ans[n - 1]
|
11495576
|
from unittest import mock
from ros_tcp_endpoint.unity_service import UnityService
import rospy
@mock.patch.object(rospy, "Service")
def test_unity_service_send(mock_ros_service):
mock_tcp_server = mock.Mock()
unity_service = UnityService("color", mock.Mock(), mock_tcp_server)
assert unity_service.node_name == "color_service"
unity_service.send("test data")
mock_tcp_server.send_unity_service.assert_called_once()
@mock.patch.object(rospy, "Service")
def test_unity_service_unregister(mock_ros_service):
mock_tcp_server = mock.Mock()
unity_service = UnityService("color", mock.Mock(), mock_tcp_server)
assert unity_service.node_name == "color_service"
unity_service.unregister()
unity_service.service.shutdown.assert_called_once()
|
11495584
|
import mxnet as mx
import argparse
import logging
import os
# load data
def get_mnist_iter(args):
train_image = os.path.join(args.data_url, 'train-images-idx3-ubyte')
train_label = os.path.join(args.data_url, 'train-labels-idx1-ubyte')
try:
import moxing.mxnet as mox
except:
assert os.path.exists(train_image), 'file train-images-idx3-ubyte does not exist, please check your data url'
assert os.path.exists(train_label), 'file train-labels-idx1-ubyte does not exist, please check your data url'
else:
assert mox.file.exists(train_image), 'file train-images-idx3-ubyte does not exist, please check your data url'
assert mox.file.exists(train_label), 'file train-labels-idx1-ubyte does not exist, please check your data url'
train = mx.io.MNISTIter(image=train_image,
label=train_label,
data_shape=(1, 28, 28),
batch_size=args.batch_size,
shuffle=True,
flat=False,
silent=False,
seed=10)
return train
# create network
def get_symbol(num_classes=10, **kwargs):
data = mx.symbol.Variable('data')
data = mx.sym.Flatten(data=data)
fc1 = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=128)
act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")
fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64)
act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=num_classes)
mlp = mx.symbol.SoftmaxOutput(data = fc3, name = 'softmax')
return mlp
def fit(args):
# create kvstore
kv = mx.kvstore.create(args.kv_store)
# logging
head = '%(asctime)-15s Node[' + str(kv.rank) + '] %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
logging.info('start with arguments %s', args)
# get train data
train = get_mnist_iter(args)
# create checkpoint
checkpoint = mx.callback.do_checkpoint(args.train_url if kv.rank == 0 else "%s-%d" % (
args.train_url, kv.rank))
# create callbacks after end of every batch
batch_end_callbacks = [mx.callback.Speedometer(args.batch_size, args.disp_batches)]
# get the created network
network = get_symbol(num_classes=args.num_classes)
# create context
devs = mx.cpu() if args.num_gpus == 0 else [mx.gpu(int(i)) for i in range(args.num_gpus)]
# create model
model = mx.mod.Module(context=devs, symbol=network)
# create an initialization method
initializer = mx.init.Xavier(rnd_type='gaussian', factor_type="in", magnitude=2)
# create params of optimizer
optimizer_params = {'learning_rate': args.lr, 'wd' : 0.0001}
# run
model.fit(train,
begin_epoch=0,
num_epoch=args.num_epochs,
eval_data=None,
eval_metric=['accuracy'],
kvstore=kv,
optimizer='sgd',
optimizer_params=optimizer_params,
initializer=initializer,
arg_params=None,
aux_params=None,
batch_end_callback=batch_end_callbacks,
epoch_end_callback=checkpoint,
allow_missing=True)
if args.export_model == 1 and args.train_url is not None and len(args.train_url):
import moxing.mxnet as mox
end_epoch = args.num_epochs
save_path = args.train_url if kv.rank == 0 else "%s-%d" % (args.train_url, kv.rank)
params_path = '%s-%04d.params' % (save_path, end_epoch)
json_path = ('%s-symbol.json' % save_path)
logging.info(params_path + 'used to predict')
pred_params_path = os.path.join(args.train_url, 'model', 'pred_model-0000.params')
pred_json_path = os.path.join(args.train_url, 'model', 'pred_model-symbol.json')
mox.file.copy(params_path, pred_params_path)
mox.file.copy(json_path, pred_json_path)
for i in range(1, args.num_epochs + 1, 1):
mox.file.remove('%s-%04d.params' % (save_path, i))
mox.file.remove(json_path)
if __name__ == '__main__':
# parse args
parser = argparse.ArgumentParser(description="train mnist",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--num_classes', type=int, default=10,
help='the number of classes')
parser.add_argument('--num_examples', type=int, default=60000,
help='the number of training examples')
parser.add_argument('--data_url', type=str, default='s3://obs-lpf/data/', help='the training data')
parser.add_argument('--lr', type=float, default=0.05,
help='initial learning rate')
parser.add_argument('--num_epochs', type=int, default=10,
help='max num of epochs')
parser.add_argument('--disp_batches', type=int, default=20,
help='show progress for every n batches')
parser.add_argument('--batch_size', type=int, default=128,
help='the batch size')
parser.add_argument('--kv_store', type=str, default='device',
help='key-value store type')
parser.add_argument('--train_url', type=str, default='s3://obs-lpf/ckpt/mnist',
help='the path model saved')
parser.add_argument('--num_gpus', type=int, default=0,
help='number of gpus')
parser.add_argument('--export_model', type=int, default=1, help='1: export model for predict job \
0: not export model')
args, unkown = parser.parse_known_args()
fit(args)
|
11495593
|
import matplotlib.pyplot as plt
import numpy as np
hug = np.loadtxt("hugoniot.txt")
# read in the headings
with open("hugoniot.txt", "r") as f:
line = f.readline()
rho = float(line.split("=")[-1])
line = f.readline()
p = float(line.split("=")[-1])
line = f.readline()
rho_det = float(line.split("=")[-1])
line = f.readline()
p_det = float(line.split("=")[-1])
fig = plt.figure(1)
ax = fig.add_subplot(111)
ax.plot(1.0/hug[:, 0], hug[:, 2], lw=2, color="C0", label="detonation adiabat")
ax.plot(1.0/hug[:, 0], hug[:, 1], lw=1, color="C1", label="shock adiabat (no combustion)")
# mark the initial and detonation (CJ) states
ax.scatter(1/rho, p, marker="x", color="r")
ax.scatter(1/rho_det, p_det, marker="x", color="r")
# draw the Rayleigh line
slope = (p_det - p)/(1/rho_det - 1/rho)
v = 1/hug[:, 0]
ps = p + slope * (v - 1/rho)
ax.plot(v, ps, color="0.5", ls=":", label="Rayleigh line")
ax.set_xlim(0.2e-7, 1.1e-7)
ax.set_ylim(5.e23, 4.e25)
ax.legend(frameon=False)
#ax.set_xscale("log")
#ax.set_yscale("log")
ax.set_xlabel(r"$1/\rho$")
ax.set_ylabel(r"$p$")
fig.savefig("cj_det.png")
|
11495622
|
import numpy as np
from rlberry.envs.benchmarks.ball_exploration.ball2d import get_benchmark_env
from rlberry.agents.torch.ppo import PPOAgent
from rlberry.manager import AgentManager, plot_writer_data, evaluate_agents
from rlberry.exploration_tools.discrete_counter import DiscreteCounter
# --------------------------------
# Define train env
# --------------------------------
env = (get_benchmark_env, dict(level=4))
def uncertainty_estimator_fn(obs_space, act_space):
counter = DiscreteCounter(obs_space,
act_space,
n_bins_obs=20)
return counter
# -----------------------------
# Parameters
# -----------------------------
N_EPISODES = 200
GAMMA = 0.99
HORIZON = 30
BONUS_SCALE_FACTOR = 0.1
MIN_DIST = 0.1
params_ppo = {
'gamma': GAMMA,
'horizon': HORIZON,
'batch_size': 16,
'entr_coef': 8e-7,
'k_epochs': 10,
'eps_clip': 0.2,
'learning_rate': 0.03
}
params_ppo_bonus = {
'gamma': GAMMA,
'horizon': HORIZON,
'batch_size': 16,
'entr_coef': 8e-7,
'k_epochs': 10,
'eps_clip': 0.2,
'learning_rate': 0.03,
'use_bonus': True,
'uncertainty_estimator_kwargs': {
'uncertainty_estimator_fn': uncertainty_estimator_fn}
}
eval_kwargs = dict(eval_horizon=HORIZON, n_simulations=20)
# -----------------------------
# Run AgentManager
# -----------------------------
ppo_stats = AgentManager(
PPOAgent, env, fit_budget=N_EPISODES,
init_kwargs=params_ppo, eval_kwargs=eval_kwargs,
n_fit=4, agent_name='PPO')
ppo_bonus_stats = AgentManager(
PPOAgent, env, fit_budget=N_EPISODES,
init_kwargs=params_ppo_bonus, eval_kwargs=eval_kwargs,
n_fit=4, agent_name='PPO-Bonus')
agent_manager_list = [ppo_bonus_stats, ppo_stats]
for manager in agent_manager_list:
manager.fit()
# learning curves
plot_writer_data(agent_manager_list, tag='episode_rewards',
preprocess_func=np.cumsum,
title='Cumulative Rewards', show=False)
# compare final policies
output = evaluate_agents(agent_manager_list)
print(output)
|
11495643
|
import os
from unittest import TestCase
from runcommands.collection import Collection
from runcommands.command import command
from runcommands.run import run
from runcommands.runner import CommandRunner
CONFIG_FILE = os.path.join(os.path.dirname(__file__), "commands.toml")
@command
def test(a, b, c, d=None):
return a, b, c, d
class TestConfig(TestCase):
def setUp(self):
self.collection = Collection({"test": test})
def read_config_file(self, config_file=CONFIG_FILE):
return run.read_config_file(config_file, self.collection)
def interpolate(self, config):
globals_, default_args, environ = run.interpolate(
config.get("globals") or {},
config.get("args") or {},
config.get("environ") or {},
)
return {
"globals": globals_,
"args": default_args,
"environ": environ,
}
def test_read_config(self):
config = self.read_config_file()
self.assertIn("globals", config)
self.assertIn("args", config)
self.assertIn("environ", config)
self.assertIn("env", config["globals"])
self.assertEqual("test", config["globals"]["env"])
def test_read_config_and_interpolate(self):
config = self.read_config_file()
config = self.interpolate(config)
self.assertEqual(
{"env": "test", "a": "b", "b": "b", "d": "d"}, config["globals"]
)
self.assertEqual({"test": {"a": "b", "b": "b", "d": "x"}}, config["args"])
self.assertEqual({"XXX": "b", "XYZ": "b"}, config["environ"])
def test_read_config_then_call_command(self):
config = self.read_config_file()
config = self.interpolate(config)
runner = CommandRunner(self.collection)
self.collection.set_default_args(config["args"])
# Uses default args
result = runner.run(["test", "c"])[0]
self.assertEqual(("b", "b", "c", "x"), result)
# Uses some default args
result = runner.run(["test", "--a", "a", "c"])[0]
self.assertEqual(("a", "b", "c", "x"), result)
# Uses no default args
result = runner.run(["test", "--a", "x", "--b", "y", "c", "-d", "z"])[0]
self.assertEqual(("x", "y", "c", "z"), result)
|
11495653
|
import os
import numpy as np
import george
from george import kernels
from scipy import integrate
class sigmad_gp:
def __init__(self):
print('Initialize sigma_d emulator')
self.cosmos = np.loadtxt(os.path.dirname(
os.path.abspath(__file__)) + '/../data/cparams_4d.dat')
self.ydata = np.loadtxt(os.path.dirname(os.path.abspath(
__file__)) + '/../learned_data/sigmad/coeff_all.dat')
self.yavg = np.loadtxt(os.path.dirname(os.path.abspath(
__file__)) + '/../learned_data/sigmad/sigd_avg.dat')
self.ystd = np.loadtxt(os.path.dirname(os.path.abspath(
__file__)) + '/../learned_data/sigmad/sigd_std.dat')
self.gp_params = np.loadtxt(os.path.dirname(os.path.abspath(
__file__)) + '/../learned_data/sigmad/gp_params.dat')
self.ktypes = np.loadtxt(os.path.dirname(os.path.abspath(
__file__)) + '/../learned_data/sigmad/ktypes.dat')
if self.ktypes == 10:
kernel = 1. * \
kernels.Matern52Kernel(np.ones(4), ndim=4) + \
kernels.ConstantKernel(1e-4, ndim=4)
elif self.ktypes == 6:
kernel = 1. * \
kernels.ExpSquaredKernel(
np.ones(4), ndim=4) + kernels.ConstantKernel(1e-4, ndim=4)
else:
raise ValueError('kernel types 6 (ExpSquared) and 10 (Matern52) are the only supported types.')
self.gp = george.GP(kernel)
self.gp.compute(self.cosmos[:800])
self.gp.set_parameter_vector(self.gp_params)
self.As_fid = np.exp(3.094)
def get(self, cosmo):
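# Emulate sigma_d at the fiducial amplitude and rescale by sqrt(As / As_fid);
# for w != -1 the result is additionally rescaled by the wCDM/LCDM linear
# growth ratio, presumably because the emulator was trained at w = -1.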
cparams = cosmo.get_cosmology()[0]
if not np.isclose(cparams[5], -1):
growth_wcdm = _linearGrowth(
cparams[2], cparams[5], 0.)/_linearGrowth(cparams[2], cparams[5], 1000.)
growth_lcdm = _linearGrowth(
cparams[2], -1., 0.)/_linearGrowth(cparams[2], -1., 1000.)
return growth_wcdm/growth_lcdm * np.sqrt(np.exp(cparams[3]) / self.As_fid) * (self.ystd*self.gp.predict(self.ydata[:800], np.atleast_2d(cparams)[:, [0, 1, 2, 4]], return_cov=False)[0]+self.yavg)
else:
return np.sqrt(np.exp(cparams[3]) / self.As_fid) * (self.ystd*self.gp.predict(self.ydata[:800], np.atleast_2d(cparams)[:, [0, 1, 2, 4]], return_cov=False)[0]+self.yavg)
def _get_params(self, cparams):
if not np.isclose(cparams[5], -1):
growth_wcdm = _linearGrowth(
cparams[2], cparams[5], 0.)/_linearGrowth(cparams[2], cparams[5], 1000.)
growth_lcdm = _linearGrowth(
cparams[2], -1., 0.)/_linearGrowth(cparams[2], -1., 1000.)
return growth_wcdm/growth_lcdm * np.sqrt(np.exp(cparams[3]) / self.As_fid) * (self.ystd*self.gp.predict(self.ydata[:800], np.atleast_2d(cparams)[:, [0, 1, 2, 4]], return_cov=False)[0]+self.yavg)
else:
return np.sqrt(np.exp(cparams[3]) / self.As_fid) * (self.ystd*self.gp.predict(self.ydata[:800], np.atleast_2d(cparams)[:, [0, 1, 2, 4]], return_cov=False)[0]+self.yavg)
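# Linear growth factor D(a) for flat wCDM, evaluated through an Euler-type
# hypergeometric integral representation (the quad below).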
def _linearGrowth(Ode, wde, z):
Om = 1 - Ode
a_scale = 1./(1.+z)
alpha = -1./(3.*wde)
beta = (wde-1.)/(2.*wde)
gamma = 1.-5./(6.*wde)
x = -Ode/Om * a_scale**(-3.*wde)
res = integrate.quad(lambda t: t**(beta-1.)*(1.-t) **
(gamma-beta-1.)*(1.-t*x)**(-alpha), 0, 1.)
return a_scale * res[0]
|
11495660
|
from sklearn_explain.tests.skl_datasets import skl_datasets_test as skltest
skltest.test_class_dataset_and_model("BinaryClass_500" , "SGDClassifier_3")
|
11495661
|
import math
from military.soldier import TROOP_RADIUS
class RankPosition:
def __init__(self, unit, rank, position, canvas):
self.unit = unit
self.rank = rank
self.position = position
self.canvas = canvas
self.x, self.y = 0, 0
self.soldier = None
def change_soldier(self, soldier):
if soldier.rank_position is not None:
soldier.rank_position.soldier = None
soldier.rank_position = self
self.soldier = soldier
def has_soldier(self):
return self.soldier is not None
def calculate_position(self):
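# d1: offset along the rank line (perpendicular to the unit's facing),
# centred on the middle of the front rank; d2: offset backwards, one
# troop-diameter per rank. t2 is the facing angle, t1 its perpendicular.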
d1 = ((len(self.unit.ranks[0]) - 1) / 2.0 - self.position) * (TROOP_RADIUS + 1)
d2 = -self.rank * (TROOP_RADIUS + 1)
t1 = math.atan2(self.unit.dy, self.unit.dx) + math.pi / 2.0
t2 = t1 - math.pi / 2.0
self.x = self.unit.x + math.cos(t1) * d1 + math.cos(t2) * d2
self.y = self.unit.y + math.sin(t1) * d1 + math.sin(t2) * d2
|
11495668
|
import pytest
from e2e_tests.conftest import TestPorts, run_tunneler_container, TunnelerType, TunneledType, \
run_test_tcp_single_client_single_short_echo, run_test_tcp_single_client_multiple_short_echo, \
run_test_tcp_single_client_single_long_echo, run_test_tcp_multiple_clients_single_short_echo, \
run_test_tcp_multiple_tunnels_single_short_echo, run_test_tcp_server_long_response_and_empty_acks, \
run_test_udp_single_client_single_short_echo, run_test_udp_single_client_multiple_short_echo, \
run_test_udp_single_client_single_long_echo, run_test_udp_multiple_tunnels_single_short_echo
@pytest.mark.asyncio
async def test_tcp_single_client_single_short_echo(tcp_echo_server, tcp_over_tcp_server, tcp_over_tcp_client) -> None:
await run_test_tcp_single_client_single_short_echo()
@pytest.mark.asyncio
async def test_tcp_single_client_multiple_short_echo(tcp_echo_server, tcp_over_tcp_server, tcp_over_tcp_client) -> None:
await run_test_tcp_single_client_multiple_short_echo()
@pytest.mark.asyncio
async def test_tcp_single_client_single_long_echo(tcp_echo_server, tcp_over_tcp_server, tcp_over_tcp_client) -> None:
await run_test_tcp_single_client_single_long_echo()
@pytest.mark.asyncio
async def test_tcp_multiple_clients_single_short_echo(tcp_echo_server, tcp_over_tcp_server, tcp_over_tcp_client) -> None:
await run_test_tcp_multiple_clients_single_short_echo()
@pytest.mark.asyncio
async def test_tcp_multiple_tunnels_single_short_echo(tcp_echo_server, tcp_over_tcp_server, client_image, tcp_over_tcp_client) -> None:
container = run_tunneler_container(client_image,
'test_another_client',
TunnelerType.TCP,
TunneledType.TCP,
TestPorts.TUNNELER_PORT.value + 1,
TestPorts.UNTUNNELER_PORT)
await run_test_tcp_multiple_tunnels_single_short_echo(container)
@pytest.mark.asyncio
async def test_tcp_server_long_response_and_empty_acks(redis_server, tcp_over_tcp_server, tcp_over_tcp_client) -> None:
await run_test_tcp_server_long_response_and_empty_acks()
@pytest.mark.asyncio
async def test_udp_single_client_single_short_echo(udp_echo_server, udp_over_tcp_server, udp_over_tcp_client) -> None:
await run_test_udp_single_client_single_short_echo()
@pytest.mark.asyncio
async def test_udp_single_client_multiple_short_echo(udp_echo_server, udp_over_tcp_server, udp_over_tcp_client) -> None:
await run_test_udp_single_client_multiple_short_echo()
@pytest.mark.asyncio
async def test_udp_single_client_single_long_echo(udp_echo_server, udp_over_tcp_server, udp_over_tcp_client) -> None:
await run_test_udp_single_client_single_long_echo(10_000)
@pytest.mark.asyncio
async def test_udp_multiple_tunnels_single_short_echo(udp_echo_server, udp_over_tcp_server, client_image, udp_over_tcp_client) -> None:
container = run_tunneler_container(client_image,
'test_another_client',
TunnelerType.TCP,
TunneledType.UDP,
TestPorts.TUNNELER_PORT.value + 1,
TestPorts.UNTUNNELER_PORT)
await run_test_udp_multiple_tunnels_single_short_echo(container)
|
11495677
|
import os
import sys
from abc import ABC, abstractmethod
class AbstractRemoteObject(ABC):
'''Abstract base class that all RemoteObjects inherit from.
Inheriting from ABC makes the @abstractmethod declarations below
enforceable, rigidly defining the interface of a RemoteObject.'''
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
@abstractmethod
def exists(self):
pass
@abstractmethod
def download(self):
pass
@abstractmethod
def upload(self):
pass
@abstractmethod
def delete_file(self):
pass
|
11495706
|
from setuptools import setup, find_packages
import os
import re
def hoplite_plugins():
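# Walk hoplite/builtin_plugins and expose every *_job.py module as a
# 'hoplite.plugins.<name> = hoplite.builtin_plugins.<name>' entry point.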
hoplite_top_level = os.path.split(__file__)[0]
plugin_dir = os.path.join(hoplite_top_level, "hoplite", "builtin_plugins")
plugins = []
for directory, dirnames, filenames in os.walk(plugin_dir):
for filename in filenames:
match = re.search(r'(\w+_job)\.py$', filename)
if match:
fqdn = 'hoplite.plugins.{0}'.format(match.group(1))
module_name = 'hoplite.builtin_plugins.{0}'.format(match.group(1))
entry = '{0} = {1}'.format(fqdn, module_name)
plugins.append(entry)
return plugins
setup(
name="hoplite",
version="15.0.0.dev21",
packages=find_packages(exclude='tests'),
license='MIT',
install_requires=['flask>=0.10.1',
'requests>=2.2.0',
'argparse>=1.1',
'pymongo>=3.0',
'tornado',
'tblib'],
entry_points={
'console_scripts': [
'hoplite-server = hoplite.main:server_main',
'hoplite-client = hoplite.main:client_main',
'hoplite-auto-start = hoplite.auto_start:main'
],
'hoplite.jobs': hoplite_plugins()
},
include_package_data=True,
package_data={
'': ['*.cfg']
}
)
|
11495720
|
import argparse
import logging
import os
import cv2 as cv
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy.stats import norm
import math
from config import im_size, epsilon, epsilon_sqr, device
from scipy.ndimage import gaussian_filter, morphology
from skimage.measure import label, regionprops
def clip_gradient(optimizer, grad_clip):
"""
Clips gradients computed during backpropagation to avoid explosion of gradients.
:param optimizer: optimizer with the gradients to be clipped
:param grad_clip: clip value
"""
for group in optimizer.param_groups:
for param in group['params']:
if param.grad is not None:
param.grad.data.clamp_(-grad_clip, grad_clip)
def save_checkpoint(epoch, epochs_since_improvement, model, optimizer, loss, is_best):
state = {'epoch': epoch,
'epochs_since_improvement': epochs_since_improvement,
'loss': loss,
'model': model,
'optimizer': optimizer}
# filename = 'checkpoint_' + str(epoch) + '_' + str(loss) + '.tar'
filename = 'checkpoint.tar'
torch.save(state, filename)
# If this checkpoint is the best so far, store a copy so it doesn't get overwritten by a worse checkpoint
if is_best:
torch.save(state, 'BEST_checkpoint.tar')
class AverageMeter(object):
"""
Keeps track of most recent, average, sum, and count of a metric.
"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, shrink_factor):
"""
Shrinks learning rate by a specified factor.
:param optimizer: optimizer whose learning rate must be shrunk.
:param shrink_factor: factor in interval (0, 1) to multiply learning rate with.
"""
print("\nDECAYING learning rate.")
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr'] * shrink_factor
print("The new learning rate is %f\n" % (optimizer.param_groups[0]['lr'],))
def get_learning_rate(optimizer):
return optimizer.param_groups[0]['lr']
def accuracy(scores, targets, k=1):
batch_size = targets.size(0)
_, ind = scores.topk(k, 1, True, True)
correct = ind.eq(targets.view(-1, 1).expand_as(ind))
correct_total = correct.view(-1).float().sum() # 0D tensor
return correct_total.item() * (100.0 / batch_size)
def parse_args():
parser = argparse.ArgumentParser(description='Train face network')
# general
parser.add_argument('--end-epoch', type=int, default=30, help='training epoch size.')
parser.add_argument('--lr', type=float, default=0.01, help='start learning rate')
parser.add_argument('--lr-step', type=int, default=10, help='period of learning rate decay')
parser.add_argument('--optimizer', default='Adam', help='optimizer')
parser.add_argument('--weight-decay', type=float, default=0.0, help='weight decay')
parser.add_argument('--mom', type=float, default=0.9, help='momentum')
parser.add_argument('--batch-size', type=int, default=16, help='batch size in each context')
parser.add_argument('--checkpoint', type=str, default=None, help='checkpoint')
parser.add_argument('--n_features', type=int, default=32, help='feature numbers')
parser.add_argument('--KD_type', type=str, default='batch,spatial,channel', help='knowledge distillation type')
parser.add_argument('--feature_layer', type=str, default='[1,2,3,4]', help='feature selected')
parser.add_argument('--KD_weight', type=str, default='[1,1,1]', help='distillation loss weight')
args = parser.parse_args()
return args
def get_logger():
logger = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s %(levelname)s \t%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
def safe_crop(mat, x, y, crop_size=(im_size, im_size)):
crop_height, crop_width = crop_size
if len(mat.shape) == 2:
ret = np.zeros((crop_height, crop_width), np.uint8)
else:
ret = np.zeros((crop_height, crop_width, 3), np.uint8)
crop = mat[y:y + crop_height, x:x + crop_width]
h, w = crop.shape[:2]
ret[0:h, 0:w] = crop
if crop_size != (im_size, im_size):
ret = cv.resize(ret, dsize=(im_size, im_size), interpolation=cv.INTER_NEAREST)
return ret
# alpha prediction loss: the abosolute difference between the ground truth alpha values and the
# predicted alpha values at each pixel. However, due to the non-differentiable property of
# absolute values, we use the following loss function to approximate it.
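# i.e. loss = sum(sqrt((a_pred - a_true)^2 + eps^2)) / (num_unknown_pixels + eps),
# accumulated only over the pixels selected by the trimap/mask.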
def alpha_prediction_loss(y_pred, y_true, mask=None):
if mask is not None:
mask = mask
#diff = y_pred[:, 0, :] - y_true
else:
mask = y_true[:, 1, :]
diff = y_pred[:, 0, :] - y_true[:, 0, :]
diff = diff * mask
num_pixels = torch.sum(mask)
return torch.sum(torch.sqrt(torch.pow(diff, 2) + epsilon_sqr)) / (num_pixels + epsilon)
# compute the MSE error given a prediction, a ground truth and a trimap.
# pred: the predicted alpha matte
# target: the ground truth alpha matte
# trimap: the given trimap
#
def compute_mse(pred, alpha, mask):
num_pixels = mask.sum()
return ((pred - alpha) ** 2).sum() / num_pixels
# compute the SAD error given a prediction and a ground truth.
#
def compute_sad(pred, alpha):
diff = np.abs(pred - alpha)
return np.sum(diff) / 1000
def compute_grad(pd, gt, mask):
pd_x = gaussian_filter(pd, sigma=1.4, order=[1, 0], output=np.float32)
pd_y = gaussian_filter(pd, sigma=1.4, order=[0, 1], output=np.float32)
gt_x = gaussian_filter(gt, sigma=1.4, order=[1, 0], output=np.float32)
gt_y = gaussian_filter(gt, sigma=1.4, order=[0, 1], output=np.float32)
pd_mag = np.sqrt(pd_x ** 2 + pd_y ** 2)
gt_mag = np.sqrt(gt_x ** 2 + gt_y ** 2)
error_map = np.square(pd_mag - gt_mag)
loss = np.sum(error_map * mask) / 10
return loss
# compute the connectivity error
def compute_connectivity(pd, gt, mask, step=0.1):
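# Sweep thresholds from 0 to 1; at each level keep the largest connected region
# where prediction and ground truth agree, and record in l_map the last
# threshold at which each pixel was still connected to it. The loss compares
# these "degrees of connectedness" between prediction and ground truth.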
h, w = pd.shape
thresh_steps = np.arange(0, 1.1, step)
l_map = -1 * np.ones((h, w), dtype=np.float32)
lambda_map = np.ones((h, w), dtype=np.float32)
for i in range(1, thresh_steps.size):
pd_th = pd >= thresh_steps[i]
gt_th = gt >= thresh_steps[i]
label_image = label(pd_th & gt_th, connectivity=1)
cc = regionprops(label_image)
size_vec = np.array([c.area for c in cc])
if len(size_vec) == 0:
continue
max_id = np.argmax(size_vec)
coords = cc[max_id].coords
omega = np.zeros((h, w), dtype=np.float32)
omega[coords[:, 0], coords[:, 1]] = 1
flag = (l_map == -1) & (omega == 0)
l_map[flag == 1] = thresh_steps[i - 1]
dist_maps = morphology.distance_transform_edt(omega == 0)
dist_maps = dist_maps / dist_maps.max()
# lambda_map[flag == 1] = dist_maps.mean()
l_map[l_map == -1] = 1
# the definition of lambda is ambiguous
d_pd = pd - l_map
d_gt = gt - l_map
# phi_pd = 1 - lambda_map * d_pd * (d_pd >= 0.15).astype(np.float32)
# phi_gt = 1 - lambda_map * d_gt * (d_gt >= 0.15).astype(np.float32)
phi_pd = 1 - d_pd * (d_pd >= 0.15).astype(np.float32)
phi_gt = 1 - d_gt * (d_gt >= 0.15).astype(np.float32)
loss = np.sum(np.abs(phi_pd - phi_gt) * mask) / 1000
return loss
def draw_str(dst, target, s):
x, y = target
cv.putText(dst, s, (x + 1, y + 1), cv.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness=2, lineType=cv.LINE_AA)
cv.putText(dst, s, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv.LINE_AA)
def ensure_folder(folder):
if not os.path.exists(folder):
os.makedirs(folder)
def over_all_loss(student_out, teacher_out, alpha, student_fms, teacher_fms,
KD_type, feature_maps, KD_weight):
mask = alpha[:, 1, :]
KD_weight = eval(KD_weight)
l2 = nn.MSELoss()
DS_loss = alpha_prediction_loss(student_out, alpha)
TS_loss = alpha_prediction_loss(student_out, teacher_out, mask)
loss = (DS_loss + TS_loss) / 2
aggregated_student_fms = []
aggregated_teacher_fms = []
# using feature maps
selected_student_fms = [student_fms[ind] for ind in eval(feature_maps)]
selected_teacher_fms = [teacher_fms[ind] for ind in eval(feature_maps)]
# for channel, FSP
revised_student_fms = [student_fms[ind+9] for ind in eval(feature_maps)]
revised_teacher_fms = [teacher_fms[ind] for ind in eval(feature_maps)]
if 'hilton' not in KD_type:
if 'batch' in KD_type:
print('batch')
aggregated_student_fms.append([batch_similarity(fm) for fm in selected_student_fms])
aggregated_teacher_fms.append([batch_similarity(fm) for fm in selected_teacher_fms])
if 'spatial' in KD_type:
#print('S')
aggregated_student_fms.append([spatial_similarity(fm) for fm in selected_student_fms])
aggregated_teacher_fms.append([spatial_similarity(fm) for fm in selected_teacher_fms])
if 'channel' in KD_type:
#print('C')
aggregated_student_fms.append([channel_similarity(fm) for fm in revised_student_fms])
aggregated_teacher_fms.append([channel_similarity(fm) for fm in revised_teacher_fms])
if 'FSP' in KD_type:
print('F')
aggregated_student_fms.append([FSP(revised_student_fms[i], revised_student_fms[i+1]) for i in range(len(revised_student_fms)-1)])
aggregated_teacher_fms.append([FSP(revised_teacher_fms[i], revised_teacher_fms[i+1]) for i in range(len(revised_student_fms)-1)])
if 'AT' in KD_type:
print('AT')
aggregated_student_fms.append([AT(fm) for fm in selected_student_fms])
aggregated_teacher_fms.append([AT(fm) for fm in selected_teacher_fms])
# KD loss
for i in range(len(aggregated_student_fms)):
for j in range(len(aggregated_student_fms[i])):
loss += l2(aggregated_student_fms[i][j], aggregated_teacher_fms[i][j]) * KD_weight[i]
return loss
class Distiller(nn.Module):
def __init__(self, t_net, s_net):
super(Distiller, self).__init__()
teacher_bns = t_net.get_bn_before_relu()
margins = [get_margin_from_BN(bn) for bn in teacher_bns]
for i, margin in enumerate(margins):
self.register_buffer('margin%d' % (i+1), margin.unsqueeze(1).unsqueeze(2).unsqueeze(0).detach())
self.t_net = t_net
self.s_net = s_net
def forward(self, x):
t_feats, t_out = self.t_net.extract_feature(x)
s_feats, s_out = self.s_net.extract_feature(x)
feat_num = 4
loss_distill = 0
for i in range(feat_num):
loss_distill += distillation_loss(s_feats[i], t_feats[i].detach(), getattr(self, 'margin%d' % (i+1))) \
/ 2 ** (feat_num - i - 1)
return t_out, s_out, loss_distill
def get_margin_from_BN(bn):
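# Estimate the margin as E[x | x < 0] for a pre-ReLU activation modelled as
# N(mean, std) from the BN affine parameters (margin-ReLU style feature
# distillation); fall back to -3*std when the negative tail is negligible.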
margin = []
std = bn.weight.data
mean = bn.bias.data
for (s, m) in zip(std, mean):
s = abs(s.item())
m = m.item()
if norm.cdf(-m / s) > 0.001:
margin.append(- s * math.exp(- (m / s) ** 2 / 2) / math.sqrt(2 * math.pi) / norm.cdf(-m / s) + m)
else:
margin.append(-3 * s)
return torch.FloatTensor(margin).to(std.device)
def distillation_loss(source, target, margin):
target = torch.max(target, margin)
loss = torch.nn.functional.mse_loss(source, target, reduction="none")
loss = loss * ((source > target) | (target > 0)).float()
return loss.sum()
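# The *_similarity helpers below map a feature map of shape (B, C, H, W) to a
# normalised similarity matrix that student and teacher are matched on:
# batch_similarity -> (B, B), spatial_similarity -> (B, 1, H*W, H*W),
# channel_similarity -> (B, 1, C, C).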
def batch_similarity(fm): # batch similarity
fm = fm.view(fm.size(0), -1)
Q = torch.mm(fm, fm.transpose(0,1))
normalized_Q = Q / torch.norm(Q,2,dim=1).unsqueeze(1).expand(Q.shape)
return normalized_Q
def spatial_similarity(fm): # spatial similarity
fm = fm.view(fm.size(0), fm.size(1),-1)
norm_fm = fm / (torch.sqrt(torch.sum(torch.pow(fm,2), 1)).unsqueeze(1).expand(fm.shape) + 0.0000001 )
s = norm_fm.transpose(1,2).bmm(norm_fm)
s = s.unsqueeze(1)
return s
def channel_similarity(fm): # channel_similarity
fm = fm.view(fm.size(0), fm.size(1), -1)
norm_fm = fm / (torch.sqrt(torch.sum(torch.pow(fm,2), 2)).unsqueeze(2).expand(fm.shape) + 0.0000001)
s = norm_fm.bmm(norm_fm.transpose(1,2))
s = s.unsqueeze(1)
return s
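# FSP ("flow of solution procedure"): Gram matrix between two feature maps,
# average-pooling fm1 down first if its spatial size is larger than fm2's.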
def FSP(fm1, fm2):
if fm1.size(2) > fm2.size(2):
fm1 = F.adaptive_avg_pool2d(fm1, (fm2.size(2), fm2.size(3)))
fm1 = fm1.view(fm1.size(0), fm1.size(1), -1)
fm2 = fm2.view(fm2.size(0), fm2.size(1), -1).transpose(1,2)
fsp = torch.bmm(fm1, fm2) / fm1.size(2)
return fsp
def AT(fm):
eps = 1e-6
am = torch.pow(torch.abs(fm), 2)
am = torch.sum(am, dim=1, keepdim=True)
norm = torch.norm(am, dim=(2, 3), keepdim=True)
am = torch.div(am, norm + eps)
return am
|
11495754
|
import pytest
import numpy as np
from orbit.eda import eda_plot
def test_eda_plot(iclaims_training_data):
df = iclaims_training_data
df['claims'] = np.log(df['claims'])
# test plotting
_ = eda_plot.ts_heatmap(df=df, date_col='week', value_col='claims', seasonal_interval=52, normalization=True)
var_list = ['trend.unemploy', 'trend.filling', 'trend.job']
_ = eda_plot.correlation_heatmap(df, var_list=var_list)
_ = eda_plot.dual_axis_ts_plot(df=df, var1='trend.unemploy', var2='claims', date_col='week')
df[['week'] + var_list].melt(id_vars=['week'])
_ = eda_plot.wrap_plot_ts(df, 'week', ['week'] + var_list)
|
11495756
|
import numpy as np
from pyquante2.grid.lebedev import lebedev
class atomic_grid(object):
def __init__(self,atom,**kwargs):
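# Build this atom's integration grid: cross each radial shell (r, w_rad, n_ang)
# with the n_ang-point Lebedev sphere, weight each point by w_rad * w_ang, and
# translate the points to the atom's centre.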
atno,x,y,z = atom.atuple()
if kwargs.get('radial','EulerMaclaurin') == 'Legendre':
grid_params = LegendreGrid(atno,**kwargs)
else:
grid_params = EulerMaclaurinGrid(atno,**kwargs)
self.points = []
for rrad,wrad,nang in grid_params:
for xang,yang,zang,wang in lebedev[nang]:
w = wrad*wang
self.points.append((rrad*xang+x,rrad*yang+y,rrad*zang+z,w))
self.points = np.array(self.points,dtype=float)
self.npts = self.points.shape[0]
return
# The following two routines return [(ri,wi,nangi)] for nrad shells.
# The ri's are properly adjusted to go to the proper distances.
# The wi's are adjusted to only have to be multiplied by wrad from
# the lebedev shell
def EulerMaclaurinGrid(Z,**opts):
nrad = opts.get('nrad',32)
do_sg1 = opts.get('do_sg1',True)
nang = opts.get('nang',194)
radial = EulerMaclaurinRadialGrid(nrad,Z)
if do_sg1:
grid = [(r,w,SG1Angs(r,Z)) for r,w in radial]
else:
grid = [(r,w,nang) for r,w in radial]
return grid
def LegendreGrid(Z,**kwargs):
from pyquante2.constants import ang2bohr
from pyquante2.grid.data import Bragg
from pyquante2.grid.legendre import legendre
Rmax = 0.5*Bragg[Z]*ang2bohr
nrad = kwargs.get('nrad',32)
fineness = kwargs.get('fineness',1)
radial = legendre[nrad]
grid = []
for i in range(nrad):
xrad,wrad = radial[i]
rrad = BeckeRadMap(xrad,Rmax)
dr = 2*Rmax/pow(1-xrad,2)
vol = 4*np.pi*rrad*rrad*dr
nangpts = ang_mesh(float(i+1)/nrad,fineness)
grid.append((rrad,wrad*vol,nangpts))
return grid
def BeckeRadMap(x,Rmax):
return Rmax*(1.0+x)/(1.0-x)
def ang_mesh(frac,fineness,alevs = None):
"""\
Determine the number of points in the angular mesh based on
the fraction of the total radial grid index frac c (0,1).
You can optionally pass in the number of points for
the 5 different regions
"""
if not alevs:
ang_levels = [
[ 6, 14, 26, 26, 14], # Coarse
[ 50, 50,110, 50, 26], # Medium
[ 50,110,194,110, 50], # Fine
[194,194,194,194,194] # ultrafine
]
alevs = ang_levels[fineness]
nang = alevs[0]
if frac > 0.4: nang = alevs[1]
if frac > 0.5: nang = alevs[2]
if frac > 0.7: nang = alevs[3]
if frac > 0.8: nang = alevs[4]
return nang
def EulerMaclaurinRadialGrid(nrad,Z):
from pyquante2.grid.data import PopleRadii
# Radial part of the Gill, Johnson, Pople SG-1 grid
R = PopleRadii[Z]
grid = []
for i in range(1,nrad+1):
# Changed to include a factor of 4pi
#w = 2.*pow(R,3)*(nrad+1.)*pow(i,5)*pow(nrad+1-i,-7)
w = 8.*np.pi*pow(R,3)*(nrad+1.)*pow(i,5)*pow(nrad+1-i,-7)
r = R*i*i*pow(nrad+1-i,-2)
grid.append((r,w))
return grid
def SG1Angs(r,Z):
from pyquante2.grid.data import PopleRadii
# Gill, Johnson, Pople rules for SG-1 angular densities
R = PopleRadii[Z]
if Z in range(1,3): # H-He
alphas = [0.25,0.5,1.0,4.5]
elif Z in range(3,11): # Li-Ne
alphas = [0.1667, 0.500, 0.900, 3.5]
else: # only fit for Na-Ar
alphas = [0.1,0.4,0.8,2.5]
if r < alphas[0]*R: return 6
elif r < alphas[1]*R: return 38
elif r < alphas[2]*R: return 86
elif r < alphas[3]*R: return 194
return 86
if __name__ == '__main__':
import pylab
nrad = 32
for atno in [8,1]:
#print('lgrid: ')
#print('elgrid: ', [w for r, w, n in EulerMaclaurinGrid(atno, nrad=nrad)])
pylab.semilogy([w for r, w, n in LegendreGrid(atno, nrad=nrad)], label='L%d' % atno)
pylab.semilogy([w for r, w, n in EulerMaclaurinGrid(atno, nrad=nrad)], label='EL%d' % atno)
pylab.legend(loc='lower right')
pylab.title("Radial weights for DFT Grids")
pylab.show()
|
11495780
|
from homeassistant.components.device_tracker import SOURCE_TYPE_GPS
from homeassistant.components.device_tracker.config_entry import TrackerEntity
from .baseentity import FordpassEntity
from .const import STATES_MANAGER,FORD_VEHICLES
async def async_setup_entry(hass, config_entry, async_add_entities):
vehicles = []
states_manager = hass.data[config_entry.entry_id][STATES_MANAGER]
for single_vehicle in hass.data[config_entry.entry_id][FORD_VEHICLES]:
dev = FordVehicleTracker(states_manager, single_vehicle)
vehicles.append(dev)
async_add_entities(vehicles)
class FordVehicleTracker(FordpassEntity, TrackerEntity):
@property
def source_type(self) -> str:
return SOURCE_TYPE_GPS
@property
def latitude(self):
return self._vehicle.latitude
@property
def longitude(self):
return self._vehicle.longitude
@property
def name(self):
return self._vehicle.name
@property
def icon(self):
return "mdi:car-sports"
|
11495788
|
import warnings
from datetime import datetime, timedelta
from .workbook import Workbook
from .xlsbpackage import XlsbPackage
__version__ = '0.0.8'
def open_workbook(name, *args, **kwargs):
"""Opens the given workbook file path.
Args:
name (str): The name of the XLSB file to open.
Returns:
Workbook: The workbook instance for the given file name.
Examples:
This is typically the entrypoint to start working with an XLSB file:
>>> from pyxlsb import open_workbook
>>> with open_workbook('test_files/test.xlsb') as wb:
... print(wb.sheets)
...
['Test']
"""
return Workbook(XlsbPackage(name), *args, **kwargs)
|
11495823
|
import os
from moviepy.editor import *
from moviepy.video import fx
ABS_PATH = os.path.abspath(__file__)
BASE_DIR = os.path.dirname(os.path.dirname(ABS_PATH))
DATA_DIR = os.path.join(BASE_DIR, "data")
SAMPLE_DIR = os.path.join(DATA_DIR, "samples")
SAMPLE_INPUTS = os.path.join(SAMPLE_DIR, "inputs")
SAMPLE_OUTPUTS = os.path.join(SAMPLE_DIR, 'outputs')
os.makedirs(DATA_DIR, exist_ok=True)
os.makedirs(SAMPLE_INPUTS, exist_ok=True)
os.makedirs(SAMPLE_OUTPUTS, exist_ok=True)
|
11495856
|
from random import randrange
class MillerRabinPrimalityTest:
def __call__(self, n: int, k: int = 10) -> bool:
        if n < 2:
            return False
        if n == 2:
            return True
        if not n & 1:
            return False
def check(a, s, d, n):
x = pow(a, d, n)
if x == 1:
return True
for i in range(0, s - 1):
if x == n - 1:
return True
x = pow(x, 2, n)
return x == n - 1
s = 0
d = n - 1
while d % 2 == 0:
d >>= 1
s += 1
for _ in range(0, k):
a = randrange(2, n)
if not check(a, s, d, n):
return False
return True
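# Minimal usage sketch (illustrative, not part of the original file):
#   is_prime = MillerRabinPrimalityTest()
#   is_prime(97)           # -> True
#   is_prime(100)          # -> False
#   is_prime(2 ** 61 - 1)  # -> True (probabilistic, k=10 rounds by default)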
|
11495874
|
class DocSimilarity:
def __init__(self, model, docs):
from signs.similarity import doc_similarity as sims
from signs.utils.html_print import html_print
self._sims = sims
self._model = model
self._docs = docs
self._html_print = html_print
def seen_matrix(self):
'''creates a 2d matrix with similarities'''
return self._sims.seen_similarity_matrix(self._model)
def unseen_matrix(self, docs):
'''same as above but for unseen docs'''
return self._sims.unseen_similarity_matrix(self._model, docs)
def similar_docs(self, doc):
'''for comparing a single doc against all seen docs'''
return self._sims.similarity_docs(doc, self._model)
def spatial_distance(self, doc1, doc2):
'''for comparing two unseen or seen docs'''
return self._sims.vector_spatial_distance(self._model,
doc1,
doc2)
def preview_results(self, docs=None):
if docs is None:
# get the keys from docs with similarities
similarities = self._get_similarities(self._docs.docs())
else:
similarities = self._get_similarities(docs)
# print out the highest and lowest match
self._print_highest(similarities)
self._print_lowest(similarities)
def _get_similarities(self, docs):
import random
        # pick a random document
        doc_id = random.randint(0, len(docs) - 1)
# find similar documents
similarities = self._sims.similarity_docs(docs[doc_id], self._model)
return similarities
def _print_highest(self, similarities):
# create content
text = self._docs.docs(False)[list(similarities.keys())[0]][0]
# create similarity value and round
similarity = similarities[list(similarities.keys())[0]]
similarity = round(similarity, 4)
# parse together the title
title = "HIGHEST MATCH : " + str(similarity)
# print it out
self._html_print(text, title)
def _print_lowest(self, similarities):
# create content
text = self._docs.docs(False)[list(similarities.keys())[-1]][0]
# create similarity value and round
similarity = similarities[list(similarities.keys())[-1]]
similarity = round(similarity, 4)
# parse together the title
title = "LOWEST MATCH : " + str(similarity)
# print it out
self._html_print(text, title)
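# Hypothetical usage sketch; `model` and `docs` are objects supplied by the
# surrounding signs package and are assumptions here, not defined in this file:
#   sim = DocSimilarity(model, docs)
#   sim.preview_results()        # prints the highest and lowest match for a random doc
#   matrix = sim.seen_matrix()   # 2d similarity matrix over the seen documents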
|
11495875
|
from django.core.cache import cache
from rest_framework.authentication import (BaseAuthentication,
get_authorization_header)
from .exceptions import PermissionDenied
from .models import check_auth_token
from ..profiles.models import User
class GoogleLoginAuthentication(BaseAuthentication):
def authenticate_header(self, request):
return 'GoogleLogin'
def authenticate(self, request):
"""GoogleLogin auth=<token>"""
auth = get_authorization_header(request).decode('utf-8').split()
if not auth or auth[0].lower() != 'googlelogin':
raise PermissionDenied()
if len(auth) == 1:
raise PermissionDenied()
if not auth[1].startswith('auth='):
raise PermissionDenied()
token = auth[1].split('auth=', 1)[1]
return self.authenticate_credentials(token)
def authenticate_credentials(self, token):
user_id = check_auth_token(token)
if user_id is False:
raise PermissionDenied()
cache_key = 'reader_user:{0}'.format(user_id)
user = cache.get(cache_key)
if user is None:
try:
user = User.objects.get(pk=user_id, is_active=True)
except User.DoesNotExist:
raise PermissionDenied()
cache.set(cache_key, user, 5*60)
return user, token
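# Illustrative request header accepted by this scheme (anything else raises
# PermissionDenied); note the literal "auth=" prefix before the token:
#   Authorization: GoogleLogin auth=<token>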
|
11495883
|
import spacy
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import LogisticRegression
##########################
import codecs
import collections
import json
import re
import numpy as np
import modeling
import tokenization
import tensorflow as tf
import pandas as pd
import sys
from extract_features import InputExample, InputFeatures, input_fn_builder, model_fn_builder
from extract_features import convert_examples_to_features, _truncate_seq_pair, read_examples
tf.logging.set_verbosity(tf.logging.ERROR)
BERT_BASE_DIR = '/Users/kolsha/Documents/Projects/Python/BERT/multi_cased_L-12_H-768_A-12'
init_checkpoint = BERT_BASE_DIR + '/bert_model.ckpt'
layer_indexes = [-1]
use_one_hot_embeddings = False
max_seq_length = 128
bert_config = modeling.BertConfig.from_json_file(BERT_BASE_DIR +'/bert_config.json')
tokenizer = tokenization.FullTokenizer(
vocab_file=BERT_BASE_DIR+ '/vocab.txt', do_lower_case=False)
for (j, layer_index) in enumerate(layer_indexes):
print(j, layer_index)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
master=None,
tpu_config=tf.contrib.tpu.TPUConfig(
per_host_input_for_training=is_per_host)
)
def convert_lines_to_examples(lines):
"""Read a list of `InputExample`s from an input file."""
examples = []
unique_id = 0
for line in lines:
line = tokenization.convert_to_unicode(line)
if not line:
continue
line = line.strip()
text_a = None
text_b = None
m = re.match(r"^(.*) \|\|\| (.*)$", line)
if m is None:
text_a = line
else:
text_a = m.group(1)
text_b = m.group(2)
examples.append(
InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
unique_id += 1
return examples
##########################
class BertIntentClassifier(BaseEstimator, ClassifierMixin):
""" """
def __init__(self, spacy_load_path='xx_ent_wiki_sm'):
""" Create a new object """
self.nlp = spacy.load(spacy_load_path)
self.text_clf = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', LogisticRegression(penalty='l1', random_state=42)),
])
def fit(self, X, y, **kwargs):
""" Fit the logistic regression model to convert sequence to intent.
:param X: input texts for training.
:param y: target intents for training.
:return self
"""
X = self.__semhash_corpus(X)
self.text_clf.fit(X, y)
return self
def predict(self, X):
""" Predict resulting intents by source sequences with a trained logistic regression model.
:param X: source sequences.
:return: resulting intents, predicted for source sequences.
"""
X = self.__semhash_corpus(X)
return self.text_clf.predict(X)
def __preprocess(self, sentence):
""" Preprocess sentence by changing all letters to lower case, replacing pronouns
by ’-PRON-’, and removing all special characters except stop characters.
:param sentence: origin sentence as list of sentense of String type
:return clear sentence as list of sentense of String type
"""
clean_tokens = []
sentence = self.nlp(sentence)
for token in sentence:
if not token.is_stop:
clean_tokens.append(token.lemma_)
return " ".join(clean_tokens)
def __semhash_tokenizer(self, sentence, n=3):
""" Convert sentence into semantic hash tokens.
:param sentence: origin sentence after preprocessing as 1D list of sentense of string type
:return list of semantic hash tokens as np.array, ndim = 2
"""
tokens = sentence.split(" ")
final_tokens = []
for unhashed_token in tokens:
hashed_token = "#{}#".format(unhashed_token)
final_tokens += [''.join(gram)
for gram in list(zip(*[list(hashed_token)[i:] for i in range(n)]))]
return final_tokens
def __semhash_corpus(self, corpus):
""" Convert corpus into semantic hash corpus.
:param corpus: list of unicode strings.
:return list of semantic hash tokens.
"""
new_corpus = []
for sentence in corpus:
sentence = self.__preprocess(sentence)
tokens = self.__semhash_tokenizer(sentence)
new_corpus.append(" ".join(map(str, tokens)))
return new_corpus
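# Minimal usage sketch (training texts and intents below are placeholders, not
# from the original file; the xx_ent_wiki_sm model must be installed for spacy.load):
#   clf = BertIntentClassifier()
#   clf.fit(["book me a flight", "what is the weather"], ["flight", "weather"])
#   clf.predict(["weather for tomorrow"])   # -> e.g. array(['weather'], ...)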
|
11495887
|
AWS_ACCESS_KEY_ID = 'aws_access_key_id'
AWS_SECRET_ACCESS_KEY = 'aws_secret_access_key'
TABLE_NAME = 'event_created'
BUCKET_NAME = 'bucket'
CONNECTION = 'host=localhost port=5432 dbname=postgres user=postgres password=<PASSWORD>'
|
11495911
|
from django.apps import AppConfig
class BoilerplateAppConfig(AppConfig):
name = 'boilerplate_app'
|
11495988
|
import io
import json
import csv
import jsontableschema
from jsontableschema.exceptions import InvalidObjectType
from sqlalchemy.dialects.postgresql import insert
class TransformStream(object):
def __init__(self, fun):
self.fun = fun
def read(self, *args, **kwargs):
return self.fun()
def type_fields(schema, row):
missing_values = []
if 'missingValues' in schema._Schema__descriptor:
missing_values = schema._Schema__descriptor['missingValues']
typed_row = []
for index, field in enumerate(schema.fields):
value = row[index]
if value in missing_values:
value = None
elif field.type != 'geojson':
try:
value = field.cast_value(value)
except InvalidObjectType:
value = json.loads(value)
typed_row.append(value)
return typed_row
def copy_from(engine, table_name, table_schema, rows):
schema = jsontableschema.Schema(table_schema)
def transform():
try:
row = next(rows)
typed_row = type_fields(schema, row)
with io.StringIO() as out:
writer = csv.writer(out)
writer.writerow(typed_row)
return out.getvalue()
except StopIteration:
return ''
transformed_rows = TransformStream(transform)
conn = engine.raw_connection()
with conn.cursor() as cur:
copy = 'COPY {} FROM STDIN CSV'.format(table_name)
cur.copy_expert(copy, transformed_rows)
conn.commit()
conn.close()
def copy_to(engine, table_name, file):
conn = engine.raw_connection()
with conn.cursor() as cur:
copy = 'COPY {} TO STDOUT WITH CSV'.format(table_name)
cur.copy_expert(copy, file)
conn.close()
upsert_sql = '''
INSERT INTO {table_name} ({columns})
VALUES ({params_str})
ON CONFLICT ({conflict_columns})
DO UPDATE SET ({columns}) = ({params_str})
'''
def get_upsert_sql(db_schema, table_name, primary_keys, columns):
if db_schema:
table_name = '{}.{}'.format(db_schema, table_name)
return upsert_sql.format(
table_name=table_name,
columns=', '.join(columns),
params_str=', '.join(['%s' for s in range(len(columns))]),
conflict_columns=', '.join(primary_keys))
def upsert(engine, db_schema, table_name, table_schema, rows):
if 'primaryKey' not in table_schema:
raise Exception('`primaryKey` required for upsert')
schema = jsontableschema.Schema(table_schema)
upsert_sql = get_upsert_sql(
db_schema,
table_name,
table_schema['primaryKey'],
list(map(lambda x: x['name'], table_schema['fields'])))
conn = engine.raw_connection()
with conn.cursor() as cur:
try:
for row in rows:
typed_row = type_fields(schema, row)
                cur.execute(upsert_sql, typed_row + typed_row)  # row passed twice: once for INSERT, once for the UPDATE SET
conn.commit()
except:
conn.rollback()
raise
conn.close()
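# Hypothetical usage sketch (connection string, table name and schema are
# placeholders, not from the original file):
#   from sqlalchemy import create_engine
#   engine = create_engine('postgresql://user:pass@localhost/db')
#   table_schema = {'fields': [{'name': 'id', 'type': 'integer'},
#                              {'name': 'name', 'type': 'string'}]}
#   copy_from(engine, 'people', table_schema, iter([['1', 'Alice'], ['2', 'Bob']]))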
|
11495992
|
from arekit.common.entities.base import Entity
from arekit.common.experiment.annot.algo.base import BaseAnnotationAlgorithm
from arekit.common.labels.provider.base import BasePairLabelProvider
from arekit.common.news.parsed.base import ParsedNews
from arekit.common.news.parsed.providers.entity_service import EntityServiceProvider, DistanceType
from arekit.common.news.parsed.providers.opinion_pairs import OpinionPairsProvider
from arekit.common.opinions.base import Opinion
class PairBasedAnnotationAlgorithm(BaseAnnotationAlgorithm):
""" Is a pair-based annotation algorithm which
assumes to compose source-target entity pairs
"""
def __init__(self, dist_in_terms_bound, label_provider, dist_in_sents=0, ignored_entity_values=None):
"""
dist_in_terms_bound: int
max allowed distance in term (less than passed value)
"""
assert(isinstance(dist_in_terms_bound, int) or dist_in_terms_bound is None)
assert(isinstance(label_provider, BasePairLabelProvider))
assert(isinstance(dist_in_sents, int))
assert(isinstance(ignored_entity_values, list) or ignored_entity_values is None)
self.__ignored_entity_values = [] if ignored_entity_values is None else ignored_entity_values
self.__label_provider = label_provider
self.__dist_in_terms_bound = dist_in_terms_bound
self.__dist_in_sents = dist_in_sents
# region private methods
@staticmethod
def __create_key_by_entity_pair(e1, e2):
assert(isinstance(e1, Entity))
assert(isinstance(e2, Entity))
return "{}_{}".format(e1.IdInDocument, e2.IdInDocument)
def __is_ignored_entity_value(self, entity_value):
assert(isinstance(entity_value, str))
return entity_value in self.__ignored_entity_values
def __try_create_pair_key(self, entity_service, e1, e2, existed_opinions):
assert(isinstance(entity_service, EntityServiceProvider))
assert(isinstance(e1, Entity))
assert(isinstance(e2, Entity))
if e1.IdInDocument == e2.IdInDocument:
return
if self.__is_ignored_entity_value(entity_value=e1.Value):
return
if self.__is_ignored_entity_value(entity_value=e2.Value):
return
s_dist = entity_service.calc_dist_between_entities(e1=e1, e2=e2, distance_type=DistanceType.InSentences)
if s_dist > self.__dist_in_sents:
return
t_dist = entity_service.calc_dist_between_entities(e1=e1, e2=e2, distance_type=DistanceType.InTerms)
if self.__dist_in_terms_bound is not None and t_dist > self.__dist_in_terms_bound:
return
if existed_opinions is not None:
o = Opinion(source_value=e1.Value,
target_value=e2.Value,
sentiment=self.__label_provider.provide(source=e1, target=e2))
if existed_opinions.has_synonymous_opinion(opinion=o):
return
return self.__create_key_by_entity_pair(e1=e1, e2=e2)
# endregion
def iter_opinions(self, parsed_news, existed_opinions=None):
assert(isinstance(parsed_news, ParsedNews))
def __filter_pair_func(e1, e2):
key = self.__try_create_pair_key(entity_service=entity_service_provider,
e1=e1, e2=e2,
existed_opinions=existed_opinions)
return key is not None
# Initialize providers.
# TODO. Provide here service #245 issue.
opinions_provider = OpinionPairsProvider()
entity_service_provider = EntityServiceProvider()
opinions_provider.init_parsed_news(parsed_news)
entity_service_provider.init_parsed_news(parsed_news)
return opinions_provider.iter_from_all(label_provider=self.__label_provider,
filter_func=__filter_pair_func)
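# Hypothetical instantiation sketch; `my_label_provider` must be a
# BasePairLabelProvider and `parsed_news` a ParsedNews instance from the
# surrounding arekit package, and the 10-term bound is an arbitrary example:
#   algo = PairBasedAnnotationAlgorithm(dist_in_terms_bound=10,
#                                       label_provider=my_label_provider,
#                                       dist_in_sents=0)
#   opinions = list(algo.iter_opinions(parsed_news))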
|
11496009
|
import os
from pypact.input.groupstructures import ALL_GROUPS
from pypact.util.decorators import freeze_it
from pypact.util.jsonserializable import JSONSerializable
from pypact.util.numerical import get_float, is_float
from pypact.util.exceptions import PypactException, PypactOutOfRangeException, PypactDeserializeException
@freeze_it
class FluxesFile(JSONSerializable):
"""
FISPACT-II traditionally takes the fluxes file in descending order
not ascending, hence the reason for the reversed.
However the values and __boundaries arrays should be in ascending order.
It is advised that the user should set the values using the setValue
at a specific energy.
"""
def __init__(self, name="fluxes", norm=1.0):
self.name = name
self.norm = norm
self.__boundaries = []
self.__midpointenergies = []
self.values = []
def __len__(self):
return len(self.values)
def reset(self):
self.__init__(name=self.name)
@property
def boundaries(self):
return self.__boundaries
@property
def midPointEnergies(self):
return self.__midpointenergies
def setGroup(self, group):
if group not in ALL_GROUPS:
raise PypactOutOfRangeException("Group {} is not a valid group".format(group))
# group structures are in reverse order
self._setBoundaries(group)
self.values = [0.0]*group
def setValue(self, energy, value):
"""
Requires setGroup is set before
"""
if not self.__boundaries:
raise PypactException("No group set, cannot set value.")
if value < 0.0:
raise PypactOutOfRangeException("Flux value cannot be negative.")
if energy < self.__boundaries[0]:
raise PypactOutOfRangeException("Energy value below minimum for group, group min is {}.".format(self.__boundaries[0]))
if energy >= self.__boundaries[-1]:
raise PypactOutOfRangeException("Energy value exceeds maximum for group, group max is {}.".format(self.__boundaries[-1]))
for i in range(0, len(self.__boundaries)-1):
if self.__boundaries[i+1] > energy:
self.values[i] = value
return
def validate(self):
if len(self.__boundaries) != len(self.values) + 1:
raise PypactOutOfRangeException("Bin boundaries must be of size one greater than values size")
def _setFromReversedBoundaries(self, boundaries):
self.__boundaries = list(reversed(boundaries))
self.__midpointenergies = [(self.__boundaries[i] + self.__boundaries[i+1])/2.0 for i in range(0, len(boundaries)-1)]
def _setBoundaries(self, group):
self._setFromReversedBoundaries(ALL_GROUPS[group])
def _serialize(self, f):
"""
The serialization method
f: file object
NOTE: The values are in reverse order
Format is:
v1
v2
...
vn
1.0
name
"""
for e in list(reversed(self.values)):
f.write("{}\n".format(e))
f.write("{}\n".format(self.norm))
f.write(self.name)
def _deserialize(self, f):
"""
The deserialization method
f: file object
NOTE: The values are in reverse order
Format is:
v1
v2
...
vn
1.0
name
"""
self.reset()
lines = f.readlines()
# last two lines are the normalisation and the name
self.name = str(lines[-1])
self.norm = get_float(lines[-2])
reversed_values = []
for l in lines[:-2]:
for e in l.split():
if is_float(e):
reversed_values.append(get_float(e))
else:
raise PypactDeserializeException("Entry {} in line {} is not a float.".format(e, l))
group = len(reversed_values)
if group not in ALL_GROUPS:
raise PypactDeserializeException("Group structure {} not known to pypact.".format(group))
self.values = list(reversed(reversed_values))
self._setFromReversedBoundaries(ALL_GROUPS[group])
@freeze_it
class ArbFluxesFile(FluxesFile):
def __init__(self, *args, **kwargs):
super(ArbFluxesFile, self).__init__(*args, **kwargs)
# custom boundaries should be in normal order
def setGroup(self, boundaries):
self._setBoundaries(boundaries)
self.values = [0.0]*(len(boundaries)-1)
# custom boundaries should be in normal order
def _setBoundaries(self, boundaries):
self._setFromReversedBoundaries(list(reversed(boundaries)))
def _serialize(self, f):
"""
The serialization method
f: file object
Format is:
e1
e2
...
en+1
v1
v2
...
vn
1.0
name
"""
for e in list(reversed(self.boundaries)):
f.write("{}\n".format(e))
# write space between xs boundaries and values
f.write("\n")
for e in list(reversed(self.values)):
f.write("{}\n".format(e))
f.write("{}\n".format(self.norm))
f.write(self.name)
def _deserialize(self, f):
"""
The deserialization method
f: file object
Format is:
e1
e2
...
en+1
v1
v2
...
vn
1.0
name
"""
self.reset()
lines = f.readlines()
# last two lines are the normalisation and the name
self.name = str(lines[-1])
self.norm = get_float(lines[-2])
found_values = False
reversed_values = []
reversed_bounds = []
for l in lines[:-2]:
            # find the blank-line separator that indicates flux values rather than boundaries
if not l.split():
found_values = True
for e in l.split():
if is_float(e):
if found_values:
reversed_values.append(get_float(e))
else:
reversed_bounds.append(get_float(e))
else:
raise PypactDeserializeException("Entry {} in line {} is not a float.".format(e, l))
self.values = list(reversed(reversed_values))
self._setFromReversedBoundaries(reversed_bounds)
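# Minimal usage sketch (assumes a 709-entry group structure is present in
# ALL_GROUPS; the energy and flux values are illustrative only):
#   ff = FluxesFile(name="my_fluxes", norm=1.0)
#   ff.setGroup(709)
#   ff.setValue(1.0e6, 1.0e10)   # set the flux in the bin containing this energy
#   ff.validate()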
|
11496027
|
from django.contrib import admin
from metaci.testresults.models import TestMethod, TestResult, TestResultAsset
@admin.register(TestResult)
class TestResultAdmin(admin.ModelAdmin):
list_display = ("build_flow", "method", "duration", "outcome")
list_filter = ("build_flow__build__repo", "method", "method__testclass")
@admin.register(TestResultAsset)
class TestResultAssetAdmin(admin.ModelAdmin):
list_display = ("result", "asset")
list_filter = (
"result__build_flow__build__repo",
"result__method",
"result__method__testclass",
)
raw_id_fields = ("result",)
@admin.register(TestMethod)
class TestMethodAdmin(admin.ModelAdmin):
list_display = ("name", "testclass")
list_filter = ("testclass__repo", "testclass", "test_dashboard")
raw_id_fields = ("testclass",)
|
11496028
|
from roonapi import RoonApi, RoonDiscovery
appinfo = {
"extension_id": "python_roon_test",
"display_name": "Python library for Roon",
"display_version": "1.0.0",
"publisher": "gregd",
"email": "<EMAIL>",
}
# Can be None if you don't yet have a token
try:
core_id = open("my_core_id_file").read()
token = open("my_token_file").read()
except OSError:
print("Please authorise first using discovery.py")
exit()
discover = RoonDiscovery(core_id)
server = discover.first()
discover.stop()
roonapi = RoonApi(appinfo, token, server[0], server[1], True)
# get all zones (as dict)
print(roonapi.zones)
# get all outputs (as dict)
print(roonapi.outputs)
|
11496034
|
import requests
from Services.ApiAddressService import ApiAddressService
from Services.StorageCookieService import StorageCookieService
class AuthApiService(object):
def __init__(self):
self.apiaddress = ApiAddressService()
self.storagecookie = StorageCookieService()
def Version(self):
return requests.get("{0}/Auth/Version".format(self.apiaddress.getaddress()))
def AuthByPassword(self, email, password):
return requests.post("{0}/Auth/AuthByPassword".format(self.apiaddress.getaddress()), data={
"Email": email,
"Password": password
})
def InitPusher(self):
return requests.get("{0}/Auth/InitPusher".format(self.apiaddress.getaddress()),
cookies=self.storagecookie.get())
def SignInStatus(self):
return requests.get("{0}/Auth/SignInStatus".format(self.apiaddress.getaddress(),
cookies=self.storagecookie.get()))
def Me(self):
return requests.get(
"{0}/Auth/Me".format(self.apiaddress.getaddress()),
cookies=self.storagecookie.get())
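# Hypothetical usage sketch (the e-mail and password are placeholders):
#   auth = AuthApiService()
#   resp = auth.AuthByPassword("user@example.com", "secret")
#   if resp.ok:
#       print(auth.Me().json())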
|
11496048
|
import pickle
class HP:
def __init__(self, grid_size, max_iter, discount):
self.grid_size = grid_size
self.max_iter = max_iter
self.discount = discount
def __str__(self):
return " | ".join(["{} = {}".format(k,v) for k,v in self.__dict__.iteritems()])
class RlHp:
def __init__(self, rl_type, radius, filter_actions, lambda_, q_type):
self.rl_type = rl_type
self.radius = radius
self.filter_actions = filter_actions
self.lambda_ = lambda_
self.q_type = q_type
self.model = None
def __str__(self):
return " | ".join(["{} = {}".format(k,v) for k,v in self.__dict__.iteritems() if k != "model"])
def save_model(self, model, filename):
self.model = model
with open("data/" + filename, "wb") as fout:
pickle.dump(self, fout)
class EsHp:
def __init__(self, radius):
self.radius = radius
def __str__(self):
return " | ".join(["{} = {}".format(k,v) for k,v in self.__dict__.iteritems()])
def save_model(self, model, filename):
self.model = model
with open("data/" + filename, "wb") as fout:
pickle.dump(self, fout)
def load_from(filename):
with open("data/" + filename, "rb") as fin:
out = pickle.load(fin)
return out
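# Illustrative round trip (arguments, model object and file name are placeholders;
# save_model and load_from both resolve paths relative to a local "data/" directory):
#   hp = RlHp(rl_type="q", radius=2, filter_actions=True, lambda_=0.9, q_type="table")
#   hp.save_model(trained_model, "rlhp.pkl")
#   restored = load_from("rlhp.pkl")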
|
11496081
|
import bugsnag
from django.http import HttpResponse
from django.shortcuts import render
def index(request):
return HttpResponse(b'Some content!')
"""
(some nonsense goes here)
"""
def unhandled_crash(request):
raise RuntimeError('failed to return in time')
def unhandled_crash_in_template(request):
return render(request, 'notes/broken.html')
def handle_notify(request):
items = {}
try:
print("item: {}" % items["nonexistent-item"])
except KeyError as e:
bugsnag.notify(e, unhappy='nonexistent-file')
return HttpResponse(b'everything is fine!', content_type='text/plain')
def handle_notify_custom_info(request):
bugsnag.notify(Exception('something bad happened'), severity='info',
context='custom_info')
return HttpResponse('nothing to see here', content_type='text/plain')
def request_inspection(event):
event.context = event.request.GET['user_id']
def handle_crash_callback(request):
bugsnag.before_notify(request_inspection)
terrible_event()
def terrible_event():
raise RuntimeError('I did something wrong')
|