Dataset schema (one row per source file; ⌀ marks nullable columns):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 2 to 1.02M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 245 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 4 to 245 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 4 to 245 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 2 to 1.02M |
| avg_line_length | float64 | 1 to 417k |
| max_line_length | int64 | 1 to 987k |
| alphanum_fraction | float64 | 0 to 1 |
| content_no_comment | string | length 0 to 1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |
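A minimal sketch of filtering rows against this schema with pandas; the shard file name `shard.parquet` is a placeholder assumption, not a path taken from this page:

```python
# Hypothetical local shard of this dataset; the file name is a placeholder.
import pandas as pd

df = pd.read_parquet("shard.parquet")

# Keep small Python files from repos with at least one star, using the
# nullable max_stars_count column from the schema above.
sample = df[
    (df["lang"] == "Python")
    & (df["max_stars_count"].fillna(0) >= 1)
    & (df["size"] < 10_000)
][["hexsha", "max_stars_repo_name", "max_stars_repo_path", "size"]]

print(sample.head())
```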
---

**Row 1**

- hexsha: `1c40ff512eea886e54065c41272ed6fd79c68e2b` · size: 734 · ext: `py` · lang: Python
- path: `setup.py` · repo: `Abdur-rahmaanJ/POSMS-PythonOpenSubscriberMailSystem` @ `dbd1038e7e8fb99d3e1cd60633c3969c47d007ad` · licenses: `["MIT"]` (identical across the stars/issues/forks column variants)
- stars: 1 (event at 2018-02-16T13:06:10.000Z) · issues: 6 (2018-02-16T11:46:28.000Z to 2018-02-27T10:43:08.000Z) · forks: 1 (event at 2018-02-21T22:15:31.000Z)

content:

```python
# -*- coding: utf-8 -*-
"""
POSMS is a software relieving you from
subscriber system mailing costs
"""
from setuptools import setup, find_packages
NAME = 'posms'
AUTHOR = 'Abdur-Rahmaan Janhangeer'
VERSION = '1.0'
CONTACT = 'arj.python@gmail.com'
URL = 'https://github.com/Abdur-rahmaanJ/pyOSMS-PythonOpenSubscriberMailSystem'
LICENSE = 'MIT'
setup(
name=NAME,
version=VERSION,
long_description=__doc__,
author=AUTHOR,
author_email=CONTACT,
url=URL,
packages=find_packages(),
install_requires=[],
include_package_data=True,
classifiers=['Intended Audience :: individuals',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.6'])
```

- avg_line_length: 26.214286 · max_line_length: 79 · alphanum_fraction: 0.673025

content_no_comment:

```python
from setuptools import setup, find_packages
NAME = 'posms'
AUTHOR = 'Abdur-Rahmaan Janhangeer'
VERSION = '1.0'
CONTACT = 'arj.python@gmail.com'
URL = 'https://github.com/Abdur-rahmaanJ/pyOSMS-PythonOpenSubscriberMailSystem'
LICENSE = 'MIT'
setup(
name=NAME,
version=VERSION,
long_description=__doc__,
author=AUTHOR,
author_email=CONTACT,
url=URL,
packages=find_packages(),
install_requires=[],
include_package_data=True,
classifiers=['Intended Audience :: individuals',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.6'])
```

- is_comment_constant_removed: true · is_sharp_comment_removed: true
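The `is_sharp_comment_removed` flag above marks that `#` comments were stripped to produce `content_no_comment`. A minimal sketch of that kind of transform with the standard `tokenize` module; this mirrors the idea, not the dataset's actual (unpublished) pipeline, and `untokenize` re-spaces the output:

```python
# Illustrative only: drop '#' comments from Python source via tokenize.
import io
import tokenize

def strip_sharp_comments(source: str) -> str:
    tokens = [
        (tok.type, tok.string)
        for tok in tokenize.generate_tokens(io.StringIO(source).readline)
        if tok.type != tokenize.COMMENT
    ]
    # 2-tuples put untokenize in compatibility mode, which re-spaces tokens.
    return tokenize.untokenize(tokens)

print(repr(strip_sharp_comments("x = 1  # inline comment\n")))  # 'x =1 \n'
```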
---

**Row 2**

- hexsha: `1c41037033048f5070fd5e5798ab4220545688a9` · size: 8,324 · ext: `py` · lang: Python
- path: `report_compliance.py` · repo: `agroome/report_compliance` @ `9fc0050f6ebc3498528d46b9e0973855d1b8072f` · licenses: `["MIT"]`
- stars: null · issues: null · forks: null

content:

```python
import csv
from dotenv import load_dotenv
import logging
import os
import datetime
import pandas as pd
import argparse
import time
from collections import defaultdict, Counter
from functools import partial
from pathlib import Path
from typing import List, Iterable
from tenable.io import TenableIO
def timestamp_from_str(date_string: str, fmt: str = '%Y-%m-%d %H:%M') -> int:
"""Provide date and optional time separated by a space '%m/%d/%Y[ %H:%M]' """
if ' ' not in date_string:
date_string = f'{date_string} 00:00'
try:
return int(time.mktime(time.strptime(date_string, fmt)))
except Exception as e:
        raise SystemExit(repr(e))
env_file = Path(__file__).parent / '.env'
load_dotenv(env_file)
parser = argparse.ArgumentParser()
parser.add_argument('--first-seen', help="first seen date yyyy-mm-dd [hh:mm]")
parser.add_argument('--last-seen', help="last seen date yyyy-mm-dd [hh:mm]")
parser.add_argument('--timeout', type=int, help="timeout in seconds, default no timeout")
parser.add_argument('--output-folder', default='.', help="report folders created under this location")
parser.add_argument('--log-level', default='INFO', help="defaults to INFO")
parser.add_argument('--status', nargs='+', default=['ERROR', 'WARNING', 'FAILED'], help="include records with status")
args = parser.parse_args()
numeric_level = getattr(logging, args.log_level.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % args.log_level)
first_seen = timestamp_from_str(args.first_seen) if args.first_seen else None
last_seen = timestamp_from_str(args.last_seen) if args.last_seen else None
timeout = args.timeout if args.timeout else None
dt = datetime.datetime.now()
output_folder = Path(args.output_folder) / str(dt.strftime('%Y-%m-%d'))
# create the folder up front: logging opens a file in it on the next line
output_folder.mkdir(parents=True, exist_ok=True)
logfile = output_folder / 'compliance_export.log'
logging.basicConfig(filename=str(logfile), level=numeric_level)
compliance_fields = [
'actual_value', 'asset_uuid', 'audit_file', 'check_error', 'check_id', 'check_info', 'check_name',
'expected_value', 'first_seen', 'last_seen', 'plugin_id', 'reference', 'see_also', 'solution', 'status'
]
def take(number: int, items: Iterable) -> List:
"""Take number items from an iterable and return a list. (rather than load more_itertools)"""
def take_items():
for i, item in enumerate(items, start=1):
yield item
if i == number:
break
logging.debug('returning items')
return [item for item in take_items()]
def first_item(list_, default=''):
"""Return the first item in a list."""
return list_ and list_[0] or default
def parse_asset_record(record: dict, tags: List[str] = None) -> tuple:
"""Process asset fields to leave ipv4, fqdn, hostname and any specified tags."""
out_record = {
'ipv4': first_item(record['ipv4s']),
'fqdn': first_item(record['fqdns']),
'hostname': first_item(record['hostnames']),
}
if tags is not None:
out_record.update({tag['key']: tag['value'] for tag in record['tags'] if tag['key'] in tags})
return record['id'], out_record
def process_records(records, status=('ERROR', 'WARNING', 'FAILED'), compute_age=True):
"""Computes age for compliance records and reduces records on only those in status argument."""
included_status = set(status)
for record in records:
# only include records when record['status'] is in included_status
if record['status'] not in included_status:
continue
if compute_age:
_first_seen = pd.to_datetime(record['first_seen'])
_last_seen = pd.to_datetime(record['last_seen'])
record['age'] = (_last_seen - _first_seen).days
record['last_timestamp'] = int(_last_seen.timestamp())
# some records have missing fields, let's do a copy here
yield {field: record.get(field, '') for field in compliance_fields}
def inject_fields(records_in: Iterable[dict], payload_dict: dict, on_index: str):
"""Generator that injects fields from payload[index] into record. For use in chaining generators."""
for record in records_in:
try:
# index the related record in payload dict
yield {**record, **payload_dict[record[on_index]]}
except KeyError:
# payload dict is missing the entry for on_index
fields = ['check_name', 'check_info', 'plugin_id']
logging.warning(', '.join([f'{field}: {record[field]}' for field in fields]))
logging.warning(f'payload with id {record[on_index]} not found, continuing')
yield record
def summarize_compliance(data, summarize_by, include_error=True):
fields = ['PASSED', 'WARNING', 'FAILED']
data['count'] = 1
if include_error:
fields.append('ERROR')
_data = (
data
.sort_values(by=['last_seen'], ascending=False)
.groupby(by=['check_name', 'hostname'])
.first()
.reset_index()
.pivot_table(index=summarize_by, columns='status', values='count', fill_value=0, aggfunc='sum')
)
_data['TOTAL'] = sum([_data[status] for status in fields])
for field in fields:
_data[f'%{field}'] = 100 * _data[field] / _data['TOTAL']
_data[f'%{field}'].round(decimals=2)
return _data
def summarize_data(csv_input_file, asset_dictionary, output_file):
collector = defaultdict(lambda: defaultdict(Counter))
with open(csv_input_file, newline='') as fobj:
reader = csv.DictReader(fobj, dialect='excel')
for row in reader:
audit_file = row['audit_file']
asset_uuid = row['asset_uuid']
collector[audit_file][asset_uuid].update([row['status']])
field_names = ['audit_file', 'asset_uuid', 'PASSED', 'WARNING', 'FAILED', 'ERROR']
with open(output_file, 'w', newline='') as fobj:
writer = csv.DictWriter(fobj, dialect='excel', fieldnames=field_names)
writer.writeheader()
for audit_file in collector:
            for uuid in collector[audit_file]:
record = dict(audit_file=audit_file, asset_uuid=uuid)
record.update(collector[audit_file][uuid])
asset = asset_dictionary.get(uuid)
if asset:
record.update(asset)
writer.writerow(record)
def main():
if first_seen and not last_seen:
logging.error('first_seen can only be used in combination with last seen')
raise SystemExit('ERROR: first_seen can only be used in combination with last seen')
    tags = [tag for tag in os.getenv('TAGS', '').split(',') if tag]
if not os.path.exists(output_folder):
os.mkdir(output_folder)
tio = TenableIO()
# parse asset records to reduce included fields and pop list values
asset_parser = partial(parse_asset_record, tags=tags)
asset_dictionary = dict(map(asset_parser, tio.exports.assets()))
asset_fields = ['ipv4', 'fqdn', 'hostname']
if tags is not None:
asset_fields.extend(tags)
# data pipeline export -> process_records -> inject_fields
    records_iterator = inject_fields(
        process_records(
            tio.exports.compliance(first_seen=first_seen, last_seen=last_seen, timeout=timeout),
            status=args.status,
        ),
        asset_dictionary,
        on_index='asset_uuid',
    )
fieldnames = compliance_fields + asset_fields + ['age', 'last_timestamp']
records_per_chunk = 50000
while True:
records = take(records_per_chunk, records_iterator)
if not records:
break
df = pd.DataFrame.from_records(records, columns=fieldnames)
logging.debug(f'writing {len(df)} records')
if 'audit_file' in df:
for audit_file, data in df.groupby('audit_file'):
logging.debug(f'process {len(data)} records from {audit_file}')
audit_file = str(audit_file).replace('.audit', '.csv')
                out_path = output_folder / audit_file
                # write the CSV header only when the file is first created
                data.to_csv(out_path, index=False, mode='a', header=not out_path.exists())
if __name__ == '__main__':
main()
```

- avg_line_length: 39.450237 · max_line_length: 181 · alphanum_fraction: 0.641038

content_no_comment:

```python
import csv
from dotenv import load_dotenv
import logging
import os
import datetime
import pandas as pd
import argparse
import time
from collections import defaultdict, Counter
from functools import partial
from pathlib import Path
from typing import List, Iterable
from tenable.io import TenableIO
def timestamp_from_str(date_string: str, fmt: str = '%Y-%m-%d %H:%M') -> int:
if ' ' not in date_string:
date_string = f'{date_string} 00:00'
try:
return int(time.mktime(time.strptime(date_string, fmt)))
except Exception as e:
        raise SystemExit(repr(e))
env_file = Path(__file__).parent / '.env'
load_dotenv(env_file)
parser = argparse.ArgumentParser()
parser.add_argument('--first-seen', help="first seen date yyyy-mm-dd [hh:mm]")
parser.add_argument('--last-seen', help="last seen date yyyy-mm-dd [hh:mm]")
parser.add_argument('--timeout', type=int, help="timeout in seconds, default no timeout")
parser.add_argument('--output-folder', default='.', help="report folders created under this location")
parser.add_argument('--log-level', default='INFO', help="defaults to INFO")
parser.add_argument('--status', nargs='+', default=['ERROR', 'WARNING', 'FAILED'], help="include records with status")
args = parser.parse_args()
numeric_level = getattr(logging, args.log_level.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % args.log_level)
first_seen = timestamp_from_str(args.first_seen) if args.first_seen else None
last_seen = timestamp_from_str(args.last_seen) if args.last_seen else None
timeout = args.timeout if args.timeout else None
dt = datetime.datetime.now()
output_folder = Path(args.output_folder) / str(dt.strftime('%Y-%m-%d'))
output_folder.mkdir(parents=True, exist_ok=True)
logfile = output_folder / 'compliance_export.log'
logging.basicConfig(filename=str(logfile), level=numeric_level)
compliance_fields = [
'actual_value', 'asset_uuid', 'audit_file', 'check_error', 'check_id', 'check_info', 'check_name',
'expected_value', 'first_seen', 'last_seen', 'plugin_id', 'reference', 'see_also', 'solution', 'status'
]
def take(number: int, items: Iterable) -> List:
def take_items():
for i, item in enumerate(items, start=1):
yield item
if i == number:
break
logging.debug('returning items')
return [item for item in take_items()]
def first_item(list_, default=''):
return list_ and list_[0] or default
def parse_asset_record(record: dict, tags: List[str] = None) -> tuple:
out_record = {
'ipv4': first_item(record['ipv4s']),
'fqdn': first_item(record['fqdns']),
'hostname': first_item(record['hostnames']),
}
if tags is not None:
out_record.update({tag['key']: tag['value'] for tag in record['tags'] if tag['key'] in tags})
return record['id'], out_record
def process_records(records, status=('ERROR', 'WARNING', 'FAILED'), compute_age=True):
included_status = set(status)
for record in records:
if record['status'] not in included_status:
continue
if compute_age:
_first_seen = pd.to_datetime(record['first_seen'])
_last_seen = pd.to_datetime(record['last_seen'])
record['age'] = (_last_seen - _first_seen).days
record['last_timestamp'] = int(_last_seen.timestamp())
yield {field: record.get(field, '') for field in compliance_fields}
def inject_fields(records_in: Iterable[dict], payload_dict: dict, on_index: str):
for record in records_in:
try:
# index the related record in payload dict
yield {**record, **payload_dict[record[on_index]]}
except KeyError:
# payload dict is missing the entry for on_index
fields = ['check_name', 'check_info', 'plugin_id']
logging.warning(', '.join([f'{field}: {record[field]}' for field in fields]))
logging.warning(f'payload with id {record[on_index]} not found, continuing')
yield record
def summarize_compliance(data, summarize_by, include_error=True):
fields = ['PASSED', 'WARNING', 'FAILED']
data['count'] = 1
if include_error:
fields.append('ERROR')
_data = (
data
.sort_values(by=['last_seen'], ascending=False)
.groupby(by=['check_name', 'hostname'])
.first()
.reset_index()
.pivot_table(index=summarize_by, columns='status', values='count', fill_value=0, aggfunc='sum')
)
_data['TOTAL'] = sum([_data[status] for status in fields])
for field in fields:
_data[f'%{field}'] = 100 * _data[field] / _data['TOTAL']
_data[f'%{field}'].round(decimals=2)
return _data
def summarize_data(csv_input_file, asset_dictionary, output_file):
collector = defaultdict(lambda: defaultdict(Counter))
with open(csv_input_file, newline='') as fobj:
reader = csv.DictReader(fobj, dialect='excel')
for row in reader:
audit_file = row['audit_file']
asset_uuid = row['asset_uuid']
collector[audit_file][asset_uuid].update([row['status']])
field_names = ['audit_file', 'asset_uuid', 'PASSED', 'WARNING', 'FAILED', 'ERROR']
with open(output_file, 'w', newline='') as fobj:
writer = csv.DictWriter(fobj, dialect='excel', fieldnames=field_names)
writer.writeheader()
for audit_file in collector:
            for uuid in collector[audit_file]:
record = dict(audit_file=audit_file, asset_uuid=uuid)
record.update(collector[audit_file][uuid])
asset = asset_dictionary.get(uuid)
if asset:
record.update(asset)
writer.writerow(record)
def main():
if first_seen and not last_seen:
logging.error('first_seen can only be used in combination with last seen')
raise SystemExit('ERROR: first_seen can only be used in combination with last seen')
    tags = [tag for tag in os.getenv('TAGS', '').split(',') if tag]
if not os.path.exists(output_folder):
os.mkdir(output_folder)
tio = TenableIO()
# parse asset records to reduce included fields and pop list values
asset_parser = partial(parse_asset_record, tags=tags)
asset_dictionary = dict(map(asset_parser, tio.exports.assets()))
asset_fields = ['ipv4', 'fqdn', 'hostname']
if tags is not None:
asset_fields.extend(tags)
# data pipeline export -> process_records -> inject_fields
    records_iterator = inject_fields(
        process_records(
            tio.exports.compliance(first_seen=first_seen, last_seen=last_seen, timeout=timeout),
            status=args.status,
        ),
        asset_dictionary,
        on_index='asset_uuid',
    )
fieldnames = compliance_fields + asset_fields + ['age', 'last_timestamp']
records_per_chunk = 50000
while True:
records = take(records_per_chunk, records_iterator)
if not records:
break
df = pd.DataFrame.from_records(records, columns=fieldnames)
logging.debug(f'writing {len(df)} records')
if 'audit_file' in df:
for audit_file, data in df.groupby('audit_file'):
logging.debug(f'process {len(data)} records from {audit_file}')
audit_file = str(audit_file).replace('.audit', '.csv')
                out_path = output_folder / audit_file
                data.to_csv(out_path, index=False, mode='a', header=not out_path.exists())
if __name__ == '__main__':
main()
```

- is_comment_constant_removed: true · is_sharp_comment_removed: true
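The script's `main()` drains the export through `take()` in fixed-size chunks, so the full compliance export never has to fit in memory at once. The pattern in isolation (plain Python, no Tenable dependency; chunk size shrunk from 50,000 to 3 for display):

```python
# Stand-in for the streaming export iterator the script consumes.
records_iterator = iter(range(7))

chunks = []
while True:
    # equivalent of take(3, records_iterator): pull at most three items
    chunk = [item for _, item in zip(range(3), records_iterator)]
    if not chunk:
        break
    chunks.append(chunk)

print(chunks)  # [[0, 1, 2], [3, 4, 5], [6]]
```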
---

**Row 3**

- hexsha: `1c41042eeb566322046a54926e78715bc4aad0ba` · size: 2,999 · ext: `py` · lang: Python
- path: `tests/unit/drivers/test_load_groundtruth_driver.py` · repo: `yk/jina` @ `ab66e233e74b956390f266881ff5dc4e0110d3ff` · licenses: `["Apache-2.0"]`
- stars: 1 (event at 2020-12-23T08:58:49.000Z) · issues: null · forks: null

content:

```python
from typing import Optional
import numpy as np
import pytest
from jina import Request
from jina.drivers.evaluate import LoadGroundTruthDriver
from jina.executors.indexers import BaseKVIndexer
from jina.proto import jina_pb2
from jina.types.document import Document
class MockGroundTruthIndexer(BaseKVIndexer):
def add(self, keys: 'np.ndarray', vectors: 'np.ndarray', *args, **kwargs):
pass
def query(self, key: int) -> Optional['Document']:
if key in self.db.keys():
return self.db[key]
else:
return None
def get_query_handler(self):
pass
def get_add_handler(self):
pass
def get_create_handler(self):
pass
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
doc1 = Document()
doc1.id = '01' * 8
doc1.tags['groundtruth'] = True
doc2 = Document()
doc2.id = '02' * 8
doc2.tags['groundtruth'] = True
doc4 = Document()
doc4.id = '04' * 8
doc4.tags['groundtruth'] = True
self.db = {
int(doc1.id): doc1.SerializeToString(),
int(doc2.id): doc2.SerializeToString(),
int(doc4.id): doc4.SerializeToString()
}
class SimpleLoadGroundTruthDriver(LoadGroundTruthDriver):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.eval_request = None
@property
def exec_fn(self):
return self._exec_fn
@property
def req(self) -> 'jina_pb2.RequestProto':
"""Get the current (typed) request, shortcut to ``self.pea.request``"""
return self.eval_request
@property
def expect_parts(self) -> int:
return 1
@pytest.fixture(scope='function')
def simple_load_groundtruth_driver():
return SimpleLoadGroundTruthDriver()
@pytest.fixture(scope='function')
def mock_groundtruth_indexer():
return MockGroundTruthIndexer()
@pytest.fixture(scope='function')
def eval_request():
req = Request()
req.request_type = 'search'
# doc: 1
# doc: 2
# doc: 3
# doc: 4
# doc: 5 - will be missing from KV indexer
for idx in range(5):
dp = Document()
dp.id = f'0{str(idx + 1)}' * 8
req.docs.append(dp)
return req
def test_load_groundtruth_driver(mock_groundtruth_indexer, simple_load_groundtruth_driver, eval_request):
simple_load_groundtruth_driver.attach(executor=mock_groundtruth_indexer, pea=None)
simple_load_groundtruth_driver.eval_request = eval_request
simple_load_groundtruth_driver()
assert len(eval_request.docs) == 3
assert len(eval_request.groundtruths) == 3
for groundtruth in eval_request.groundtruths:
assert groundtruth.tags['groundtruth']
assert eval_request.groundtruths[0].id == '01' * 8
assert eval_request.groundtruths[1].id == '02' * 8
# index 3 and 5 have no groundtruth in the KVIndexer
assert eval_request.groundtruths[2].id == '04' * 8
```

- avg_line_length: 27.018018 · max_line_length: 105 · alphanum_fraction: 0.652551

content_no_comment:

```python
from typing import Optional
import numpy as np
import pytest
from jina import Request
from jina.drivers.evaluate import LoadGroundTruthDriver
from jina.executors.indexers import BaseKVIndexer
from jina.proto import jina_pb2
from jina.types.document import Document
class MockGroundTruthIndexer(BaseKVIndexer):
def add(self, keys: 'np.ndarray', vectors: 'np.ndarray', *args, **kwargs):
pass
def query(self, key: int) -> Optional['Document']:
if key in self.db.keys():
return self.db[key]
else:
return None
def get_query_handler(self):
pass
def get_add_handler(self):
pass
def get_create_handler(self):
pass
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
doc1 = Document()
doc1.id = '01' * 8
doc1.tags['groundtruth'] = True
doc2 = Document()
doc2.id = '02' * 8
doc2.tags['groundtruth'] = True
doc4 = Document()
doc4.id = '04' * 8
doc4.tags['groundtruth'] = True
self.db = {
int(doc1.id): doc1.SerializeToString(),
int(doc2.id): doc2.SerializeToString(),
int(doc4.id): doc4.SerializeToString()
}
class SimpleLoadGroundTruthDriver(LoadGroundTruthDriver):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.eval_request = None
@property
def exec_fn(self):
return self._exec_fn
@property
def req(self) -> 'jina_pb2.RequestProto':
return self.eval_request
@property
def expect_parts(self) -> int:
return 1
@pytest.fixture(scope='function')
def simple_load_groundtruth_driver():
return SimpleLoadGroundTruthDriver()
@pytest.fixture(scope='function')
def mock_groundtruth_indexer():
return MockGroundTruthIndexer()
@pytest.fixture(scope='function')
def eval_request():
req = Request()
req.request_type = 'search'
for idx in range(5):
dp = Document()
dp.id = f'0{str(idx + 1)}' * 8
req.docs.append(dp)
return req
def test_load_groundtruth_driver(mock_groundtruth_indexer, simple_load_groundtruth_driver, eval_request):
simple_load_groundtruth_driver.attach(executor=mock_groundtruth_indexer, pea=None)
simple_load_groundtruth_driver.eval_request = eval_request
simple_load_groundtruth_driver()
assert len(eval_request.docs) == 3
assert len(eval_request.groundtruths) == 3
for groundtruth in eval_request.groundtruths:
assert groundtruth.tags['groundtruth']
assert eval_request.groundtruths[0].id == '01' * 8
assert eval_request.groundtruths[1].id == '02' * 8
assert eval_request.groundtruths[2].id == '04' * 8
```

- is_comment_constant_removed: true · is_sharp_comment_removed: true
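The fixture above builds 16-character ids by repeating a two-character prefix, and the mock indexer keys its store by the integer form of those ids; docs 3 and 5 are absent from the store, which is why the test expects exactly three groundtruths. The scheme in isolation:

```python
# Ids present in the mock KV store above: docs 1, 2 and 4 only.
db = {int('01' * 8), int('02' * 8), int('04' * 8)}

for idx in range(5):
    doc_id = f'0{idx + 1}' * 8
    print(doc_id, int(doc_id) in db)
# 0101010101010101 True
# 0202020202020202 True
# 0303030303030303 False
# 0404040404040404 True
# 0505050505050505 False
```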
---

**Row 4**

- hexsha: `1c41047d22bafc1f54d978be33949e6b0f7664c0` · size: 30,866 · ext: `py` · lang: Python
- path: `sdk/python/pulumi_azure_native/security/v20190101preview/outputs.py` · repo: `sebtelko/pulumi-azure-native` @ `711ec021b5c73da05611c56c8a35adb0ce3244e4` · licenses: `["Apache-2.0"]`
- stars: null · issues: null · forks: null

content:

```python
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = [
'AssessmentLinksResponse',
'AssessmentStatusResponse',
'AutomationActionEventHubResponse',
'AutomationActionLogicAppResponse',
'AutomationActionWorkspaceResponse',
'AutomationRuleSetResponse',
'AutomationScopeResponse',
'AutomationSourceResponse',
'AutomationTriggeringRuleResponse',
'AzureResourceDetailsResponse',
'OnPremiseResourceDetailsResponse',
'OnPremiseSqlResourceDetailsResponse',
'ScopeElementResponse',
'SuppressionAlertsScopeResponse',
]
@pulumi.output_type
class AssessmentLinksResponse(dict):
"""
Links relevant to the assessment
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "azurePortalUri":
suggest = "azure_portal_uri"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AssessmentLinksResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AssessmentLinksResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AssessmentLinksResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
azure_portal_uri: str):
"""
Links relevant to the assessment
:param str azure_portal_uri: Link to assessment in Azure Portal
"""
pulumi.set(__self__, "azure_portal_uri", azure_portal_uri)
@property
@pulumi.getter(name="azurePortalUri")
def azure_portal_uri(self) -> str:
"""
Link to assessment in Azure Portal
"""
return pulumi.get(self, "azure_portal_uri")
@pulumi.output_type
class AssessmentStatusResponse(dict):
"""
The result of the assessment
"""
def __init__(__self__, *,
code: str,
cause: Optional[str] = None,
description: Optional[str] = None):
"""
The result of the assessment
:param str code: Programmatic code for the status of the assessment
:param str cause: Programmatic code for the cause of the assessment status
:param str description: Human readable description of the assessment status
"""
pulumi.set(__self__, "code", code)
if cause is not None:
pulumi.set(__self__, "cause", cause)
if description is not None:
pulumi.set(__self__, "description", description)
@property
@pulumi.getter
def code(self) -> str:
"""
Programmatic code for the status of the assessment
"""
return pulumi.get(self, "code")
@property
@pulumi.getter
def cause(self) -> Optional[str]:
"""
Programmatic code for the cause of the assessment status
"""
return pulumi.get(self, "cause")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
Human readable description of the assessment status
"""
return pulumi.get(self, "description")
@pulumi.output_type
class AutomationActionEventHubResponse(dict):
"""
The target Event Hub to which event data will be exported. To learn more about Security Center continuous export capabilities, visit https://aka.ms/ASCExportLearnMore
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "actionType":
suggest = "action_type"
elif key == "sasPolicyName":
suggest = "sas_policy_name"
elif key == "connectionString":
suggest = "connection_string"
elif key == "eventHubResourceId":
suggest = "event_hub_resource_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AutomationActionEventHubResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AutomationActionEventHubResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AutomationActionEventHubResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
action_type: str,
sas_policy_name: str,
connection_string: Optional[str] = None,
event_hub_resource_id: Optional[str] = None):
"""
The target Event Hub to which event data will be exported. To learn more about Security Center continuous export capabilities, visit https://aka.ms/ASCExportLearnMore
:param str action_type: The type of the action that will be triggered by the Automation
Expected value is 'EventHub'.
:param str sas_policy_name: The target Event Hub SAS policy name.
:param str connection_string: The target Event Hub connection string (it will not be included in any response).
:param str event_hub_resource_id: The target Event Hub Azure Resource ID.
"""
pulumi.set(__self__, "action_type", 'EventHub')
pulumi.set(__self__, "sas_policy_name", sas_policy_name)
if connection_string is not None:
pulumi.set(__self__, "connection_string", connection_string)
if event_hub_resource_id is not None:
pulumi.set(__self__, "event_hub_resource_id", event_hub_resource_id)
@property
@pulumi.getter(name="actionType")
def action_type(self) -> str:
"""
The type of the action that will be triggered by the Automation
Expected value is 'EventHub'.
"""
return pulumi.get(self, "action_type")
@property
@pulumi.getter(name="sasPolicyName")
def sas_policy_name(self) -> str:
"""
The target Event Hub SAS policy name.
"""
return pulumi.get(self, "sas_policy_name")
@property
@pulumi.getter(name="connectionString")
def connection_string(self) -> Optional[str]:
"""
The target Event Hub connection string (it will not be included in any response).
"""
return pulumi.get(self, "connection_string")
@property
@pulumi.getter(name="eventHubResourceId")
def event_hub_resource_id(self) -> Optional[str]:
"""
The target Event Hub Azure Resource ID.
"""
return pulumi.get(self, "event_hub_resource_id")
@pulumi.output_type
class AutomationActionLogicAppResponse(dict):
"""
The logic app action that should be triggered. To learn more about Security Center's Workflow Automation capabilities, visit https://aka.ms/ASCWorkflowAutomationLearnMore
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "actionType":
suggest = "action_type"
elif key == "logicAppResourceId":
suggest = "logic_app_resource_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AutomationActionLogicAppResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AutomationActionLogicAppResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AutomationActionLogicAppResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
action_type: str,
logic_app_resource_id: Optional[str] = None,
uri: Optional[str] = None):
"""
The logic app action that should be triggered. To learn more about Security Center's Workflow Automation capabilities, visit https://aka.ms/ASCWorkflowAutomationLearnMore
:param str action_type: The type of the action that will be triggered by the Automation
Expected value is 'LogicApp'.
:param str logic_app_resource_id: The triggered Logic App Azure Resource ID. This can also reside on other subscriptions, given that you have permissions to trigger the Logic App
:param str uri: The Logic App trigger URI endpoint (it will not be included in any response).
"""
pulumi.set(__self__, "action_type", 'LogicApp')
if logic_app_resource_id is not None:
pulumi.set(__self__, "logic_app_resource_id", logic_app_resource_id)
if uri is not None:
pulumi.set(__self__, "uri", uri)
@property
@pulumi.getter(name="actionType")
def action_type(self) -> str:
"""
The type of the action that will be triggered by the Automation
Expected value is 'LogicApp'.
"""
return pulumi.get(self, "action_type")
@property
@pulumi.getter(name="logicAppResourceId")
def logic_app_resource_id(self) -> Optional[str]:
"""
The triggered Logic App Azure Resource ID. This can also reside on other subscriptions, given that you have permissions to trigger the Logic App
"""
return pulumi.get(self, "logic_app_resource_id")
@property
@pulumi.getter
def uri(self) -> Optional[str]:
"""
The Logic App trigger URI endpoint (it will not be included in any response).
"""
return pulumi.get(self, "uri")
@pulumi.output_type
class AutomationActionWorkspaceResponse(dict):
"""
The Log Analytics Workspace to which event data will be exported. Security alerts data will reside in the 'SecurityAlert' table and the assessments data will reside in the 'SecurityRecommendation' table (under the 'Security'/'SecurityCenterFree' solutions). Note that in order to view the data in the workspace, the Security Center Log Analytics free/standard solution needs to be enabled on that workspace. To learn more about Security Center continuous export capabilities, visit https://aka.ms/ASCExportLearnMore
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "actionType":
suggest = "action_type"
elif key == "workspaceResourceId":
suggest = "workspace_resource_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AutomationActionWorkspaceResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AutomationActionWorkspaceResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AutomationActionWorkspaceResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
action_type: str,
workspace_resource_id: Optional[str] = None):
"""
The Log Analytics Workspace to which event data will be exported. Security alerts data will reside in the 'SecurityAlert' table and the assessments data will reside in the 'SecurityRecommendation' table (under the 'Security'/'SecurityCenterFree' solutions). Note that in order to view the data in the workspace, the Security Center Log Analytics free/standard solution needs to be enabled on that workspace. To learn more about Security Center continuous export capabilities, visit https://aka.ms/ASCExportLearnMore
:param str action_type: The type of the action that will be triggered by the Automation
Expected value is 'Workspace'.
:param str workspace_resource_id: The fully qualified Log Analytics Workspace Azure Resource ID.
"""
pulumi.set(__self__, "action_type", 'Workspace')
if workspace_resource_id is not None:
pulumi.set(__self__, "workspace_resource_id", workspace_resource_id)
@property
@pulumi.getter(name="actionType")
def action_type(self) -> str:
"""
The type of the action that will be triggered by the Automation
Expected value is 'Workspace'.
"""
return pulumi.get(self, "action_type")
@property
@pulumi.getter(name="workspaceResourceId")
def workspace_resource_id(self) -> Optional[str]:
"""
The fully qualified Log Analytics Workspace Azure Resource ID.
"""
return pulumi.get(self, "workspace_resource_id")
@pulumi.output_type
class AutomationRuleSetResponse(dict):
"""
A rule set which evaluates all its rules upon an event interception. Only when all the included rules in the rule set will be evaluated as 'true', will the event trigger the defined actions.
"""
def __init__(__self__, *,
rules: Optional[Sequence['outputs.AutomationTriggeringRuleResponse']] = None):
"""
A rule set which evaluates all its rules upon an event interception. Only when all the included rules in the rule set will be evaluated as 'true', will the event trigger the defined actions.
"""
if rules is not None:
pulumi.set(__self__, "rules", rules)
@property
@pulumi.getter
def rules(self) -> Optional[Sequence['outputs.AutomationTriggeringRuleResponse']]:
return pulumi.get(self, "rules")
@pulumi.output_type
class AutomationScopeResponse(dict):
"""
A single automation scope.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "scopePath":
suggest = "scope_path"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AutomationScopeResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AutomationScopeResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AutomationScopeResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
description: Optional[str] = None,
scope_path: Optional[str] = None):
"""
A single automation scope.
:param str description: The resources scope description.
:param str scope_path: The resources scope path. Can be the subscription on which the automation is defined on or a resource group under that subscription (fully qualified Azure resource IDs).
"""
if description is not None:
pulumi.set(__self__, "description", description)
if scope_path is not None:
pulumi.set(__self__, "scope_path", scope_path)
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
The resources scope description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="scopePath")
def scope_path(self) -> Optional[str]:
"""
The resources scope path. Can be the subscription on which the automation is defined on or a resource group under that subscription (fully qualified Azure resource IDs).
"""
return pulumi.get(self, "scope_path")
@pulumi.output_type
class AutomationSourceResponse(dict):
"""
The source event types which evaluate the security automation set of rules. For example - security alerts and security assessments. To learn more about the supported security events data models schemas - please visit https://aka.ms/ASCAutomationSchemas.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "eventSource":
suggest = "event_source"
elif key == "ruleSets":
suggest = "rule_sets"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AutomationSourceResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AutomationSourceResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AutomationSourceResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
event_source: Optional[str] = None,
rule_sets: Optional[Sequence['outputs.AutomationRuleSetResponse']] = None):
"""
The source event types which evaluate the security automation set of rules. For example - security alerts and security assessments. To learn more about the supported security events data models schemas - please visit https://aka.ms/ASCAutomationSchemas.
:param str event_source: A valid event source type.
:param Sequence['AutomationRuleSetResponse'] rule_sets: A set of rules which evaluate upon event interception. A logical disjunction is applied between defined rule sets (logical 'or').
"""
if event_source is not None:
pulumi.set(__self__, "event_source", event_source)
if rule_sets is not None:
pulumi.set(__self__, "rule_sets", rule_sets)
@property
@pulumi.getter(name="eventSource")
def event_source(self) -> Optional[str]:
"""
A valid event source type.
"""
return pulumi.get(self, "event_source")
@property
@pulumi.getter(name="ruleSets")
def rule_sets(self) -> Optional[Sequence['outputs.AutomationRuleSetResponse']]:
"""
A set of rules which evaluate upon event interception. A logical disjunction is applied between defined rule sets (logical 'or').
"""
return pulumi.get(self, "rule_sets")
@pulumi.output_type
class AutomationTriggeringRuleResponse(dict):
"""
A rule which is evaluated upon event interception. The rule is configured by comparing a specific value from the event model to an expected value. This comparison is done by using one of the supported operators set.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "expectedValue":
suggest = "expected_value"
elif key == "propertyJPath":
suggest = "property_j_path"
elif key == "propertyType":
suggest = "property_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AutomationTriggeringRuleResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AutomationTriggeringRuleResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AutomationTriggeringRuleResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
expected_value: Optional[str] = None,
operator: Optional[str] = None,
property_j_path: Optional[str] = None,
property_type: Optional[str] = None):
"""
A rule which is evaluated upon event interception. The rule is configured by comparing a specific value from the event model to an expected value. This comparison is done by using one of the supported operators set.
:param str expected_value: The expected value.
:param str operator: A valid comparer operator to use. A case-insensitive comparison will be applied for String PropertyType.
:param str property_j_path: The JPath of the entity model property that should be checked.
        :param str property_type: The data type of the compared operands (string, integer, floating point number or a boolean [true/false])
"""
if expected_value is not None:
pulumi.set(__self__, "expected_value", expected_value)
if operator is not None:
pulumi.set(__self__, "operator", operator)
if property_j_path is not None:
pulumi.set(__self__, "property_j_path", property_j_path)
if property_type is not None:
pulumi.set(__self__, "property_type", property_type)
@property
@pulumi.getter(name="expectedValue")
def expected_value(self) -> Optional[str]:
"""
The expected value.
"""
return pulumi.get(self, "expected_value")
@property
@pulumi.getter
def operator(self) -> Optional[str]:
"""
A valid comparer operator to use. A case-insensitive comparison will be applied for String PropertyType.
"""
return pulumi.get(self, "operator")
@property
@pulumi.getter(name="propertyJPath")
def property_j_path(self) -> Optional[str]:
"""
The JPath of the entity model property that should be checked.
"""
return pulumi.get(self, "property_j_path")
@property
@pulumi.getter(name="propertyType")
def property_type(self) -> Optional[str]:
"""
        The data type of the compared operands (string, integer, floating point number or a boolean [true/false])
"""
return pulumi.get(self, "property_type")
@pulumi.output_type
class AzureResourceDetailsResponse(dict):
"""
Details of the Azure resource that was assessed
"""
def __init__(__self__, *,
id: str,
source: str):
"""
Details of the Azure resource that was assessed
:param str id: Azure resource Id of the assessed resource
:param str source: The platform where the assessed resource resides
Expected value is 'Azure'.
"""
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "source", 'Azure')
@property
@pulumi.getter
def id(self) -> str:
"""
Azure resource Id of the assessed resource
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def source(self) -> str:
"""
The platform where the assessed resource resides
Expected value is 'Azure'.
"""
return pulumi.get(self, "source")
@pulumi.output_type
class OnPremiseResourceDetailsResponse(dict):
"""
Details of the On Premise resource that was assessed
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "machineName":
suggest = "machine_name"
elif key == "sourceComputerId":
suggest = "source_computer_id"
elif key == "workspaceId":
suggest = "workspace_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in OnPremiseResourceDetailsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
OnPremiseResourceDetailsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
OnPremiseResourceDetailsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
machine_name: str,
source: str,
source_computer_id: str,
vmuuid: str,
workspace_id: str):
"""
Details of the On Premise resource that was assessed
:param str machine_name: The name of the machine
:param str source: The platform where the assessed resource resides
Expected value is 'OnPremise'.
:param str source_computer_id: The oms agent Id installed on the machine
:param str vmuuid: The unique Id of the machine
:param str workspace_id: Azure resource Id of the workspace the machine is attached to
"""
pulumi.set(__self__, "machine_name", machine_name)
pulumi.set(__self__, "source", 'OnPremise')
pulumi.set(__self__, "source_computer_id", source_computer_id)
pulumi.set(__self__, "vmuuid", vmuuid)
pulumi.set(__self__, "workspace_id", workspace_id)
@property
@pulumi.getter(name="machineName")
def machine_name(self) -> str:
"""
The name of the machine
"""
return pulumi.get(self, "machine_name")
@property
@pulumi.getter
def source(self) -> str:
"""
The platform where the assessed resource resides
Expected value is 'OnPremise'.
"""
return pulumi.get(self, "source")
@property
@pulumi.getter(name="sourceComputerId")
def source_computer_id(self) -> str:
"""
The oms agent Id installed on the machine
"""
return pulumi.get(self, "source_computer_id")
@property
@pulumi.getter
def vmuuid(self) -> str:
"""
The unique Id of the machine
"""
return pulumi.get(self, "vmuuid")
@property
@pulumi.getter(name="workspaceId")
def workspace_id(self) -> str:
"""
Azure resource Id of the workspace the machine is attached to
"""
return pulumi.get(self, "workspace_id")
@pulumi.output_type
class OnPremiseSqlResourceDetailsResponse(dict):
"""
Details of the On Premise Sql resource that was assessed
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "databaseName":
suggest = "database_name"
elif key == "machineName":
suggest = "machine_name"
elif key == "serverName":
suggest = "server_name"
elif key == "sourceComputerId":
suggest = "source_computer_id"
elif key == "workspaceId":
suggest = "workspace_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in OnPremiseSqlResourceDetailsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
OnPremiseSqlResourceDetailsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
OnPremiseSqlResourceDetailsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
database_name: str,
machine_name: str,
server_name: str,
source: str,
source_computer_id: str,
vmuuid: str,
workspace_id: str):
"""
Details of the On Premise Sql resource that was assessed
:param str database_name: The Sql database name installed on the machine
:param str machine_name: The name of the machine
:param str server_name: The Sql server name installed on the machine
:param str source: The platform where the assessed resource resides
Expected value is 'OnPremiseSql'.
:param str source_computer_id: The oms agent Id installed on the machine
:param str vmuuid: The unique Id of the machine
:param str workspace_id: Azure resource Id of the workspace the machine is attached to
"""
pulumi.set(__self__, "database_name", database_name)
pulumi.set(__self__, "machine_name", machine_name)
pulumi.set(__self__, "server_name", server_name)
pulumi.set(__self__, "source", 'OnPremiseSql')
pulumi.set(__self__, "source_computer_id", source_computer_id)
pulumi.set(__self__, "vmuuid", vmuuid)
pulumi.set(__self__, "workspace_id", workspace_id)
@property
@pulumi.getter(name="databaseName")
def database_name(self) -> str:
"""
The Sql database name installed on the machine
"""
return pulumi.get(self, "database_name")
@property
@pulumi.getter(name="machineName")
def machine_name(self) -> str:
"""
The name of the machine
"""
return pulumi.get(self, "machine_name")
@property
@pulumi.getter(name="serverName")
def server_name(self) -> str:
"""
The Sql server name installed on the machine
"""
return pulumi.get(self, "server_name")
@property
@pulumi.getter
def source(self) -> str:
"""
The platform where the assessed resource resides
Expected value is 'OnPremiseSql'.
"""
return pulumi.get(self, "source")
@property
@pulumi.getter(name="sourceComputerId")
def source_computer_id(self) -> str:
"""
The oms agent Id installed on the machine
"""
return pulumi.get(self, "source_computer_id")
@property
@pulumi.getter
def vmuuid(self) -> str:
"""
The unique Id of the machine
"""
return pulumi.get(self, "vmuuid")
@property
@pulumi.getter(name="workspaceId")
def workspace_id(self) -> str:
"""
Azure resource Id of the workspace the machine is attached to
"""
return pulumi.get(self, "workspace_id")
@pulumi.output_type
class ScopeElementResponse(dict):
"""
A more specific scope used to identify the alerts to suppress.
"""
def __init__(__self__, *,
field: Optional[str] = None):
"""
A more specific scope used to identify the alerts to suppress.
:param str field: The alert entity type to suppress by.
"""
if field is not None:
pulumi.set(__self__, "field", field)
@property
@pulumi.getter
def field(self) -> Optional[str]:
"""
The alert entity type to suppress by.
"""
return pulumi.get(self, "field")
@pulumi.output_type
class SuppressionAlertsScopeResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "allOf":
suggest = "all_of"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SuppressionAlertsScopeResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SuppressionAlertsScopeResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SuppressionAlertsScopeResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
all_of: Sequence['outputs.ScopeElementResponse']):
"""
:param Sequence['ScopeElementResponse'] all_of: All the conditions inside need to be true in order to suppress the alert
"""
pulumi.set(__self__, "all_of", all_of)
@property
@pulumi.getter(name="allOf")
def all_of(self) -> Sequence['outputs.ScopeElementResponse']:
"""
All the conditions inside need to be true in order to suppress the alert
"""
return pulumi.get(self, "all_of")
```

- avg_line_length: 37.595615 · max_line_length: 523 · alphanum_fraction: 0.644398
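Every generated output class above repeats the same `__key_warning` idiom: dictionary access by the wire-format camelCase key still resolves, but logs a suggestion to use the snake_case property getter. A stripped-down sketch of the idiom (plain Python, with `print` standing in for `pulumi.log.warn`):

```python
# Minimal stand-in for the generated pattern; not Pulumi itself.
class AssessmentLinks(dict):
    _suggest = {"azurePortalUri": "azure_portal_uri"}

    def __getitem__(self, key):
        if key in self._suggest:
            # the generated code calls pulumi.log.warn here
            print(f"Key '{key}': access the value via the "
                  f"'{self._suggest[key]}' property getter instead.")
        return super().__getitem__(key)

links = AssessmentLinks({"azurePortalUri": "https://portal.azure.com"})
_ = links["azurePortalUri"]  # warns, then returns the stored value
```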
content_no_comment:

```python
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = [
'AssessmentLinksResponse',
'AssessmentStatusResponse',
'AutomationActionEventHubResponse',
'AutomationActionLogicAppResponse',
'AutomationActionWorkspaceResponse',
'AutomationRuleSetResponse',
'AutomationScopeResponse',
'AutomationSourceResponse',
'AutomationTriggeringRuleResponse',
'AzureResourceDetailsResponse',
'OnPremiseResourceDetailsResponse',
'OnPremiseSqlResourceDetailsResponse',
'ScopeElementResponse',
'SuppressionAlertsScopeResponse',
]
@pulumi.output_type
class AssessmentLinksResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "azurePortalUri":
suggest = "azure_portal_uri"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AssessmentLinksResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AssessmentLinksResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AssessmentLinksResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
azure_portal_uri: str):
pulumi.set(__self__, "azure_portal_uri", azure_portal_uri)
@property
@pulumi.getter(name="azurePortalUri")
def azure_portal_uri(self) -> str:
return pulumi.get(self, "azure_portal_uri")
@pulumi.output_type
class AssessmentStatusResponse(dict):
def __init__(__self__, *,
code: str,
cause: Optional[str] = None,
description: Optional[str] = None):
pulumi.set(__self__, "code", code)
if cause is not None:
pulumi.set(__self__, "cause", cause)
if description is not None:
pulumi.set(__self__, "description", description)
@property
@pulumi.getter
def code(self) -> str:
return pulumi.get(self, "code")
@property
@pulumi.getter
def cause(self) -> Optional[str]:
return pulumi.get(self, "cause")
@property
@pulumi.getter
def description(self) -> Optional[str]:
return pulumi.get(self, "description")
@pulumi.output_type
class AutomationActionEventHubResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "actionType":
suggest = "action_type"
elif key == "sasPolicyName":
suggest = "sas_policy_name"
elif key == "connectionString":
suggest = "connection_string"
elif key == "eventHubResourceId":
suggest = "event_hub_resource_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AutomationActionEventHubResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AutomationActionEventHubResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AutomationActionEventHubResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
action_type: str,
sas_policy_name: str,
connection_string: Optional[str] = None,
event_hub_resource_id: Optional[str] = None):
pulumi.set(__self__, "action_type", 'EventHub')
pulumi.set(__self__, "sas_policy_name", sas_policy_name)
if connection_string is not None:
pulumi.set(__self__, "connection_string", connection_string)
if event_hub_resource_id is not None:
pulumi.set(__self__, "event_hub_resource_id", event_hub_resource_id)
@property
@pulumi.getter(name="actionType")
def action_type(self) -> str:
return pulumi.get(self, "action_type")
@property
@pulumi.getter(name="sasPolicyName")
def sas_policy_name(self) -> str:
return pulumi.get(self, "sas_policy_name")
@property
@pulumi.getter(name="connectionString")
def connection_string(self) -> Optional[str]:
return pulumi.get(self, "connection_string")
@property
@pulumi.getter(name="eventHubResourceId")
def event_hub_resource_id(self) -> Optional[str]:
return pulumi.get(self, "event_hub_resource_id")
@pulumi.output_type
class AutomationActionLogicAppResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "actionType":
suggest = "action_type"
elif key == "logicAppResourceId":
suggest = "logic_app_resource_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AutomationActionLogicAppResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AutomationActionLogicAppResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AutomationActionLogicAppResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
action_type: str,
logic_app_resource_id: Optional[str] = None,
uri: Optional[str] = None):
pulumi.set(__self__, "action_type", 'LogicApp')
if logic_app_resource_id is not None:
pulumi.set(__self__, "logic_app_resource_id", logic_app_resource_id)
if uri is not None:
pulumi.set(__self__, "uri", uri)
@property
@pulumi.getter(name="actionType")
def action_type(self) -> str:
return pulumi.get(self, "action_type")
@property
@pulumi.getter(name="logicAppResourceId")
def logic_app_resource_id(self) -> Optional[str]:
return pulumi.get(self, "logic_app_resource_id")
@property
@pulumi.getter
def uri(self) -> Optional[str]:
return pulumi.get(self, "uri")
@pulumi.output_type
class AutomationActionWorkspaceResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "actionType":
suggest = "action_type"
elif key == "workspaceResourceId":
suggest = "workspace_resource_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AutomationActionWorkspaceResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AutomationActionWorkspaceResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AutomationActionWorkspaceResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
action_type: str,
workspace_resource_id: Optional[str] = None):
pulumi.set(__self__, "action_type", 'Workspace')
if workspace_resource_id is not None:
pulumi.set(__self__, "workspace_resource_id", workspace_resource_id)
@property
@pulumi.getter(name="actionType")
def action_type(self) -> str:
return pulumi.get(self, "action_type")
@property
@pulumi.getter(name="workspaceResourceId")
def workspace_resource_id(self) -> Optional[str]:
return pulumi.get(self, "workspace_resource_id")
@pulumi.output_type
class AutomationRuleSetResponse(dict):
def __init__(__self__, *,
rules: Optional[Sequence['outputs.AutomationTriggeringRuleResponse']] = None):
if rules is not None:
pulumi.set(__self__, "rules", rules)
@property
@pulumi.getter
def rules(self) -> Optional[Sequence['outputs.AutomationTriggeringRuleResponse']]:
return pulumi.get(self, "rules")
@pulumi.output_type
class AutomationScopeResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "scopePath":
suggest = "scope_path"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AutomationScopeResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AutomationScopeResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AutomationScopeResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
description: Optional[str] = None,
scope_path: Optional[str] = None):
if description is not None:
pulumi.set(__self__, "description", description)
if scope_path is not None:
pulumi.set(__self__, "scope_path", scope_path)
@property
@pulumi.getter
def description(self) -> Optional[str]:
return pulumi.get(self, "description")
@property
@pulumi.getter(name="scopePath")
def scope_path(self) -> Optional[str]:
return pulumi.get(self, "scope_path")
@pulumi.output_type
class AutomationSourceResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "eventSource":
suggest = "event_source"
elif key == "ruleSets":
suggest = "rule_sets"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AutomationSourceResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AutomationSourceResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AutomationSourceResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
event_source: Optional[str] = None,
rule_sets: Optional[Sequence['outputs.AutomationRuleSetResponse']] = None):
if event_source is not None:
pulumi.set(__self__, "event_source", event_source)
if rule_sets is not None:
pulumi.set(__self__, "rule_sets", rule_sets)
@property
@pulumi.getter(name="eventSource")
def event_source(self) -> Optional[str]:
return pulumi.get(self, "event_source")
@property
@pulumi.getter(name="ruleSets")
def rule_sets(self) -> Optional[Sequence['outputs.AutomationRuleSetResponse']]:
return pulumi.get(self, "rule_sets")
@pulumi.output_type
class AutomationTriggeringRuleResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "expectedValue":
suggest = "expected_value"
elif key == "propertyJPath":
suggest = "property_j_path"
elif key == "propertyType":
suggest = "property_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AutomationTriggeringRuleResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AutomationTriggeringRuleResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AutomationTriggeringRuleResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
expected_value: Optional[str] = None,
operator: Optional[str] = None,
property_j_path: Optional[str] = None,
property_type: Optional[str] = None):
if expected_value is not None:
pulumi.set(__self__, "expected_value", expected_value)
if operator is not None:
pulumi.set(__self__, "operator", operator)
if property_j_path is not None:
pulumi.set(__self__, "property_j_path", property_j_path)
if property_type is not None:
pulumi.set(__self__, "property_type", property_type)
@property
@pulumi.getter(name="expectedValue")
def expected_value(self) -> Optional[str]:
return pulumi.get(self, "expected_value")
@property
@pulumi.getter
def operator(self) -> Optional[str]:
return pulumi.get(self, "operator")
@property
@pulumi.getter(name="propertyJPath")
def property_j_path(self) -> Optional[str]:
return pulumi.get(self, "property_j_path")
@property
@pulumi.getter(name="propertyType")
def property_type(self) -> Optional[str]:
return pulumi.get(self, "property_type")
@pulumi.output_type
class AzureResourceDetailsResponse(dict):
def __init__(__self__, *,
id: str,
source: str):
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "source", 'Azure')
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter
def source(self) -> str:
return pulumi.get(self, "source")
@pulumi.output_type
class OnPremiseResourceDetailsResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "machineName":
suggest = "machine_name"
elif key == "sourceComputerId":
suggest = "source_computer_id"
elif key == "workspaceId":
suggest = "workspace_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in OnPremiseResourceDetailsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
OnPremiseResourceDetailsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
OnPremiseResourceDetailsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
machine_name: str,
source: str,
source_computer_id: str,
vmuuid: str,
workspace_id: str):
pulumi.set(__self__, "machine_name", machine_name)
pulumi.set(__self__, "source", 'OnPremise')
pulumi.set(__self__, "source_computer_id", source_computer_id)
pulumi.set(__self__, "vmuuid", vmuuid)
pulumi.set(__self__, "workspace_id", workspace_id)
@property
@pulumi.getter(name="machineName")
def machine_name(self) -> str:
return pulumi.get(self, "machine_name")
@property
@pulumi.getter
def source(self) -> str:
return pulumi.get(self, "source")
@property
@pulumi.getter(name="sourceComputerId")
def source_computer_id(self) -> str:
return pulumi.get(self, "source_computer_id")
@property
@pulumi.getter
def vmuuid(self) -> str:
return pulumi.get(self, "vmuuid")
@property
@pulumi.getter(name="workspaceId")
def workspace_id(self) -> str:
return pulumi.get(self, "workspace_id")
@pulumi.output_type
class OnPremiseSqlResourceDetailsResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "databaseName":
suggest = "database_name"
elif key == "machineName":
suggest = "machine_name"
elif key == "serverName":
suggest = "server_name"
elif key == "sourceComputerId":
suggest = "source_computer_id"
elif key == "workspaceId":
suggest = "workspace_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in OnPremiseSqlResourceDetailsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
OnPremiseSqlResourceDetailsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
OnPremiseSqlResourceDetailsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
database_name: str,
machine_name: str,
server_name: str,
source: str,
source_computer_id: str,
vmuuid: str,
workspace_id: str):
pulumi.set(__self__, "database_name", database_name)
pulumi.set(__self__, "machine_name", machine_name)
pulumi.set(__self__, "server_name", server_name)
pulumi.set(__self__, "source", 'OnPremiseSql')
pulumi.set(__self__, "source_computer_id", source_computer_id)
pulumi.set(__self__, "vmuuid", vmuuid)
pulumi.set(__self__, "workspace_id", workspace_id)
@property
@pulumi.getter(name="databaseName")
def database_name(self) -> str:
return pulumi.get(self, "database_name")
@property
@pulumi.getter(name="machineName")
def machine_name(self) -> str:
return pulumi.get(self, "machine_name")
@property
@pulumi.getter(name="serverName")
def server_name(self) -> str:
return pulumi.get(self, "server_name")
@property
@pulumi.getter
def source(self) -> str:
return pulumi.get(self, "source")
@property
@pulumi.getter(name="sourceComputerId")
def source_computer_id(self) -> str:
return pulumi.get(self, "source_computer_id")
@property
@pulumi.getter
def vmuuid(self) -> str:
return pulumi.get(self, "vmuuid")
@property
@pulumi.getter(name="workspaceId")
def workspace_id(self) -> str:
return pulumi.get(self, "workspace_id")
@pulumi.output_type
class ScopeElementResponse(dict):
def __init__(__self__, *,
field: Optional[str] = None):
if field is not None:
pulumi.set(__self__, "field", field)
@property
@pulumi.getter
def field(self) -> Optional[str]:
return pulumi.get(self, "field")
@pulumi.output_type
class SuppressionAlertsScopeResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "allOf":
suggest = "all_of"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SuppressionAlertsScopeResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SuppressionAlertsScopeResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SuppressionAlertsScopeResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
all_of: Sequence['outputs.ScopeElementResponse']):
pulumi.set(__self__, "all_of", all_of)
@property
@pulumi.getter(name="allOf")
def all_of(self) -> Sequence['outputs.ScopeElementResponse']:
return pulumi.get(self, "all_of")
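# Illustrative note (not part of the generated file): dict-style access with
# the camelCase wire key still returns the value, but __key_warning logs a
# warning suggesting the snake_case property getter, e.g. (hypothetical
# values):
#
#   resp = AutomationScopeResponse(description="prod", scope_path="/subscriptions/xyz")
#   resp["scopePath"]   # warns, then returns the value
#   resp.scope_path     # preferred: the property getter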
| true
| true
|
1c4104b0907e043f5b8e0a19a12d85e1623a408c
| 167
|
py
|
Python
|
project_name/urls.py
|
dashgin/DjangoTemplate
|
40d7f7789d378ee632e096a82a1ac96ebeb1a331
|
[
"MIT"
] | null | null | null |
project_name/urls.py
|
dashgin/DjangoTemplate
|
40d7f7789d378ee632e096a82a1ac96ebeb1a331
|
[
"MIT"
] | null | null | null |
project_name/urls.py
|
dashgin/DjangoTemplate
|
40d7f7789d378ee632e096a82a1ac96ebeb1a331
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.contrib import admin
from django.urls import path
urlpatterns = [
path(f'{settings.ADMIN_URL}/', admin.site.urls),
]
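# Note (assumption): ADMIN_URL is not a built-in Django setting; this template
# expects it to be defined in the project settings, e.g.:
#
#   # settings.py (hypothetical)
#   ADMIN_URL = "admin"   # admin site served at /admin/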
| 20.875
| 52
| 0.754491
|
from django.conf import settings
from django.contrib import admin
from django.urls import path
urlpatterns = [
path(f'{settings.ADMIN_URL}/', admin.site.urls),
]
| true
| true
|
1c4105fcafcd6b93844f8cc95b8ee788e5001a2f
| 974
|
py
|
Python
|
prickly-pufferfish/arena/arena/urls.py
|
Vthechamp22/summer-code-jam-2021
|
0a8bf1f22f6c73300891fd779da36efd8e1304c1
|
[
"MIT"
] | 40
|
2020-08-02T07:38:22.000Z
|
2021-07-26T01:46:50.000Z
|
prickly-pufferfish/arena/arena/urls.py
|
Vthechamp22/summer-code-jam-2021
|
0a8bf1f22f6c73300891fd779da36efd8e1304c1
|
[
"MIT"
] | 134
|
2020-07-31T12:15:45.000Z
|
2020-12-13T04:42:19.000Z
|
prickly-pufferfish/arena/arena/urls.py
|
Vthechamp22/summer-code-jam-2021
|
0a8bf1f22f6c73300891fd779da36efd8e1304c1
|
[
"MIT"
] | 101
|
2020-07-31T12:00:47.000Z
|
2021-11-01T09:06:58.000Z
|
"""arena URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import include
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/', include('accounts.urls')),
path('accounts/', include('django.contrib.auth.urls')),
path('battle/', include('battle.urls')),
path('', include('main.urls')),
]
| 36.074074
| 77
| 0.695072
|
from django.conf.urls import include
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/', include('accounts.urls')),
path('accounts/', include('django.contrib.auth.urls')),
path('battle/', include('battle.urls')),
path('', include('main.urls')),
]
| true
| true
|
1c4106918d68cf17ddcefb063a056798360de561
| 3,407
|
py
|
Python
|
src/icemac/addressbook/browser/search/result/handler/test_delete.py
|
icemac/icemac.addressbook
|
6197e6e01da922feb100dd0943576523050cd703
|
[
"BSD-2-Clause"
] | 1
|
2020-03-26T20:16:44.000Z
|
2020-03-26T20:16:44.000Z
|
src/icemac/addressbook/browser/search/result/handler/test_delete.py
|
icemac/icemac.addressbook
|
6197e6e01da922feb100dd0943576523050cd703
|
[
"BSD-2-Clause"
] | 2
|
2020-02-21T13:04:23.000Z
|
2020-02-21T13:06:10.000Z
|
src/icemac/addressbook/browser/search/result/handler/test_delete.py
|
icemac/icemac.addressbook
|
6197e6e01da922feb100dd0943576523050cd703
|
[
"BSD-2-Clause"
] | null | null | null |
import pytest
def test_delete__DeleteForm__1(search_data, UserFactory, browser):
"""`DeleteForm` allows an administrator to delete found persons."""
address_book = search_data
# Create a user -- the person of a user cannot be deleted using this search
# result handler.
UserFactory(address_book, u'Ben', u'Utzer', u'ben@example.com',
u'12345678', [], keywords=[u'church'])
browser.login('mgr')
browser.keyword_search('church')
# Only the selected persons get deleted. Deselected persons will not:
browser.getControl(name='persons:list').getControl(
value="Person-2").selected = False # This this the person named "Koch"
browser.getControl('Apply on selected persons').displayValue = [
'Delete']
browser.getControl(name='form.buttons.apply').click()
# The number of persons for deletion is shown on the question screen:
    # (There are 3 persons with the church keyword in the fixture; one got
    # deselected, but there is additionally a newly created user.)
assert ['3'] == browser.etree.xpath(
'//span[@id="form-widgets-count"]/text()')
assert ('You are not able to delete a person who is referenced.'
in browser.contents)
assert browser.SEARCH_DELETE_URL == browser.url
browser.getControl('Yes, delete').click()
assert 'Selected persons deleted: 2' == browser.message
assert browser.PERSONS_LIST_URL == browser.url
# Only the two non-users got deleted:
assert 'Koch' in browser.contents
assert 'Utzer' in browser.contents
assert 'Liebig' not in browser.contents
assert 'Velleuer' not in browser.contents
def test_delete__DeleteForm__2(search_data, browser):
"""`DeleteForm` can be canceled."""
browser.login('mgr')
browser.keyword_search('church', 'Delete')
    # Selecting the `cancel` button leads to the person list without deleting
# anybody:
browser.getControl('No, cancel').click()
assert 'Deletion canceled.' == browser.message
assert browser.PERSONS_LIST_URL == browser.url
assert 'Koch' in browser.contents
assert 'Liebig' in browser.contents
assert 'Velleuer' in browser.contents
@pytest.mark.parametrize('role', ['editor', 'visitor'])
def test_delete__DeleteForm__3(search_data, browser, role):
"""`DeleteForm` cannot be accessed by non-admin users."""
browser.login(role)
browser.keyword_search('church')
# There is no delete option which can be applied:
assert ([
'XLS export main (Exports person data and main addresses resp. '
'phone numbers.)',
'XLS export complete (Exports person data and all addresses resp. '
'phone numbers.)',
'E-Mail (Creates a link to send e-mails.)',
'Names (Comma separated list of person names.)',
'Checklist (List of person names with check-boxes.)',
"iCalendar export birthday (Export person's birthdays as "
".ics file.)",
'Birthday list (Person names sorted by birthday.)',
] == browser.getControl('Apply on selected persons').displayOptions)
browser.assert_forbidden(browser.SEARCH_DELETE_URL)
@pytest.mark.parametrize('role', ['archivist', 'archive-visitor'])
def test_delete__DeleteForm__4(address_book, browser, role):
"""It cannot be accessed by the archive roles."""
browser.login(role)
browser.assert_forbidden(browser.SEARCH_DELETE_URL)
| 44.246753
| 79
| 0.694453
|
import pytest
def test_delete__DeleteForm__1(search_data, UserFactory, browser):
address_book = search_data
UserFactory(address_book, u'Ben', u'Utzer', u'ben@example.com',
u'12345678', [], keywords=[u'church'])
browser.login('mgr')
browser.keyword_search('church')
browser.getControl(name='persons:list').getControl(
value="Person-2").selected = False
browser.getControl('Apply on selected persons').displayValue = [
'Delete']
browser.getControl(name='form.buttons.apply').click()
assert ['3'] == browser.etree.xpath(
'//span[@id="form-widgets-count"]/text()')
assert ('You are not able to delete a person who is referenced.'
in browser.contents)
assert browser.SEARCH_DELETE_URL == browser.url
browser.getControl('Yes, delete').click()
assert 'Selected persons deleted: 2' == browser.message
assert browser.PERSONS_LIST_URL == browser.url
assert 'Koch' in browser.contents
assert 'Utzer' in browser.contents
assert 'Liebig' not in browser.contents
assert 'Velleuer' not in browser.contents
def test_delete__DeleteForm__2(search_data, browser):
browser.login('mgr')
browser.keyword_search('church', 'Delete')
browser.getControl('No, cancel').click()
assert 'Deletion canceled.' == browser.message
assert browser.PERSONS_LIST_URL == browser.url
assert 'Koch' in browser.contents
assert 'Liebig' in browser.contents
assert 'Velleuer' in browser.contents
@pytest.mark.parametrize('role', ['editor', 'visitor'])
def test_delete__DeleteForm__3(search_data, browser, role):
browser.login(role)
browser.keyword_search('church')
assert ([
'XLS export main (Exports person data and main addresses resp. '
'phone numbers.)',
'XLS export complete (Exports person data and all addresses resp. '
'phone numbers.)',
'E-Mail (Creates a link to send e-mails.)',
'Names (Comma separated list of person names.)',
'Checklist (List of person names with check-boxes.)',
"iCalendar export birthday (Export person's birthdays as "
".ics file.)",
'Birthday list (Person names sorted by birthday.)',
] == browser.getControl('Apply on selected persons').displayOptions)
browser.assert_forbidden(browser.SEARCH_DELETE_URL)
@pytest.mark.parametrize('role', ['archivist', 'archive-visitor'])
def test_delete__DeleteForm__4(address_book, browser, role):
browser.login(role)
browser.assert_forbidden(browser.SEARCH_DELETE_URL)
| true
| true
|
1c41071be3bfc9f00212c0cf8b634c2280b7da2d
| 12,056
|
py
|
Python
|
build-tools/apb_packaging/sb_cfn_package/sb_cfn_package.py
|
knqyf263/aws-servicebroker
|
b9b6a3ec4738596d07d6d8e6c79548f16acc25c8
|
[
"Apache-2.0"
] | null | null | null |
build-tools/apb_packaging/sb_cfn_package/sb_cfn_package.py
|
knqyf263/aws-servicebroker
|
b9b6a3ec4738596d07d6d8e6c79548f16acc25c8
|
[
"Apache-2.0"
] | null | null | null |
build-tools/apb_packaging/sb_cfn_package/sb_cfn_package.py
|
knqyf263/aws-servicebroker
|
b9b6a3ec4738596d07d6d8e6c79548f16acc25c8
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import logging
import yaml
from taskcat.utils import CFNYAMLHandler
import os
from random import random
from base64 import b64encode
import shutil
import subprocess
import re
import jinja2
try:
from aws_servicebroker_spec import AwsServiceBrokerSpec
except ImportError:
from sb_cfn_package.aws_servicebroker_spec import AwsServiceBrokerSpec
def cli():
if len(logging.getLogger().handlers) == 0:
logging.getLogger().addHandler(logging.StreamHandler())
logging.getLogger().handlers[0].setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
parser = argparse.ArgumentParser()
parser.add_argument(
'-l',
'--loglevel',
default='error',
        help='Set loglevel. Allowed values are debug, info, warning, error, critical. Default is error'
)
parser.add_argument(
'-n',
'--name',
help='name of AWS service'
)
parser.add_argument(
"-s",
"--service-spec-path",
default=None,
        help='Path to the service specification to use for the build; if none is provided, the spec will be extracted from the CloudFormation template'
)
parser.add_argument(
"-t",
"--docker-image-tag",
default=None,
help='tag to use for the docker image'
)
parser.add_argument(
"-a",
"--s3-acl",
default='private',
help='acl to use for objects uploaded to S3, default is private'
)
parser.add_argument(
"-b",
"--s3-bucket",
default=None,
help='bucket to use for artifacts, will autogenerate a new bucket by default'
)
parser.add_argument(
"-p",
"--profile",
default=None,
help='aws credential profile to use'
    )
parser.add_argument(
"-c",
"--ci",
default=None,
        help='Path to place the build output; if not specified, a random directory will be created in /tmp'
)
parser.add_argument(
"templatepath",
help='Path to the CloudFormation template to use for the build'
)
args = parser.parse_args()
template_path = os.path.abspath(args.templatepath)
loglevel = getattr(logging, args.loglevel.upper())
logging.getLogger().setLevel(loglevel)
logging.info('Set loglevel to %s' % args.loglevel.upper())
logging.debug("Passed arguments: {} ".format(args.__dict__))
build_path = None
if args.ci:
build_path = os.path.abspath(args.ci)
try:
shutil.rmtree(build_path + "/%s" % args.name)
except FileNotFoundError:
pass
sb_pack = SbCfnPackage(template_path=template_path, service_spec_path=args.service_spec_path)
artifacts = sb_pack.build_artifacts(args.name, args.s3_acl, args.s3_bucket, args.profile, build_path=build_path)
results = sb_pack.create_apb_skeleton(artifacts['apb_spec'], artifacts['prescribed_parameters'],
artifacts['bindings'], artifacts['template'], args.name, build_path=build_path)
os.chdir(os.path.join(results, 'apb'))
tag = args.docker_image_tag or artifacts['apb_spec']['name']
results = subprocess.run(["apb", "build", "--tag", tag], stdout=subprocess.PIPE)
print(results.stdout.decode("utf-8"))
if results.returncode != 0:
if results.stderr:
print(results.stderr.decode("utf-8"))
raise Exception('apb build failed')
if '/' in tag:
results = subprocess.run(["docker", "push", tag], stdout=subprocess.PIPE)
for l in results.stdout.decode("utf-8").split('\n'):
if not l.endswith(': Preparing') and not l.endswith(': Waiting'):
print(l)
if results.returncode != 0:
if results.stderr:
print(results.stderr.decode("utf-8"))
raise Exception('docker push failed')
if args.ci:
os.makedirs('./ci')
shutil.copy(os.path.join(os.path.dirname(template_path), 'ci/config.yml'), './ci/config.yml')
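# Hypothetical invocation of the CLI above (the entry-point name and paths are
# illustrative; the flags come from the argparse definitions):
#
#   sb_cfn_package -n rds -t myrepo/rds-apb:latest templates/rds/template.yaml
#
# which builds the artifacts, writes the APB skeleton, runs `apb build`, and
# pushes the image only when the tag contains a registry prefix ('/').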
class SbCfnPackage(object):
"""
Main class to handle all of the packaging operations required to turn a CloudFormation template into an APB
"""
def __init__(self, template_path=None, service_spec_path=None):
"""
        Initialise the class, optionally providing paths for the template and a separate service spec; if
        service_spec_path is not specified, we'll look for the spec in the template Metadata.
:param template_path:
:param service_spec_path:
"""
self.template = {}
self.service_spec = {}
if template_path:
self.template_path = os.path.dirname(template_path)
with open(template_path, 'r') as stream:
self.template = CFNYAMLHandler.ordered_safe_load(stream)
if not service_spec_path:
self.service_spec = self.template['Metadata']['AWS::ServiceBroker::Specification']
if service_spec_path:
with open(service_spec_path, 'r') as stream:
                self.service_spec = yaml.safe_load(stream)
if not self.service_spec:
raise Exception("cannot continue without either a ['Metadata']['AWS::ServiceBroker::Specification'] section in the template, or a path to a seperate spec using service_spec_path")
def build_artifacts(self, service_name, s3acl='private', bucket=None, profile=None, test=False, build_path=None):
"""
        Builds the artifacts required to create an APB using the specification in the CloudFormation template metadata
:return:
"""
return AwsServiceBrokerSpec(service_name=service_name, bucket_name=bucket, profile=profile, s3acl=s3acl, test=test).build_abp_spec(self.service_spec, self.template, self.template_path, build_path=build_path)
def create_apb_skeleton(self, apb_spec, prescribed_parameters, bindings, template, service_name, build_path=None):
if build_path:
os.makedirs(build_path, exist_ok=True)
tmpname = os.path.join(build_path, "%s" % service_name)
os.makedirs(os.path.join(build_path, "%s" % service_name), exist_ok=True)
else:
tmpname = '/tmp/AWSSB-' + str(b64encode(bytes(str(random()), 'utf8'))).replace("b'", '').replace("'", '').replace('=', '')
os.makedirs(tmpname)
print("build path: %s" % tmpname)
shutil.copytree(os.path.dirname(os.path.abspath(__file__)) + '/data/apb_template/', tmpname + '/apb')
for dname, dirs, files in os.walk(tmpname):
for fname in files:
fpath = os.path.join(dname, fname)
if not fname.endswith('.zip'):
with open(fpath) as f:
s = f.read()
s = s.replace("${SERVICE_NAME}", service_name).replace("${SERVICE_NAME_UPPER}", service_name.upper()).replace('${CREATE_IAM_USER}', str(bindings['IAMUser']))
with open(fpath, "w") as f:
f.write(s)
for plan in prescribed_parameters.keys():
prescribed_parameters[plan]['params_string'] = "{{ namespace }}::{{ _apb_plan_id }}::{{ _apb_service_class_id }}::{{ _apb_service_instance_id }}"
prescribed_parameters[plan]['params_hash'] = "{{ params_string | checksum }}"
with open(tmpname + '/apb/roles/aws-provision-apb/vars/%s.yml' % plan, "w") as f:
f.write(CFNYAMLHandler.ordered_safe_dump(prescribed_parameters[plan], default_flow_style=False))
shutil.copy(tmpname + '/apb/roles/aws-provision-apb/vars/%s.yml' % plan, tmpname + '/apb/roles/aws-deprovision-apb/vars/%s.yml' % plan)
with open(tmpname + '/apb/apb.yml', "w") as f:
f.write(CFNYAMLHandler.ordered_safe_dump(apb_spec, default_flow_style=False))
with open(tmpname + '/apb/roles/aws-provision-apb/tasks/main.yml') as f:
            main_provision_task = yaml.safe_load(f)
create_user = False
try:
create_user = template['Metadata']['AWS::ServiceBroker::Specification']['Bindings']['IAM']['AddKeypair']
except KeyError as e:
pass
full_bindings = []
for t in main_provision_task:
if 'name' in t.keys():
if t['name'] == 'Encode bind credentials':
if not create_user:
aws_key_id = '%s_AWS_ACCESS_KEY_ID' % service_name
aws_key = '%s_AWS_SECRET_ACCESS_KEY' % service_name
t['asb_encode_binding']['fields'].pop(aws_key_id.upper())
t['asb_encode_binding']['fields'].pop(aws_key.upper())
for b in bindings['CFNOutputs']:
t['asb_encode_binding']['fields'][camel_convert(b).upper()] = "{{ cfn.stack_outputs.%s }}" % b
description = ""
if "Description" in template['Outputs'][b].keys():
description = template['Outputs'][b]["Description"]
full_bindings.append({"name": camel_convert(b).upper(), "description": description})
elif 'block' in t.keys():
for it in t['block']:
if it['name'] == 'Create Resources':
if 'Parameters' in template.keys():
for p in template['Parameters'].keys():
default = ""
if 'Default' in template['Parameters'][p].keys():
default = template['Parameters'][p]['Default']
it['cloudformation']['template_parameters'][p] = '{{ %s | default("%s") | string }}' % (p, default)
with open(tmpname + '/apb/roles/aws-provision-apb/tasks/main.yml', 'w') as f:
f.write(CFNYAMLHandler.ordered_safe_dump(main_provision_task, default_flow_style=False))
with open(tmpname + '/template.yaml', 'w') as f:
f.write(CFNYAMLHandler.ordered_safe_dump(template, default_flow_style=False))
render_documentation(apb_spec, template, prescribed_parameters, tmpname, full_bindings)
return tmpname
def render_documentation(apb, template, prescribed_params, tmp_path, bindings):
dir_path = os.path.dirname(os.path.realpath(__file__))
abs_path = os.path.join(dir_path, 'data/serviceclass_documentation_template.md.j2')
path, filename = os.path.split(abs_path)
lengths = {}
for plan in apb['plans']:
lengths[plan['name']] = {"required": 0, "prescribed": 0, "optional": 0, "generic": 0}
if 'parameters' in plan.keys():
lengths[plan['name']]["required"] = len([p for p in plan['parameters'] if 'default' not in p.keys() and p['name'] not in ['aws_access_key', 'aws_secret_key', 'aws_cloudformation_role_arn', 'region', 'SBArtifactS3Bucket', 'SBArtifactS3KeyPrefix', 'VpcId']])
lengths[plan['name']]["optional"] = len([p for p in plan['parameters'] if 'default' in p.keys() and p['name'] not in ['aws_access_key', 'aws_secret_key', 'aws_cloudformation_role_arn', 'region', 'SBArtifactS3Bucket', 'SBArtifactS3KeyPrefix', 'VpcId']])
lengths[plan['name']]["generic"] = len([p for p in plan['parameters'] if p['name'] in ['aws_access_key', 'aws_secret_key', 'aws_cloudformation_role_arn', 'region', 'SBArtifactS3Bucket', 'SBArtifactS3KeyPrefix', 'VpcId']])
lengths[plan['name']]["prescribed"] = len([p for p in prescribed_params[plan['name']] if p not in ["params_string", "params_hash"]])
result = jinja2.Environment(loader=jinja2.FileSystemLoader(path or './')).get_template(filename).render(
{"apb": apb, "template": template, "prescribed_params": prescribed_params, "lengths": lengths, "bindings": bindings}
)
with open(os.path.join(tmp_path, 'README.md'), 'w') as rendered_file:
rendered_file.write(result)
return
def camel_convert(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
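# Worked example (hypothetical input, for illustration):
#   camel_convert('DBInstanceEndpoint')  ->  'db_instance_endpoint'
# so a CloudFormation output of that name is published to the binding as
# 'DB_INSTANCE_ENDPOINT' after the .upper() call used above.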
| 50.869198
| 268
| 0.618779
|
import argparse
import logging
import yaml
from taskcat.utils import CFNYAMLHandler
import os
from random import random
from base64 import b64encode
import shutil
import subprocess
import re
import jinja2
try:
from aws_servicebroker_spec import AwsServiceBrokerSpec
except ImportError:
from sb_cfn_package.aws_servicebroker_spec import AwsServiceBrokerSpec
def cli():
if len(logging.getLogger().handlers) == 0:
logging.getLogger().addHandler(logging.StreamHandler())
logging.getLogger().handlers[0].setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
parser = argparse.ArgumentParser()
parser.add_argument(
'-l',
'--loglevel',
default='error',
        help='Set loglevel. Allowed values are debug, info, warning, error, critical. Default is error'
)
parser.add_argument(
'-n',
'--name',
help='name of AWS service'
)
parser.add_argument(
"-s",
"--service-spec-path",
default=None,
        help='Path to the service specification to use for the build; if none is provided, the spec will be extracted from the CloudFormation template'
)
parser.add_argument(
"-t",
"--docker-image-tag",
default=None,
help='tag to use for the docker image'
)
parser.add_argument(
"-a",
"--s3-acl",
default='private',
help='acl to use for objects uploaded to S3, default is private'
)
parser.add_argument(
"-b",
"--s3-bucket",
default=None,
help='bucket to use for artifacts, will autogenerate a new bucket by default'
)
parser.add_argument(
"-p",
"--profile",
default=None,
help='aws credential profile to use'
    )
parser.add_argument(
"-c",
"--ci",
default=None,
        help='Path to place the build output; if not specified, a random directory will be created in /tmp'
)
parser.add_argument(
"templatepath",
help='Path to the CloudFormation template to use for the build'
)
args = parser.parse_args()
template_path = os.path.abspath(args.templatepath)
loglevel = getattr(logging, args.loglevel.upper())
logging.getLogger().setLevel(loglevel)
logging.info('Set loglevel to %s' % args.loglevel.upper())
logging.debug("Passed arguments: {} ".format(args.__dict__))
build_path = None
if args.ci:
build_path = os.path.abspath(args.ci)
try:
shutil.rmtree(build_path + "/%s" % args.name)
except FileNotFoundError:
pass
sb_pack = SbCfnPackage(template_path=template_path, service_spec_path=args.service_spec_path)
artifacts = sb_pack.build_artifacts(args.name, args.s3_acl, args.s3_bucket, args.profile, build_path=build_path)
results = sb_pack.create_apb_skeleton(artifacts['apb_spec'], artifacts['prescribed_parameters'],
artifacts['bindings'], artifacts['template'], args.name, build_path=build_path)
os.chdir(os.path.join(results, 'apb'))
tag = args.docker_image_tag or artifacts['apb_spec']['name']
results = subprocess.run(["apb", "build", "--tag", tag], stdout=subprocess.PIPE)
print(results.stdout.decode("utf-8"))
if results.returncode != 0:
if results.stderr:
print(results.stderr.decode("utf-8"))
raise Exception('apb build failed')
if '/' in tag:
results = subprocess.run(["docker", "push", tag], stdout=subprocess.PIPE)
for l in results.stdout.decode("utf-8").split('\n'):
if not l.endswith(': Preparing') and not l.endswith(': Waiting'):
print(l)
if results.returncode != 0:
if results.stderr:
print(results.stderr.decode("utf-8"))
raise Exception('docker push failed')
if args.ci:
os.makedirs('./ci')
shutil.copy(os.path.join(os.path.dirname(template_path), 'ci/config.yml'), './ci/config.yml')
class SbCfnPackage(object):
def __init__(self, template_path=None, service_spec_path=None):
self.template = {}
self.service_spec = {}
if template_path:
self.template_path = os.path.dirname(template_path)
with open(template_path, 'r') as stream:
self.template = CFNYAMLHandler.ordered_safe_load(stream)
if not service_spec_path:
self.service_spec = self.template['Metadata']['AWS::ServiceBroker::Specification']
if service_spec_path:
with open(service_spec_path, 'r') as stream:
                self.service_spec = yaml.safe_load(stream)
if not self.service_spec:
raise Exception("cannot continue without either a ['Metadata']['AWS::ServiceBroker::Specification'] section in the template, or a path to a seperate spec using service_spec_path")
def build_artifacts(self, service_name, s3acl='private', bucket=None, profile=None, test=False, build_path=None):
return AwsServiceBrokerSpec(service_name=service_name, bucket_name=bucket, profile=profile, s3acl=s3acl, test=test).build_abp_spec(self.service_spec, self.template, self.template_path, build_path=build_path)
def create_apb_skeleton(self, apb_spec, prescribed_parameters, bindings, template, service_name, build_path=None):
if build_path:
os.makedirs(build_path, exist_ok=True)
tmpname = os.path.join(build_path, "%s" % service_name)
os.makedirs(os.path.join(build_path, "%s" % service_name), exist_ok=True)
else:
tmpname = '/tmp/AWSSB-' + str(b64encode(bytes(str(random()), 'utf8'))).replace("b'", '').replace("'", '').replace('=', '')
os.makedirs(tmpname)
print("build path: %s" % tmpname)
shutil.copytree(os.path.dirname(os.path.abspath(__file__)) + '/data/apb_template/', tmpname + '/apb')
for dname, dirs, files in os.walk(tmpname):
for fname in files:
fpath = os.path.join(dname, fname)
if not fname.endswith('.zip'):
with open(fpath) as f:
s = f.read()
s = s.replace("${SERVICE_NAME}", service_name).replace("${SERVICE_NAME_UPPER}", service_name.upper()).replace('${CREATE_IAM_USER}', str(bindings['IAMUser']))
with open(fpath, "w") as f:
f.write(s)
for plan in prescribed_parameters.keys():
prescribed_parameters[plan]['params_string'] = "{{ namespace }}::{{ _apb_plan_id }}::{{ _apb_service_class_id }}::{{ _apb_service_instance_id }}"
prescribed_parameters[plan]['params_hash'] = "{{ params_string | checksum }}"
with open(tmpname + '/apb/roles/aws-provision-apb/vars/%s.yml' % plan, "w") as f:
f.write(CFNYAMLHandler.ordered_safe_dump(prescribed_parameters[plan], default_flow_style=False))
shutil.copy(tmpname + '/apb/roles/aws-provision-apb/vars/%s.yml' % plan, tmpname + '/apb/roles/aws-deprovision-apb/vars/%s.yml' % plan)
with open(tmpname + '/apb/apb.yml', "w") as f:
f.write(CFNYAMLHandler.ordered_safe_dump(apb_spec, default_flow_style=False))
with open(tmpname + '/apb/roles/aws-provision-apb/tasks/main.yml') as f:
            main_provision_task = yaml.safe_load(f)
create_user = False
try:
create_user = template['Metadata']['AWS::ServiceBroker::Specification']['Bindings']['IAM']['AddKeypair']
except KeyError as e:
pass
full_bindings = []
for t in main_provision_task:
if 'name' in t.keys():
if t['name'] == 'Encode bind credentials':
if not create_user:
aws_key_id = '%s_AWS_ACCESS_KEY_ID' % service_name
aws_key = '%s_AWS_SECRET_ACCESS_KEY' % service_name
t['asb_encode_binding']['fields'].pop(aws_key_id.upper())
t['asb_encode_binding']['fields'].pop(aws_key.upper())
for b in bindings['CFNOutputs']:
t['asb_encode_binding']['fields'][camel_convert(b).upper()] = "{{ cfn.stack_outputs.%s }}" % b
description = ""
if "Description" in template['Outputs'][b].keys():
description = template['Outputs'][b]["Description"]
full_bindings.append({"name": camel_convert(b).upper(), "description": description})
elif 'block' in t.keys():
for it in t['block']:
if it['name'] == 'Create Resources':
if 'Parameters' in template.keys():
for p in template['Parameters'].keys():
default = ""
if 'Default' in template['Parameters'][p].keys():
default = template['Parameters'][p]['Default']
it['cloudformation']['template_parameters'][p] = '{{ %s | default("%s") | string }}' % (p, default)
with open(tmpname + '/apb/roles/aws-provision-apb/tasks/main.yml', 'w') as f:
f.write(CFNYAMLHandler.ordered_safe_dump(main_provision_task, default_flow_style=False))
with open(tmpname + '/template.yaml', 'w') as f:
f.write(CFNYAMLHandler.ordered_safe_dump(template, default_flow_style=False))
render_documentation(apb_spec, template, prescribed_parameters, tmpname, full_bindings)
return tmpname
def render_documentation(apb, template, prescribed_params, tmp_path, bindings):
dir_path = os.path.dirname(os.path.realpath(__file__))
abs_path = os.path.join(dir_path, 'data/serviceclass_documentation_template.md.j2')
path, filename = os.path.split(abs_path)
lengths = {}
for plan in apb['plans']:
lengths[plan['name']] = {"required": 0, "prescribed": 0, "optional": 0, "generic": 0}
if 'parameters' in plan.keys():
lengths[plan['name']]["required"] = len([p for p in plan['parameters'] if 'default' not in p.keys() and p['name'] not in ['aws_access_key', 'aws_secret_key', 'aws_cloudformation_role_arn', 'region', 'SBArtifactS3Bucket', 'SBArtifactS3KeyPrefix', 'VpcId']])
lengths[plan['name']]["optional"] = len([p for p in plan['parameters'] if 'default' in p.keys() and p['name'] not in ['aws_access_key', 'aws_secret_key', 'aws_cloudformation_role_arn', 'region', 'SBArtifactS3Bucket', 'SBArtifactS3KeyPrefix', 'VpcId']])
lengths[plan['name']]["generic"] = len([p for p in plan['parameters'] if p['name'] in ['aws_access_key', 'aws_secret_key', 'aws_cloudformation_role_arn', 'region', 'SBArtifactS3Bucket', 'SBArtifactS3KeyPrefix', 'VpcId']])
lengths[plan['name']]["prescribed"] = len([p for p in prescribed_params[plan['name']] if p not in ["params_string", "params_hash"]])
result = jinja2.Environment(loader=jinja2.FileSystemLoader(path or './')).get_template(filename).render(
{"apb": apb, "template": template, "prescribed_params": prescribed_params, "lengths": lengths, "bindings": bindings}
)
with open(os.path.join(tmp_path, 'README.md'), 'w') as rendered_file:
rendered_file.write(result)
return
def camel_convert(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
| true
| true
|
1c4107b8fb38d070468fe983b3c3690558f12e55
| 188
|
py
|
Python
|
chap09/list0914.py
|
ytianjin/GitTest
|
a657f46098728ad90f7140fadad356e8561c9a7a
|
[
"MIT"
] | null | null | null |
chap09/list0914.py
|
ytianjin/GitTest
|
a657f46098728ad90f7140fadad356e8561c9a7a
|
[
"MIT"
] | null | null | null |
chap09/list0914.py
|
ytianjin/GitTest
|
a657f46098728ad90f7140fadad356e8561c9a7a
|
[
"MIT"
] | null | null | null |
# Function that greets using an honorific (parameter with a default value)
def hello(name, honorific = '老师'):  # '老师' ("teacher") is the default honorific
    """Greet using an honorific."""
    print('你好,{}{}。'.format(name, honorific))  # "Hello, {name}{honorific}."
hello('田中')          # Tanaka
hello('关根', '先生')  # Sekine, '先生' (Mr.)
hello('西田', '女士')  # Nishida, '女士' (Ms.)
| 20.888889
| 47
| 0.521277
|
def hello(name, honorific = '老师'):
print('你好,{}{}。'.format(name, honorific))
hello('田中')
hello('关根', '先生')
hello('西田', '女士')
| true
| true
|
1c4107e872f9b36434a6a6860ea3fbdbb1fe48d1
| 904
|
py
|
Python
|
example-tests/example_BorisPusher.py
|
AleksandrPanov/pyHiChi
|
f9b0c4ec17ad1c9b5897770b86be9152b0ab29ca
|
[
"MIT"
] | null | null | null |
example-tests/example_BorisPusher.py
|
AleksandrPanov/pyHiChi
|
f9b0c4ec17ad1c9b5897770b86be9152b0ab29ca
|
[
"MIT"
] | null | null | null |
example-tests/example_BorisPusher.py
|
AleksandrPanov/pyHiChi
|
f9b0c4ec17ad1c9b5897770b86be9152b0ab29ca
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append("../bin/")
import pyHiChi as hichi
import numpy as np
def value_E_analytical(pos, t):
E = hichi.Vector3d(1, 0, 0) #sin(pos.x)
return E
def value_B_analytical(pos, t):
B = hichi.Vector3d(0, 0, 0)
return B
t = 0
p_array = hichi.ParticleArray()
fields_array = []
for i in range(11) :
pos = hichi.Vector3d(1.2*i, 3.4*i, 5.6*i)
mo = hichi.Vector3d(i*10**16, 0, 0)
new_p = hichi.Particle(pos, mo, 0.5, hichi.ELECTRON)
p_array.add(new_p)
fields_array.append(hichi.Field(value_E_analytical(pos, t), value_B_analytical(pos, t)))
#Boris Pusher
dt = 0.1
pusher = hichi.BorisPusher()
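# The Boris scheme is the standard leapfrog-style particle push: each step it
# advances every particle's momentum through the local E and B fields over dt.
# The loop below pushes all particles each step; note the fields are evaluated
# at the last `pos` from the setup loop, so this example is illustrative
# rather than a converged simulation.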
for k in range(11) :
print(p_array[1].get_momentum())
pusher(p_array, fields_array, dt)
t = dt * k
    for j in range(11) :
        # refresh the field seen by each particle (pos still holds the last
        # position from the setup loop, so all boxes get the same field here)
        fields_array[j].set_E(value_E_analytical(pos, t))
        fields_array[j].set_B(value_B_analytical(pos, t))
| 25.828571
| 92
| 0.650442
|
import sys
sys.path.append("../bin/")
import pyHiChi as hichi
import numpy as np
def value_E_analytical(pos, t):
E = hichi.Vector3d(1, 0, 0)
return E
def value_B_analytical(pos, t):
B = hichi.Vector3d(0, 0, 0)
return B
t = 0
p_array = hichi.ParticleArray()
fields_array = []
for i in range(11) :
pos = hichi.Vector3d(1.2*i, 3.4*i, 5.6*i)
mo = hichi.Vector3d(i*10**16, 0, 0)
new_p = hichi.Particle(pos, mo, 0.5, hichi.ELECTRON)
p_array.add(new_p)
fields_array.append(hichi.Field(value_E_analytical(pos, t), value_B_analytical(pos, t)))
dt = 0.1
pusher = hichi.BorisPusher()
for k in range(11) :
print(p_array[1].get_momentum())
pusher(p_array, fields_array, dt)
t = dt * k
    for j in range(11) :
        fields_array[j].set_E(value_E_analytical(pos, t))
        fields_array[j].set_B(value_B_analytical(pos, t))
| true
| true
|
1c41088e2af6caa9541933c82a64667acdb51778
| 1,296
|
py
|
Python
|
tests/unit_tests/cx_core/integration/z2m_test.py
|
Crocmagnon/controllerx
|
d928d5fc8d7ab50a86417227d5b732aea43cb653
|
[
"MIT"
] | null | null | null |
tests/unit_tests/cx_core/integration/z2m_test.py
|
Crocmagnon/controllerx
|
d928d5fc8d7ab50a86417227d5b732aea43cb653
|
[
"MIT"
] | null | null | null |
tests/unit_tests/cx_core/integration/z2m_test.py
|
Crocmagnon/controllerx
|
d928d5fc8d7ab50a86417227d5b732aea43cb653
|
[
"MIT"
] | null | null | null |
from typing import Any, Dict
import pytest
from cx_core.controller import Controller
from cx_core.integration.z2m import Z2MIntegration
from pytest_mock import MockerFixture
@pytest.mark.parametrize(
"data, action_key, handle_action_called, expected_called_with",
[
({"payload": '{"event_1": "action_1"}'}, "event_1", True, "action_1"),
({}, None, False, Any),
({"payload": '{"action": "action_1"}'}, None, True, "action_1"),
({"payload": '{"event_1": "action_1"}'}, "event_2", False, "Any"),
({"payload": '{"action_rate": 195}'}, "action", False, "Any"),
],
)
@pytest.mark.asyncio
async def test_event_callback(
fake_controller: Controller,
mocker: MockerFixture,
data: Dict,
action_key: str,
handle_action_called: bool,
expected_called_with: str,
):
handle_action_patch = mocker.patch.object(fake_controller, "handle_action")
z2m_integration = Z2MIntegration(fake_controller, {})
z2m_integration.kwargs = (
{"action_key": action_key} if action_key is not None else {}
)
await z2m_integration.event_callback("test", data, {})
if handle_action_called:
handle_action_patch.assert_called_once_with(expected_called_with)
else:
handle_action_patch.assert_not_called()
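# Reading of the cases above (as exercised by the parametrize table):
# Z2MIntegration parses the MQTT payload JSON and forwards the value found
# under the configured action_key (default "action") to handle_action; an
# empty payload, a non-matching key, or a merely similar key such as
# "action_rate" results in no call.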
| 33.230769
| 79
| 0.682099
|
from typing import Any, Dict
import pytest
from cx_core.controller import Controller
from cx_core.integration.z2m import Z2MIntegration
from pytest_mock import MockerFixture
@pytest.mark.parametrize(
"data, action_key, handle_action_called, expected_called_with",
[
({"payload": '{"event_1": "action_1"}'}, "event_1", True, "action_1"),
({}, None, False, Any),
({"payload": '{"action": "action_1"}'}, None, True, "action_1"),
({"payload": '{"event_1": "action_1"}'}, "event_2", False, "Any"),
({"payload": '{"action_rate": 195}'}, "action", False, "Any"),
],
)
@pytest.mark.asyncio
async def test_event_callback(
fake_controller: Controller,
mocker: MockerFixture,
data: Dict,
action_key: str,
handle_action_called: bool,
expected_called_with: str,
):
handle_action_patch = mocker.patch.object(fake_controller, "handle_action")
z2m_integration = Z2MIntegration(fake_controller, {})
z2m_integration.kwargs = (
{"action_key": action_key} if action_key is not None else {}
)
await z2m_integration.event_callback("test", data, {})
if handle_action_called:
handle_action_patch.assert_called_once_with(expected_called_with)
else:
handle_action_patch.assert_not_called()
| true
| true
|
1c4108c472a7ea0e153ff1950c5c6e5ccb5da43a
| 15,206
|
py
|
Python
|
lambda.py
|
amnona/boxbox
|
3641351e34776d504cc660e740df8528120b004e
|
[
"MIT"
] | null | null | null |
lambda.py
|
amnona/boxbox
|
3641351e34776d504cc660e740df8528120b004e
|
[
"MIT"
] | null | null | null |
lambda.py
|
amnona/boxbox
|
3641351e34776d504cc660e740df8528120b004e
|
[
"MIT"
] | null | null | null |
"""
BOX BOX aws skill for alexa
written by amnonim@gmail.com
Requires the following trigger:
Alexa Skills Kit trigger
Requires access to the following services:
AWS Systems Manager
Amazon CloudWatch Logs
uses the boxit role
For additional samples, visit the Alexa Skills Kit Getting Started guide at
http://amzn.to/1LGWsLG
"""
from __future__ import print_function
import boto3
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session):
return {
'outputSpeech': {
'type': 'PlainText',
'text': output
},
'card': {
'type': 'Simple',
'title': "SessionSpeechlet - " + title,
'content': "SessionSpeechlet - " + output
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'shouldEndSession': should_end_session
}
def build_response(session_attributes, speechlet_response):
return {
'version': '1.0',
'sessionAttributes': session_attributes,
'response': speechlet_response
}
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
""" If we wanted to initialize the session to have some attributes we could
add those here
"""
session_attributes = {}
card_title = "Welcome to box box"
speech_output = "I am box box. I will help you to keep track of foods in your fridge. " \
"You can add a box with food by saying something like: " \
"add fish to box number 4. " \
"Or you can ask me where a specific food is by saying:" \
"What is in box number 3. " \
"You can also get a list of all the items by saying: " \
"What is in the fridge?"
# If the user either does not reply to the welcome message or says something
# that is not understood, they will be prompted again with this text.
reprompt_text = "What should i do?" \
"you can add a box to the fridge by saying something like: " \
"add fish to box number 4. " \
"Or you can ask me where a specific food is by saying:" \
"What is in box number 3. " \
"You can also get a list of all the items by saying: " \
"What is in the fridge?"
should_end_session = False
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
def handle_session_end_request():
card_title = "Session Ended"
speech_output = "Box it will miss you! "
# Setting this to true ends the session and exits the skill.
should_end_session = True
return build_response({}, build_speechlet_response(
card_title, speech_output, None, should_end_session))
def query_box(intent, session):
'''what is in box {boxnum}
'''
card_title = intent['name']
session_attributes = {}
should_end_session = True
if 'boxnum' not in intent['slots']:
speech_output = "i didn't get the box number the food is in"
reprompt_text = "Please say the box number"
return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, False))
print('query_box')
print(intent['slots'])
print(intent['slots']['boxnum'])
boxnum = intent['slots']['boxnum']['value']
ssm = boto3.client('ssm')
pname = '/boxit/box/%s' % boxnum
try:
res = ssm.get_parameter(Name=pname, WithDecryption=False)
    except ssm.exceptions.ParameterNotFound:
speech_output = "box %s is not in my fridge list" % boxnum
reprompt_text = "Please say the box number"
return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, False))
box_content = res['Parameter']['Value']
box_date = res['Parameter']['LastModifiedDate'].date().strftime('%A %B %d')
speech_output = "box %s contains %s from date %s" % (boxnum, box_content, box_date)
reprompt_text = 'anything else?'
return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, False))
def inventory(intent, session):
    '''what is in the fridge (lists the contents of all boxes)
    '''
card_title = intent['name']
session_attributes = {}
should_end_session = True
ssm = boto3.client('ssm')
res = ssm.get_parameters_by_path(Path='/boxit/box')
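    # Note: get_parameters_by_path is paginated (at most 10 parameters per
    # call), so a fridge with more boxes would need to follow res['NextToken'].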
speech_output = []
for cbox in res['Parameters']:
boxnum = cbox['Name'].split('/')[-1]
box_content = cbox['Value']
if box_content == 'empty':
print('box %s empty' % boxnum)
continue
box_date = cbox['LastModifiedDate'].date().strftime('%A %B %d')
speech_output.append("box %s contains %s from date %s" % (boxnum, box_content, box_date))
speech_output = '. '.join(speech_output)
reprompt_text = 'anything else?'
return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, False))
def add_to_fridge(intent, session):
card_title = intent['name']
session_attributes = {}
should_end_session = True
if 'food' not in intent['slots']:
speech_output = "i didn't get the food to add to the fridge"
reprompt_text = "Please say the food to add"
return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, False))
if 'boxnum' not in intent['slots']:
speech_output = "i didn't get the box number the food is in"
reprompt_text = "Please say the box number"
return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, False))
print('add to fridge')
print(intent['slots'])
print(intent['slots']['food'])
print(intent['slots']['boxnum'])
if 'value' not in intent['slots']['food']:
speech_output = "i didn't get the food name"
reprompt_text = "Sorry"
return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, True))
food = intent['slots']['food']['value']
boxnum = intent['slots']['boxnum']['value']
# use parameter store
# looked at example at: https://medium.com/@nqbao/how-to-use-aws-ssm-parameter-store-easily-in-python-94fda04fea84
print('----ssm-----')
ssm = boto3.client('ssm')
print('putting parameter')
pname = '/boxit/box/%s' % boxnum
ssm.put_parameter(Name=pname, Type='String', Value=food, Overwrite=True)
# sdb = boto3.client('sdb')
# response = sdb.create_domain(DomainName='boxit')
# print(response)
# response = sdb.list_domains()
# print("Current domains: %s" % response['DomainNames'])
# response = sdb.put_attributes(DomainName="boxit",ItemName="pita",Attributes=[
# {'Name': 'color', 'Value': color,'Replace': True},])
# print(response)
speech_output = "cool. i added food %s to box %s" % (food, boxnum)
reprompt_text = "food added"
print("food added")
return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, True))
def remove_box(intent, session):
card_title = intent['name']
session_attributes = {}
should_end_session = True
if 'boxnum' not in intent['slots']:
speech_output = "i didn't get the box number to throw away"
reprompt_text = "Please say the box number"
return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, False))
print('remove_box')
boxnum = intent['slots']['boxnum']['value']
# use parameter store
# looked at example at: https://medium.com/@nqbao/how-to-use-aws-ssm-parameter-store-easily-in-python-94fda04fea84
print('----ssm-----')
ssm = boto3.client('ssm')
print('deleting parameter')
pname = '/boxit/box/%s' % boxnum
ssm.put_parameter(Name=pname, Type='String', Value='empty', Overwrite=True)
speech_output = "i threw away box %s" % boxnum
reprompt_text = "food removed"
print("food removed")
return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, True))
def delete_all(intent, session):
card_title = intent['name']
session_attributes = {}
should_end_session = True
# use parameter store
# looked at example at: https://medium.com/@nqbao/how-to-use-aws-ssm-parameter-store-easily-in-python-94fda04fea84
print('----ssm-----')
ssm = boto3.client('ssm')
res = ssm.get_parameters_by_path(Path='/boxit/box')
print('deleting all')
num_boxes = 0
for cbox in res['Parameters']:
boxnum = cbox['Name'].split('/')[-1]
pname = '/boxit/box/%s' % boxnum
ssm.put_parameter(Name=pname, Type='String', Value='empty', Overwrite=True)
num_boxes += 1
speech_output = 'deleted %s boxes' % num_boxes
reprompt_text = "all food removed"
print("food removed")
return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, True))
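# The favorite-color handlers below appear to be left over from the Alexa
# "favorite color" sample skill this file was adapted from (see the Getting
# Started link in the module docstring); they are unrelated to the fridge
# features above.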
def create_favorite_color_attributes(favorite_color):
return {"favoriteColor": favorite_color}
def set_color_in_session(intent, session):
""" Sets the color in the session and prepares the speech to reply to the
user.
"""
card_title = intent['name']
session_attributes = {}
should_end_session = False
if 'Color' in intent['slots']:
favorite_color = intent['slots']['Color']['value']
session_attributes = create_favorite_color_attributes(favorite_color)
speech_output = "I now know your favorite color is " + \
favorite_color + \
". You can ask me your favorite color by saying, " \
"what's my favorite color?"
reprompt_text = "You can ask me your favorite color by saying, " \
"what's my favorite color?"
else:
speech_output = "I'm not sure what your favorite color is. " \
"Please try again."
reprompt_text = "I'm not sure what your favorite color is. " \
"You can tell me your favorite color by saying, " \
"my favorite color is red."
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
def get_color_from_session(intent, session):
session_attributes = {}
reprompt_text = None
if session.get('attributes', {}) and "favoriteColor" in session.get('attributes', {}):
favorite_color = session['attributes']['favoriteColor']
speech_output = "Your favorite color is " + favorite_color + \
". Goodbye."
should_end_session = True
else:
speech_output = "I'm not sure what your favorite color is. " \
"You can say, my favorite color is red."
should_end_session = False
# Setting reprompt_text to None signifies that we do not want to reprompt
# the user. If the user does not respond or says something that is not
# understood, the session will end.
return build_response(session_attributes, build_speechlet_response(
intent['name'], speech_output, reprompt_text, should_end_session))
# --------------- Events ------------------
def on_session_started(session_started_request, session):
""" Called when the session starts """
print("on_session_started requestId=" + session_started_request['requestId'] + ", sessionId=" + session['sessionId'])
def on_launch(launch_request, session):
""" Called when the user launches the skill without specifying what they
want
"""
print("on_launch requestId=" + launch_request['requestId'] +
", sessionId=" + session['sessionId'])
# Dispatch to your skill's launch
return get_welcome_response()
def on_intent(intent_request, session):
""" Called when the user specifies an intent for this skill """
print("on_intent requestId=" + intent_request['requestId'] +
", sessionId=" + session['sessionId'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
# Dispatch to your skill's intent handlers
if intent_name == "MyColorIsIntent":
return set_color_in_session(intent, session)
elif intent_name == "WhatsMyColorIntent":
return get_color_from_session(intent, session)
elif intent_name == "AMAZON.HelpIntent":
return get_welcome_response()
elif intent_name == "AMAZON.CancelIntent" or intent_name == "AMAZON.StopIntent":
return handle_session_end_request()
elif intent_name == 'add_box':
return add_to_fridge(intent, session)
elif intent_name == 'query_box':
return query_box(intent, session)
elif intent_name == 'inventory':
return inventory(intent, session)
elif intent_name == 'remove_box':
return remove_box(intent, session)
elif intent_name == 'delete_all':
return delete_all(intent, session)
else:
return build_response({}, build_speechlet_response("unknown command", "sorry, i don't know how to %s" % intent_name, "i didn't understand", True))
# raise ValueError("Invalid intent %s" % intent_name)
def on_session_ended(session_ended_request, session):
""" Called when the user ends the session.
Is not called when the skill returns should_end_session=true
"""
print("on_session_ended requestId=" + session_ended_request['requestId'] +
", sessionId=" + session['sessionId'])
# add cleanup logic here
# --------------- Main handler ------------------
def lambda_handler(event, context):
""" Route the incoming request based on type (LaunchRequest, IntentRequest,
etc.) The JSON body of the request is provided in the event parameter.
"""
print("event.session.application.applicationId=" +
event['session']['application']['applicationId'])
"""
Uncomment this if statement and populate with your skill's application ID to
prevent someone else from configuring a skill that sends requests to this
function.
"""
# if (event['session']['application']['applicationId'] !=
# "amzn1.echo-sdk-ams.app.[unique-value-here]"):
# raise ValueError("Invalid Application ID")
if event['session']['new']:
on_session_started({'requestId': event['request']['requestId']},
event['session'])
if event['request']['type'] == "LaunchRequest":
return on_launch(event['request'], event['session'])
elif event['request']['type'] == "IntentRequest":
return on_intent(event['request'], event['session'])
elif event['request']['type'] == "SessionEndedRequest":
return on_session_ended(event['request'], event['session'])
| 38.890026
| 154
| 0.650007
|
from __future__ import print_function
import boto3
def build_speechlet_response(title, output, reprompt_text, should_end_session):
return {
'outputSpeech': {
'type': 'PlainText',
'text': output
},
'card': {
'type': 'Simple',
'title': "SessionSpeechlet - " + title,
'content': "SessionSpeechlet - " + output
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'shouldEndSession': should_end_session
}
def build_response(session_attributes, speechlet_response):
return {
'version': '1.0',
'sessionAttributes': session_attributes,
'response': speechlet_response
}
def get_welcome_response():
session_attributes = {}
card_title = "Welcome to box box"
speech_output = "I am box box. I will help you to keep track of foods in your fridge. " \
"You can add a box with food by saying something like: " \
"add fish to box number 4. " \
"Or you can ask me where a specific food is by saying:" \
"What is in box number 3. " \
"You can also get a list of all the items by saying: " \
"What is in the fridge?"
# If the user either does not reply to the welcome message or says something
# that is not understood, they will be prompted again with this text.
reprompt_text = "What should i do?" \
"you can add a box to the fridge by saying something like: " \
"add fish to box number 4. " \
"Or you can ask me where a specific food is by saying:" \
"What is in box number 3. " \
"You can also get a list of all the items by saying: " \
"What is in the fridge?"
should_end_session = False
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
def handle_session_end_request():
card_title = "Session Ended"
speech_output = "Box it will miss you! "
# Setting this to true ends the session and exits the skill.
should_end_session = True
return build_response({}, build_speechlet_response(
card_title, speech_output, None, should_end_session))
def query_box(intent, session):
card_title = intent['name']
session_attributes = {}
should_end_session = True
if 'boxnum' not in intent['slots']:
speech_output = "i didn't get the box number the food is in"
reprompt_text = "Please say the box number"
return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, False))
print('query_box')
print(intent['slots'])
print(intent['slots']['boxnum'])
boxnum = intent['slots']['boxnum']['value']
ssm = boto3.client('ssm')
pname = '/boxit/box/%s' % boxnum
try:
res = ssm.get_parameter(Name=pname, WithDecryption=False)
    except ssm.exceptions.ParameterNotFound:
speech_output = "box %s is not in my fridge list" % boxnum
reprompt_text = "Please say the box number"
return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, False))
box_content = res['Parameter']['Value']
box_date = res['Parameter']['LastModifiedDate'].date().strftime('%A %B %d')
speech_output = "box %s contains %s from date %s" % (boxnum, box_content, box_date)
reprompt_text = 'anything else?'
return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, False))
def inventory(intent, session):
card_title = intent['name']
session_attributes = {}
should_end_session = True
ssm = boto3.client('ssm')
res = ssm.get_parameters_by_path(Path='/boxit/box')
speech_output = []
for cbox in res['Parameters']:
boxnum = cbox['Name'].split('/')[-1]
box_content = cbox['Value']
if box_content == 'empty':
print('box %s empty' % boxnum)
continue
box_date = cbox['LastModifiedDate'].date().strftime('%A %B %d')
speech_output.append("box %s contains %s from date %s" % (boxnum, box_content, box_date))
speech_output = '. '.join(speech_output)
reprompt_text = 'anything else?'
return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, False))
def add_to_fridge(intent, session):
card_title = intent['name']
session_attributes = {}
should_end_session = True
if 'food' not in intent['slots']:
speech_output = "i didn't get the food to add to the fridge"
reprompt_text = "Please say the food to add"
return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, False))
if 'boxnum' not in intent['slots']:
speech_output = "i didn't get the box number the food is in"
reprompt_text = "Please say the box number"
return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, False))
print('add to fridge')
print(intent['slots'])
print(intent['slots']['food'])
print(intent['slots']['boxnum'])
if 'value' not in intent['slots']['food']:
speech_output = "i didn't get the food name"
reprompt_text = "Sorry"
return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, True))
food = intent['slots']['food']['value']
boxnum = intent['slots']['boxnum']['value']
# use parameter store
# looked at example at: https://medium.com/@nqbao/how-to-use-aws-ssm-parameter-store-easily-in-python-94fda04fea84
print('----ssm-----')
ssm = boto3.client('ssm')
print('putting parameter')
pname = '/boxit/box/%s' % boxnum
ssm.put_parameter(Name=pname, Type='String', Value=food, Overwrite=True)
# sdb = boto3.client('sdb')
# response = sdb.create_domain(DomainName='boxit')
# print(response)
# response = sdb.list_domains()
# print("Current domains: %s" % response['DomainNames'])
# response = sdb.put_attributes(DomainName="boxit",ItemName="pita",Attributes=[
# {'Name': 'color', 'Value': color,'Replace': True},])
# print(response)
speech_output = "cool. i added food %s to box %s" % (food, boxnum)
reprompt_text = "food added"
print("food added")
return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, True))
def remove_box(intent, session):
card_title = intent['name']
session_attributes = {}
should_end_session = True
if 'boxnum' not in intent['slots']:
speech_output = "i didn't get the box number to throw away"
reprompt_text = "Please say the box number"
return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, False))
print('remove_box')
boxnum = intent['slots']['boxnum']['value']
print('----ssm-----')
ssm = boto3.client('ssm')
print('deleting parameter')
pname = '/boxit/box/%s' % boxnum
ssm.put_parameter(Name=pname, Type='String', Value='empty', Overwrite=True)
speech_output = "i threw away box %s" % boxnum
reprompt_text = "food removed"
print("food removed")
return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, True))
def delete_all(intent, session):
card_title = intent['name']
session_attributes = {}
should_end_session = True
print('----ssm-----')
ssm = boto3.client('ssm')
res = ssm.get_parameters_by_path(Path='/boxit/box')
print('deleting all')
num_boxes = 0
for cbox in res['Parameters']:
boxnum = cbox['Name'].split('/')[-1]
pname = '/boxit/box/%s' % boxnum
ssm.put_parameter(Name=pname, Type='String', Value='empty', Overwrite=True)
num_boxes += 1
speech_output = 'deleted %s boxes' % num_boxes
reprompt_text = "all food removed"
print("food removed")
return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, True))
def create_favorite_color_attributes(favorite_color):
return {"favoriteColor": favorite_color}
def set_color_in_session(intent, session):
card_title = intent['name']
session_attributes = {}
should_end_session = False
if 'Color' in intent['slots']:
favorite_color = intent['slots']['Color']['value']
session_attributes = create_favorite_color_attributes(favorite_color)
speech_output = "I now know your favorite color is " + \
favorite_color + \
". You can ask me your favorite color by saying, " \
"what's my favorite color?"
reprompt_text = "You can ask me your favorite color by saying, " \
"what's my favorite color?"
else:
speech_output = "I'm not sure what your favorite color is. " \
"Please try again."
reprompt_text = "I'm not sure what your favorite color is. " \
"You can tell me your favorite color by saying, " \
"my favorite color is red."
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
def get_color_from_session(intent, session):
session_attributes = {}
reprompt_text = None
if session.get('attributes', {}) and "favoriteColor" in session.get('attributes', {}):
favorite_color = session['attributes']['favoriteColor']
speech_output = "Your favorite color is " + favorite_color + \
". Goodbye."
should_end_session = True
else:
speech_output = "I'm not sure what your favorite color is. " \
"You can say, my favorite color is red."
should_end_session = False
# Setting reprompt_text to None signifies that we do not want to reprompt
# the user. If the user does not respond or says something that is not
# understood, the session will end.
return build_response(session_attributes, build_speechlet_response(
intent['name'], speech_output, reprompt_text, should_end_session))
# --------------- Events ------------------
def on_session_started(session_started_request, session):
print("on_session_started requestId=" + session_started_request['requestId'] + ", sessionId=" + session['sessionId'])
def on_launch(launch_request, session):
print("on_launch requestId=" + launch_request['requestId'] +
", sessionId=" + session['sessionId'])
# Dispatch to your skill's launch
return get_welcome_response()
def on_intent(intent_request, session):
print("on_intent requestId=" + intent_request['requestId'] +
", sessionId=" + session['sessionId'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
if intent_name == "MyColorIsIntent":
return set_color_in_session(intent, session)
elif intent_name == "WhatsMyColorIntent":
return get_color_from_session(intent, session)
elif intent_name == "AMAZON.HelpIntent":
return get_welcome_response()
elif intent_name == "AMAZON.CancelIntent" or intent_name == "AMAZON.StopIntent":
return handle_session_end_request()
elif intent_name == 'add_box':
return add_to_fridge(intent, session)
elif intent_name == 'query_box':
return query_box(intent, session)
elif intent_name == 'inventory':
return inventory(intent, session)
elif intent_name == 'remove_box':
return remove_box(intent, session)
elif intent_name == 'delete_all':
return delete_all(intent, session)
else:
return build_response({}, build_speechlet_response("unknown command", "sorry, i don't know how to %s" % intent_name, "i didn't understand", True))
# raise ValueError("Invalid intent %s" % intent_name)
def on_session_ended(session_ended_request, session):
print("on_session_ended requestId=" + session_ended_request['requestId'] +
", sessionId=" + session['sessionId'])
# add cleanup logic here
# --------------- Main handler ------------------
def lambda_handler(event, context):
print("event.session.application.applicationId=" +
event['session']['application']['applicationId'])
# if (event['session']['application']['applicationId'] !=
# "amzn1.echo-sdk-ams.app.[unique-value-here]"):
# raise ValueError("Invalid Application ID")
if event['session']['new']:
on_session_started({'requestId': event['request']['requestId']},
event['session'])
if event['request']['type'] == "LaunchRequest":
return on_launch(event['request'], event['session'])
elif event['request']['type'] == "IntentRequest":
return on_intent(event['request'], event['session'])
elif event['request']['type'] == "SessionEndedRequest":
return on_session_ended(event['request'], event['session'])
| true
| true
|
1c410901806e8c5014b3fbe2f6ab0bd1239a957a
| 1,631
|
py
|
Python
|
ImprovementFactor.py
|
varlamnet/group_testing
|
a119c4967af22ceeffb89dcf6267fd262d327552
|
[
"MIT"
] | null | null | null |
ImprovementFactor.py
|
varlamnet/group_testing
|
a119c4967af22ceeffb89dcf6267fd262d327552
|
[
"MIT"
] | null | null | null |
ImprovementFactor.py
|
varlamnet/group_testing
|
a119c4967af22ceeffb89dcf6267fd262d327552
|
[
"MIT"
] | 1
|
2021-03-28T09:44:32.000Z
|
2021-03-28T09:44:32.000Z
|
# This script generates improvement factors for each algorithm. E.g. an
# improvement factor of 5 indicates that 1 test is on average sufficient to
# test 5 people.
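# For instance, if 20 tests suffice to screen n=100 individuals at the target
# accuracy, the improvement factor is 100/20 = 5.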
import os
os.chdir(os.path.dirname(__file__))
import numpy as np
from warnings import filterwarnings
filterwarnings('ignore')
n=100 # Number of individuals
k=2 # Number of infected
# This will be used later for calculating the improvement factor
sensdic = {}
specdic = {}
spec95 = {}
sens95 = {}
for name in ['COMP','DD','SCOMP','CBP', 'SR']:
sensdic[name] = np.genfromtxt(f'data_output/sensitivity_{name}.csv',
delimiter=',')
specdic[name] = np.genfromtxt(f'data_output/specificity_{name}.csv',
delimiter=',')
    # Smallest number of tests achieving 95% sensitivity / 95% specificity
sens95[name] = np.where(sensdic[name]>.95)[0][0]
spec95[name] = np.where(specdic[name]>.95)[0][0]
# Linear Interpolation to get a slightly more precise number
sens95[name] = sens95[name]-1 + (.95 - sensdic[name][sens95[name]-1])\
/(sensdic[name][sens95[name]] - sensdic[name][sens95[name]-1])
spec95[name] = spec95[name]-1 + (.95 - specdic[name][spec95[name]-1])\
/(specdic[name][spec95[name]] - specdic[name][spec95[name]-1])
# Improvement factors based on the number of tests achieving both 95%
# sensitivity and 95% specificity
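# Dorfman two-stage pooling with pool size 5: n/5 pooled tests plus an
# expected (1-(1-k/n)**5)*n individual retests from positive pools.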
ImproveDorfman = n/(n/5 + (1-(1-k/n)**5)*n)
ImproveSR = n/max(sens95['SR'], spec95['SR'])
ImproveCOMP = n/max(sens95['COMP'], spec95['COMP'])
ImproveDD = n/max(sens95['DD'], spec95['DD'])
ImproveSCOMP = n/max(sens95['SCOMP'], spec95['SCOMP'])
ImproveCBP = n/max(sens95['CBP'], spec95['CBP'])
print(ImproveDorfman, ImproveCOMP, ImproveDD, ImproveCBP, ImproveSCOMP,
ImproveSR)
| 37.068182
| 76
| 0.688535
|
import os
os.chdir(os.path.dirname(__file__))
import numpy as np
from warnings import filterwarnings
filterwarnings('ignore')
n=100
k=2
sensdic = {}
specdic = {}
spec95 = {}
sens95 = {}
for name in ['COMP','DD','SCOMP','CBP', 'SR']:
sensdic[name] = np.genfromtxt(f'data_output/sensitivity_{name}.csv',
delimiter=',')
specdic[name] = np.genfromtxt(f'data_output/specificity_{name}.csv',
delimiter=',')
sens95[name] = np.where(sensdic[name]>.95)[0][0]
spec95[name] = np.where(specdic[name]>.95)[0][0]
sens95[name] = sens95[name]-1 + (.95 - sensdic[name][sens95[name]-1])\
/(sensdic[name][sens95[name]] - sensdic[name][sens95[name]-1])
spec95[name] = spec95[name]-1 + (.95 - specdic[name][spec95[name]-1])\
/(specdic[name][spec95[name]] - specdic[name][spec95[name]-1])
ImproveDorfman = n/(n/5 + (1-(1-k/n)**5)*n)
ImproveSR = n/max(sens95['SR'], spec95['SR'])
ImproveCOMP = n/max(sens95['COMP'], spec95['COMP'])
ImproveDD = n/max(sens95['DD'], spec95['DD'])
ImproveSCOMP = n/max(sens95['SCOMP'], spec95['SCOMP'])
ImproveCBP = n/max(sens95['CBP'], spec95['CBP'])
print(ImproveDorfman, ImproveCOMP, ImproveDD, ImproveCBP, ImproveSCOMP,
ImproveSR)
| true
| true
|
1c4109c3b29ae9649491bede2ffe53f7710b824e
| 4,931
|
py
|
Python
|
tests/test_mpi.py
|
jayvdb/python-mbedtls
|
cd042e8b9d6869e39c11e3117199f1ce8ed782d4
|
[
"MIT"
] | null | null | null |
tests/test_mpi.py
|
jayvdb/python-mbedtls
|
cd042e8b9d6869e39c11e3117199f1ce8ed782d4
|
[
"MIT"
] | null | null | null |
tests/test_mpi.py
|
jayvdb/python-mbedtls
|
cd042e8b9d6869e39c11e3117199f1ce8ed782d4
|
[
"MIT"
] | null | null | null |
import numbers
from binascii import hexlify, unhexlify
import pytest
from mbedtls.mpi import MPI
@pytest.mark.parametrize("value", (12, 2 ** 32 - 1, 10 ** 100))
def test_from_int(value):
mpi = MPI.from_int(value)
assert mpi == value
assert value == mpi
assert mpi == mpi
def test_is_integral():
assert isinstance(MPI(42), numbers.Integral)
def test_prime():
assert MPI.prime(512).is_prime()
def test_add():
assert MPI(12) + MPI(12) == 24
assert MPI(12) + 12 == 24
assert 12 + MPI(12) == 24
def test_sub():
assert MPI(12) - MPI(5) == 7
assert MPI(12) - 5 == 7
assert 12 - MPI(5) == 7
def test_mul():
assert MPI(12) * MPI(2) == 24
assert MPI(12) * 2 == 24
assert 12 * MPI(2) == 24
def test_pow():
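    # pow with a modulus exercises MPI's modular exponentiation:
    # 12**5 == 248832, and 248832 % 7 == 3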
assert MPI(12).__pow__(5, 12 ** 5 + 1) == 248832
assert pow(MPI(12), 5, 12 ** 5 + 1) == 248832
assert MPI(12).__pow__(5, 7) == 3
assert pow(MPI(12), 5, 7) == 3
def test_eq_same_number_is_true():
assert (MPI(12) == MPI(12)) is True
assert (MPI(12) == 12) is True
assert (12 == MPI(12)) is True
def test_eq_different_numbers_is_false():
assert (MPI(12) == MPI(42)) is False
assert (MPI(12) == 42) is False
assert (12 == MPI(42)) is False
def test_neq_same_numbers_is_false():
assert (MPI(12) != MPI(12)) is False
assert (MPI(12) != 12) is False
assert (12 != MPI(12)) is False
def test_neq_different_numbers_is_true():
assert (MPI(12) != MPI(42)) is True
assert (MPI(12) != 42) is True
assert (12 != MPI(42)) is True
def test_lt_larger_number_is_true():
assert (MPI(12) < MPI(42)) is True
assert (MPI(12) < 42) is True
assert (12 < MPI(42)) is True
def test_lt_smaller_number_is_false():
assert (MPI(42) < MPI(12)) is False
assert (MPI(42) < 12) is False
assert (42 < MPI(12)) is False
def test_lt_same_number_is_false():
assert (MPI(12) < MPI(12)) is False
assert (MPI(12) < 12) is False
assert (12 < MPI(12)) is False
def test_gt_larger_number_is_false():
assert (MPI(12) > MPI(42)) is False
assert (MPI(12) > 42) is False
assert (12 > MPI(42)) is False
def test_gt_smaller_number_is_true():
assert (MPI(42) > MPI(12)) is True
assert (MPI(42) > 12) is True
assert (42 > MPI(12)) is True
def test_gt_same_number_is_false():
assert (MPI(12) > MPI(12)) is False
assert (MPI(12) > 12) is False
assert (12 > MPI(12)) is False
def test_le():
assert (MPI(12) <= MPI(42)) is True
assert (MPI(12) <= MPI(12)) is True
assert (MPI(42) <= MPI(12)) is False
def test_ge():
assert (MPI(42) >= MPI(12)) is True
assert (MPI(42) >= MPI(42)) is True
assert (MPI(12) >= MPI(42)) is False
def test_bool():
assert bool(MPI(0)) is False
def test_float():
assert float(MPI(12)) == 12.0
def test_rshift():
assert MPI(12) >> MPI(2) == 3
assert MPI(12) >> 2 == 3
with pytest.raises(TypeError):
assert 12 >> MPI(2) == 3
def test_lshift():
assert MPI(12) << MPI(2) == 48
assert MPI(12) << 2 == 48
with pytest.raises(TypeError):
assert 12 << MPI(2) == 48
def test_and():
assert MPI(12) & MPI(12) == 12
assert MPI(12) & MPI(3) == 0
assert MPI(15) & MPI(4) == 4
assert MPI(15) & 4 == 4
with pytest.raises(TypeError):
assert 15 & MPI(4) == 4
def test_or():
assert MPI(12) | MPI(12) == 12
assert MPI(12) | MPI(3) == 15
assert MPI(15) | MPI(4) == 15
assert MPI(15) | 4 == 15
with pytest.raises(TypeError):
assert 15 | MPI(4) == 15
def test_xor():
assert MPI(12) ^ MPI(12) == 0
assert MPI(12) ^ MPI(3) == 15
assert MPI(15) ^ MPI(4) == 11
assert MPI(15) ^ 4 == 11
with pytest.raises(TypeError):
assert 15 ^ MPI(4) == 11
def test_floordiv():
assert MPI(24) // MPI(2) == 12
assert MPI(24) // 2 == 12
assert 24 // MPI(2) == 12
def test_mod():
assert MPI(12) % MPI(10) == 2
assert MPI(12) % 10 == 2
assert 12 % MPI(10) == 2
@pytest.mark.parametrize("value", (12, 2 ** 32 - 1, 10 ** 100))
def test_bit_length(value):
mpi = MPI(value)
assert mpi == value
assert mpi.bit_length() == value.bit_length()
def test_from_empty_bytes():
value = b""
big = MPI.from_bytes(value, byteorder="big")
little = MPI.from_bytes(value, byteorder="little")
assert big == little == 0
assert big.bit_length() == little.bit_length() == 0
def test_from_bytes():
value = unhexlify(b"DEADBEEF")
mpi = MPI.from_bytes(value, byteorder="big")
assert mpi.to_bytes(4, byteorder="big") == unhexlify(b"DEADBEEF")
assert mpi.to_bytes(4, byteorder="little") == unhexlify(b"EFBEADDE")
assert mpi == int(hexlify(value), 16)
def test_to_bytes_overflow():
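    # a 5-byte value cannot be encoded in 2 bytes, so to_bytes must raise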
value = unhexlify(b"DEEADBEEFF")
mpi = MPI.from_bytes(value, byteorder="big")
with pytest.raises(OverflowError):
mpi.to_bytes(2, byteorder="big")
| 23.369668
| 72
| 0.600892
|
import numbers
from binascii import hexlify, unhexlify
import pytest
from mbedtls.mpi import MPI
@pytest.mark.parametrize("value", (12, 2 ** 32 - 1, 10 ** 100))
def test_from_int(value):
mpi = MPI.from_int(value)
assert mpi == value
assert value == mpi
assert mpi == mpi
def test_is_integral():
assert isinstance(MPI(42), numbers.Integral)
def test_prime():
assert MPI.prime(512).is_prime()
def test_add():
assert MPI(12) + MPI(12) == 24
assert MPI(12) + 12 == 24
assert 12 + MPI(12) == 24
def test_sub():
assert MPI(12) - MPI(5) == 7
assert MPI(12) - 5 == 7
assert 12 - MPI(5) == 7
def test_mul():
assert MPI(12) * MPI(2) == 24
assert MPI(12) * 2 == 24
assert 12 * MPI(2) == 24
def test_pow():
assert MPI(12).__pow__(5, 12 ** 5 + 1) == 248832
assert pow(MPI(12), 5, 12 ** 5 + 1) == 248832
assert MPI(12).__pow__(5, 7) == 3
assert pow(MPI(12), 5, 7) == 3
def test_eq_same_number_is_true():
assert (MPI(12) == MPI(12)) is True
assert (MPI(12) == 12) is True
assert (12 == MPI(12)) is True
def test_eq_different_numbers_is_false():
assert (MPI(12) == MPI(42)) is False
assert (MPI(12) == 42) is False
assert (12 == MPI(42)) is False
def test_neq_same_numbers_is_false():
assert (MPI(12) != MPI(12)) is False
assert (MPI(12) != 12) is False
assert (12 != MPI(12)) is False
def test_neq_different_numbers_is_true():
assert (MPI(12) != MPI(42)) is True
assert (MPI(12) != 42) is True
assert (12 != MPI(42)) is True
def test_lt_larger_number_is_true():
assert (MPI(12) < MPI(42)) is True
assert (MPI(12) < 42) is True
assert (12 < MPI(42)) is True
def test_lt_smaller_number_is_false():
assert (MPI(42) < MPI(12)) is False
assert (MPI(42) < 12) is False
assert (42 < MPI(12)) is False
def test_lt_same_number_is_false():
assert (MPI(12) < MPI(12)) is False
assert (MPI(12) < 12) is False
assert (12 < MPI(12)) is False
def test_gt_larger_number_is_false():
assert (MPI(12) > MPI(42)) is False
assert (MPI(12) > 42) is False
assert (12 > MPI(42)) is False
def test_gt_smaller_number_is_true():
assert (MPI(42) > MPI(12)) is True
assert (MPI(42) > 12) is True
assert (42 > MPI(12)) is True
def test_gt_same_number_is_false():
assert (MPI(12) > MPI(12)) is False
assert (MPI(12) > 12) is False
assert (12 > MPI(12)) is False
def test_le():
assert (MPI(12) <= MPI(42)) is True
assert (MPI(12) <= MPI(12)) is True
assert (MPI(42) <= MPI(12)) is False
def test_ge():
assert (MPI(42) >= MPI(12)) is True
assert (MPI(42) >= MPI(42)) is True
assert (MPI(12) >= MPI(42)) is False
def test_bool():
assert bool(MPI(0)) is False
def test_float():
assert float(MPI(12)) == 12.0
def test_rshift():
assert MPI(12) >> MPI(2) == 3
assert MPI(12) >> 2 == 3
with pytest.raises(TypeError):
assert 12 >> MPI(2) == 3
def test_lshift():
assert MPI(12) << MPI(2) == 48
assert MPI(12) << 2 == 48
with pytest.raises(TypeError):
assert 12 << MPI(2) == 48
def test_and():
assert MPI(12) & MPI(12) == 12
assert MPI(12) & MPI(3) == 0
assert MPI(15) & MPI(4) == 4
assert MPI(15) & 4 == 4
with pytest.raises(TypeError):
assert 15 & MPI(4) == 4
def test_or():
assert MPI(12) | MPI(12) == 12
assert MPI(12) | MPI(3) == 15
assert MPI(15) | MPI(4) == 15
assert MPI(15) | 4 == 15
with pytest.raises(TypeError):
assert 15 | MPI(4) == 15
def test_xor():
assert MPI(12) ^ MPI(12) == 0
assert MPI(12) ^ MPI(3) == 15
assert MPI(15) ^ MPI(4) == 11
assert MPI(15) ^ 4 == 11
with pytest.raises(TypeError):
assert 15 ^ MPI(4) == 11
def test_floordiv():
assert MPI(24) // MPI(2) == 12
assert MPI(24) // 2 == 12
assert 24 // MPI(2) == 12
def test_mod():
assert MPI(12) % MPI(10) == 2
assert MPI(12) % 10 == 2
assert 12 % MPI(10) == 2
@pytest.mark.parametrize("value", (12, 2 ** 32 - 1, 10 ** 100))
def test_bit_length(value):
mpi = MPI(value)
assert mpi == value
assert mpi.bit_length() == value.bit_length()
def test_from_empty_bytes():
value = b""
big = MPI.from_bytes(value, byteorder="big")
little = MPI.from_bytes(value, byteorder="little")
assert big == little == 0
assert big.bit_length() == little.bit_length() == 0
def test_from_bytes():
value = unhexlify(b"DEADBEEF")
mpi = MPI.from_bytes(value, byteorder="big")
assert mpi.to_bytes(4, byteorder="big") == unhexlify(b"DEADBEEF")
assert mpi.to_bytes(4, byteorder="little") == unhexlify(b"EFBEADDE")
assert mpi == int(hexlify(value), 16)
def test_to_bytes_overflow():
value = unhexlify(b"DEEADBEEFF")
mpi = MPI.from_bytes(value, byteorder="big")
with pytest.raises(OverflowError):
mpi.to_bytes(2, byteorder="big")
| true
| true
|
1c410a83e1a2b1847a2778180688b5c83598bbe2
| 837
|
py
|
Python
|
services/nris-api/backend/app/nris/models/inspection_reason.py
|
bcgov/mds
|
6c427a66a5edb4196222607291adef8fd6677038
|
[
"Apache-2.0"
] | 25
|
2018-07-09T19:04:37.000Z
|
2022-03-15T17:27:10.000Z
|
services/nris-api/backend/app/nris/models/inspection_reason.py
|
areyeslo/mds
|
e8c38e593e09b78e2a57009c0d003d6c4bfa32e6
|
[
"Apache-2.0"
] | 983
|
2018-04-25T20:08:07.000Z
|
2022-03-31T21:45:20.000Z
|
services/nris-api/backend/app/nris/models/inspection_reason.py
|
areyeslo/mds
|
e8c38e593e09b78e2a57009c0d003d6c4bfa32e6
|
[
"Apache-2.0"
] | 58
|
2018-05-15T22:35:50.000Z
|
2021-11-29T19:40:52.000Z
|
from app.extensions import db
from sqlalchemy.orm import validates
from app.nris.utils.base_model import Base
class InspectionReason(Base):
__tablename__ = "inspection_reason"
__table_args__ = {
'comment':
'Lookup table that contains a list of inspection reasons. E.g. Planned, Unplanned, Compliant, Non-compliance report'
}
inspection_reason_id = db.Column(db.Integer, primary_key=True)
inspection_reason_code = db.Column(db.String(10485760), nullable=False)
inspection_reason_description = db.Column(db.String(10485760))
def __repr__(self):
return f'<InspectionReason inspection_reason_code={self.inspection_reason_code} inspection_reason_description={self.inspection_reason_description}>'
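    # Convenience lookup (presumably used by the API layer) returning every
    # inspection reason row of this lookup table.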
@classmethod
def find_all_inspection_reason(cls):
return cls.query.all()
| 38.045455
| 156
| 0.757467
|
from app.extensions import db
from sqlalchemy.orm import validates
from app.nris.utils.base_model import Base
class InspectionReason(Base):
__tablename__ = "inspection_reason"
__table_args__ = {
'comment':
'Lookup table that contains a list of inspection reasons. E.g. Planned, Unplanned, Compliant, Non-compliance report'
}
inspection_reason_id = db.Column(db.Integer, primary_key=True)
inspection_reason_code = db.Column(db.String(10485760), nullable=False)
inspection_reason_description = db.Column(db.String(10485760))
def __repr__(self):
return f'<InspectionReason inspection_reason_code={self.inspection_reason_code} inspection_reason_description={self.inspection_reason_description}>'
@classmethod
def find_all_inspection_reason(cls):
return cls.query.all()
| true
| true
|
1c410aa85f8e9da26dca518cc43803e0a9d37290
| 6,653
|
py
|
Python
|
detectron2/export/caffe2_inference.py
|
MIXIAOXIN/detectron2-0.3-mxx
|
3b4eb6da27b6360139228052690bce7a74b1268e
|
[
"Apache-2.0"
] | 1,158
|
2020-11-24T04:44:06.000Z
|
2022-03-31T07:24:11.000Z
|
detectron2/export/caffe2_inference.py
|
MIXIAOXIN/detectron2-0.3-mxx
|
3b4eb6da27b6360139228052690bce7a74b1268e
|
[
"Apache-2.0"
] | 94
|
2020-11-25T08:29:07.000Z
|
2022-03-30T09:18:09.000Z
|
detectron2/export/caffe2_inference.py
|
MIXIAOXIN/detectron2-0.3-mxx
|
3b4eb6da27b6360139228052690bce7a74b1268e
|
[
"Apache-2.0"
] | 189
|
2020-11-24T07:32:03.000Z
|
2022-03-28T06:16:30.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
from itertools import count
import torch
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format
from .shared import ScopedWS, get_pb_arg_vali, get_pb_arg_vals, infer_device_type
logger = logging.getLogger(__name__)
# ===== ref: mobile-vision's 'Caffe2Wrapper' class ======
class ProtobufModel(torch.nn.Module):
"""
    Wrapper around a caffe2 protobuf model.
    It works just like nn.Module, but runs caffe2 under the hood.
    Inputs are a tuple of tensors matching the uninitialized external inputs;
    outputs are a Dict[str, tensor] keyed by external_output.
"""
_ids = count(0)
def __init__(self, predict_net, init_net):
logger.info(f"Initializing ProtobufModel for: {predict_net.name} ...")
super().__init__()
assert isinstance(predict_net, caffe2_pb2.NetDef)
assert isinstance(init_net, caffe2_pb2.NetDef)
# create unique temporary workspace for each instance
self.ws_name = "__tmp_ProtobufModel_{}__".format(next(self._ids))
self.net = core.Net(predict_net)
logger.info("Running init_net once to fill the parameters ...")
with ScopedWS(self.ws_name, is_reset=True, is_cleanup=False) as ws:
ws.RunNetOnce(init_net)
uninitialized_external_input = []
for blob in self.net.Proto().external_input:
if blob not in ws.Blobs():
uninitialized_external_input.append(blob)
ws.CreateBlob(blob)
ws.CreateNet(self.net)
self._error_msgs = set()
self._input_blobs = uninitialized_external_input
def _infer_output_devices(self, inputs):
"""
Returns:
list[str]: list of device for each external output
"""
def _get_device_type(torch_tensor):
assert torch_tensor.device.type in ["cpu", "cuda"]
assert torch_tensor.device.index == 0
return torch_tensor.device.type
predict_net = self.net.Proto()
input_device_types = {
(name, 0): _get_device_type(tensor) for name, tensor in zip(self._input_blobs, inputs)
}
device_type_map = infer_device_type(
predict_net, known_status=input_device_types, device_name_style="pytorch"
)
ssa, versions = core.get_ssa(predict_net)
versioned_outputs = [(name, versions[name]) for name in predict_net.external_output]
output_devices = [device_type_map[outp] for outp in versioned_outputs]
return output_devices
def forward(self, inputs):
"""
Args:
inputs (tuple[torch.Tensor])
Returns:
dict[str, torch.Tensor]
"""
assert len(inputs) == len(self._input_blobs), (
f"Length of inputs ({len(inputs)}) "
f"doesn't match the required input blobs: {self._input_blobs}"
)
with ScopedWS(self.ws_name, is_reset=False, is_cleanup=False) as ws:
for b, tensor in zip(self._input_blobs, inputs):
ws.FeedBlob(b, tensor)
try:
ws.RunNet(self.net.Proto().name)
except RuntimeError as e:
                if str(e) not in self._error_msgs:
self._error_msgs.add(str(e))
logger.warning("Encountered new RuntimeError: \n{}".format(str(e)))
logger.warning("Catch the error and use partial results.")
c2_outputs = [ws.FetchBlob(b) for b in self.net.Proto().external_output]
# Remove outputs of current run, this is necessary in order to
# prevent fetching the result from previous run if the model fails
# in the middle.
for b in self.net.Proto().external_output:
                # Needs to create an uninitialized blob to make the net runnable.
                # This is "equivalent" to: ws.RemoveBlob(b) then ws.CreateBlob(b),
                # but there's no such API.
ws.FeedBlob(b, f"{b}, a C++ native class of type nullptr (uninitialized).")
# Cast output to torch.Tensor on the desired device
output_devices = (
self._infer_output_devices(inputs)
if any(t.device.type != "cpu" for t in inputs)
else ["cpu" for _ in self.net.Proto().external_output]
)
outputs = []
for name, c2_output, device in zip(
self.net.Proto().external_output, c2_outputs, output_devices
):
if not isinstance(c2_output, np.ndarray):
raise RuntimeError(
"Invalid output for blob {}, received: {}".format(name, c2_output)
)
outputs.append(torch.Tensor(c2_output).to(device=device))
# TODO change to tuple in the future
return dict(zip(self.net.Proto().external_output, outputs))
class ProtobufDetectionModel(torch.nn.Module):
"""
    A class that works just like a pytorch meta arch in terms of inference, but
    runs a caffe2 model under the hood.
"""
def __init__(self, predict_net, init_net, *, convert_outputs=None):
"""
Args:
predict_net, init_net (core.Net): caffe2 nets
            convert_outputs (callable): a function that converts caffe2
outputs to the same format of the original pytorch model.
By default, use the one defined in the caffe2 meta_arch.
"""
super().__init__()
self.protobuf_model = ProtobufModel(predict_net, init_net)
self.size_divisibility = get_pb_arg_vali(predict_net, "size_divisibility", 0)
self.device = get_pb_arg_vals(predict_net, "device", b"cpu").decode("ascii")
if convert_outputs is None:
meta_arch = get_pb_arg_vals(predict_net, "meta_architecture", b"GeneralizedRCNN")
meta_arch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[meta_arch.decode("ascii")]
self._convert_outputs = meta_arch.get_outputs_converter(predict_net, init_net)
else:
self._convert_outputs = convert_outputs
def _convert_inputs(self, batched_inputs):
# currently all models convert inputs in the same way
return convert_batched_inputs_to_c2_format(
batched_inputs, self.size_divisibility, self.device
)
def forward(self, batched_inputs):
c2_inputs = self._convert_inputs(batched_inputs)
c2_results = self.protobuf_model(c2_inputs)
return self._convert_outputs(batched_inputs, c2_inputs, c2_results)
| 41.067901
| 98
| 0.638208
|
import logging
import numpy as np
from itertools import count
import torch
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format
from .shared import ScopedWS, get_pb_arg_vali, get_pb_arg_vals, infer_device_type
logger = logging.getLogger(__name__)
class ProtobufModel(torch.nn.Module):
_ids = count(0)
def __init__(self, predict_net, init_net):
logger.info(f"Initializing ProtobufModel for: {predict_net.name} ...")
super().__init__()
assert isinstance(predict_net, caffe2_pb2.NetDef)
assert isinstance(init_net, caffe2_pb2.NetDef)
# create unique temporary workspace for each instance
self.ws_name = "__tmp_ProtobufModel_{}__".format(next(self._ids))
self.net = core.Net(predict_net)
logger.info("Running init_net once to fill the parameters ...")
with ScopedWS(self.ws_name, is_reset=True, is_cleanup=False) as ws:
ws.RunNetOnce(init_net)
uninitialized_external_input = []
for blob in self.net.Proto().external_input:
if blob not in ws.Blobs():
uninitialized_external_input.append(blob)
ws.CreateBlob(blob)
ws.CreateNet(self.net)
self._error_msgs = set()
self._input_blobs = uninitialized_external_input
def _infer_output_devices(self, inputs):
def _get_device_type(torch_tensor):
assert torch_tensor.device.type in ["cpu", "cuda"]
assert torch_tensor.device.index == 0
return torch_tensor.device.type
predict_net = self.net.Proto()
input_device_types = {
(name, 0): _get_device_type(tensor) for name, tensor in zip(self._input_blobs, inputs)
}
device_type_map = infer_device_type(
predict_net, known_status=input_device_types, device_name_style="pytorch"
)
ssa, versions = core.get_ssa(predict_net)
versioned_outputs = [(name, versions[name]) for name in predict_net.external_output]
output_devices = [device_type_map[outp] for outp in versioned_outputs]
return output_devices
def forward(self, inputs):
assert len(inputs) == len(self._input_blobs), (
f"Length of inputs ({len(inputs)}) "
f"doesn't match the required input blobs: {self._input_blobs}"
)
with ScopedWS(self.ws_name, is_reset=False, is_cleanup=False) as ws:
for b, tensor in zip(self._input_blobs, inputs):
ws.FeedBlob(b, tensor)
try:
ws.RunNet(self.net.Proto().name)
except RuntimeError as e:
                if str(e) not in self._error_msgs:
self._error_msgs.add(str(e))
logger.warning("Encountered new RuntimeError: \n{}".format(str(e)))
logger.warning("Catch the error and use partial results.")
c2_outputs = [ws.FetchBlob(b) for b in self.net.Proto().external_output]
for b in self.net.Proto().external_output:
ws.FeedBlob(b, f"{b}, a C++ native class of type nullptr (uninitialized).")
# Cast output to torch.Tensor on the desired device
output_devices = (
self._infer_output_devices(inputs)
if any(t.device.type != "cpu" for t in inputs)
else ["cpu" for _ in self.net.Proto().external_output]
)
outputs = []
for name, c2_output, device in zip(
self.net.Proto().external_output, c2_outputs, output_devices
):
if not isinstance(c2_output, np.ndarray):
raise RuntimeError(
"Invalid output for blob {}, received: {}".format(name, c2_output)
)
outputs.append(torch.Tensor(c2_output).to(device=device))
# TODO change to tuple in the future
return dict(zip(self.net.Proto().external_output, outputs))
class ProtobufDetectionModel(torch.nn.Module):
def __init__(self, predict_net, init_net, *, convert_outputs=None):
super().__init__()
self.protobuf_model = ProtobufModel(predict_net, init_net)
self.size_divisibility = get_pb_arg_vali(predict_net, "size_divisibility", 0)
self.device = get_pb_arg_vals(predict_net, "device", b"cpu").decode("ascii")
if convert_outputs is None:
meta_arch = get_pb_arg_vals(predict_net, "meta_architecture", b"GeneralizedRCNN")
meta_arch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[meta_arch.decode("ascii")]
self._convert_outputs = meta_arch.get_outputs_converter(predict_net, init_net)
else:
self._convert_outputs = convert_outputs
def _convert_inputs(self, batched_inputs):
# currently all models convert inputs in the same way
return convert_batched_inputs_to_c2_format(
batched_inputs, self.size_divisibility, self.device
)
def forward(self, batched_inputs):
c2_inputs = self._convert_inputs(batched_inputs)
c2_results = self.protobuf_model(c2_inputs)
return self._convert_outputs(batched_inputs, c2_inputs, c2_results)
| true
| true
|
1c410ab33284aa4eeceacdbce6394607fcb2d075
| 9,354
|
py
|
Python
|
brozzler/__init__.py
|
vbanos/brozzler
|
261e7977ad198cfed160b89498d289267af8dd97
|
[
"Apache-2.0"
] | 1
|
2017-07-04T20:50:26.000Z
|
2017-07-04T20:50:26.000Z
|
brozzler/__init__.py
|
vbanos/brozzler
|
261e7977ad198cfed160b89498d289267af8dd97
|
[
"Apache-2.0"
] | null | null | null |
brozzler/__init__.py
|
vbanos/brozzler
|
261e7977ad198cfed160b89498d289267af8dd97
|
[
"Apache-2.0"
] | null | null | null |
"""
brozzler/__init__.py - __init__.py for brozzler package, contains some common
code
Copyright (C) 2014-2017 Internet Archive
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pkg_resources import get_distribution as _get_distribution
__version__ = _get_distribution('brozzler').version
class ShutdownRequested(Exception):
pass
class NothingToClaim(Exception):
pass
class CrawlStopped(Exception):
pass
class ProxyError(Exception):
pass
class ReachedLimit(Exception):
def __init__(self, http_error=None, warcprox_meta=None, http_payload=None):
import json
if http_error:
if "warcprox-meta" in http_error.headers:
self.warcprox_meta = json.loads(
http_error.headers["warcprox-meta"])
else:
self.warcprox_meta = None
self.http_payload = http_error.read()
elif warcprox_meta:
self.warcprox_meta = warcprox_meta
self.http_payload = http_payload
def __repr__(self):
return "ReachedLimit(warcprox_meta=%r,http_payload=%r)" % (
self.warcprox_meta if hasattr(self, 'warcprox_meta') else None,
self.http_payload if hasattr(self, 'http_payload') else None)
def __str__(self):
return self.__repr__()
# monkey-patch log level TRACE
TRACE = 5
import logging
def _logging_trace(msg, *args, **kwargs):
logging.root.trace(msg, *args, **kwargs)
def _logger_trace(self, msg, *args, **kwargs):
if self.isEnabledFor(TRACE):
self._log(TRACE, msg, args, **kwargs)
logging.trace = _logging_trace
logging.Logger.trace = _logger_trace
logging._levelToName[TRACE] = 'TRACE'
logging._nameToLevel['TRACE'] = TRACE
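# After this patch, e.g. logging.getLogger('brozzler').trace('msg') logs at
# level 5 (below DEBUG) via the Logger.trace method added above.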
_behaviors = None
def behaviors():
import os, yaml, string
global _behaviors
if _behaviors is None:
behaviors_yaml = os.path.join(
os.path.dirname(__file__), 'behaviors.yaml')
with open(behaviors_yaml) as fin:
_behaviors = yaml.load(fin)
return _behaviors
def behavior_script(url, template_parameters=None):
'''
Returns the javascript behavior string populated with template_parameters.
'''
import re, logging
for behavior in behaviors():
if re.match(behavior['url_regex'], url):
parameters = dict()
if 'default_parameters' in behavior:
parameters.update(behavior['default_parameters'])
if template_parameters:
parameters.update(template_parameters)
template = jinja2_environment().get_template(
behavior['behavior_js_template'])
script = template.render(parameters)
logging.info(
'using template=%r populated with parameters=%r for %r',
behavior['behavior_js_template'], parameters, url)
return script
return None
class ThreadExceptionGate:
logger = logging.getLogger(__module__ + "." + __qualname__)
def __init__(self, thread):
self.thread = thread
self.ok_to_raise = threading.Event()
self.pending_exception = None
self.lock = threading.RLock()
def __enter__(self):
assert self.thread == threading.current_thread()
if self.pending_exception:
self.logger.info(
'raising pending exception %s', self.pending_exception)
tmp = self.pending_exception
self.pending_exception = None
raise tmp
else:
self.ok_to_raise.set()
return self
def __exit__(self, exc_type, exc_value, traceback):
assert self.thread == threading.current_thread()
self.ok_to_raise.clear()
return False # don't swallow exception
def queue_exception(self, e):
with self.lock:
if self.pending_exception:
                self.logger.warning(
'%r already pending for thread %r, discarding %r',
self.pending_exception, self.thread, e)
else:
self.pending_exception = e
def __repr__(self):
return '<ThreadExceptionGate(%s)>' % self.thread
import threading
_thread_exception_gates = {}
_thread_exception_gates_lock = threading.Lock()
def thread_exception_gate(thread=None):
'''
Returns a `ThreadExceptionGate` for `thread` (current thread by default).
`ThreadExceptionGate` is a context manager which allows exceptions to be
raised from threads other than the current one, by way of `thread_raise`.
Example:
try:
with thread_exception_gate():
# do something
except:
# handle exception....
If `thread_raise` is called on a thread that is not currently inside the
`ThreadExceptionGate` context (pep340 "runtime environment"), the exception
is queued, and raised immediately if and when the thread enters the
context. Only one exception will be queued this way at a time, others are
discarded.
'''
if not thread:
thread = threading.current_thread()
with _thread_exception_gates_lock:
        if thread not in _thread_exception_gates:
_thread_exception_gates[thread] = ThreadExceptionGate(thread)
return _thread_exception_gates[thread]
thread_accept_exceptions = thread_exception_gate
def thread_raise(thread, exctype):
'''
Raises or queues the exception `exctype` for the thread `thread`.
See the documentation on the function `thread_exception_gate()` for more
information.
Adapted from http://tomerfiliba.com/recipes/Thread2/ which explains:
"The exception will be raised only when executing python bytecode. If your
thread calls a native/built-in blocking function, the exception will be
raised only when execution returns to the python code."
Raises:
TypeError if `exctype` is not a class
ValueError, SystemError in case of unexpected problems
'''
import ctypes, inspect, threading, logging
if not inspect.isclass(exctype):
raise TypeError(
'cannot raise %s, only exception types can be raised (not '
'instances)' % exctype)
gate = thread_exception_gate(thread)
with gate.lock:
if gate.ok_to_raise.is_set() and thread.is_alive():
gate.ok_to_raise.clear()
logging.info('raising %s in thread %s', exctype, thread)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
ctypes.c_long(thread.ident), ctypes.py_object(exctype))
if res == 0:
raise ValueError(
'invalid thread id? thread.ident=%s' % thread.ident)
elif res != 1:
# if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect
ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, 0)
raise SystemError('PyThreadState_SetAsyncExc failed')
else:
logging.info('queueing %s for thread %s', exctype, thread)
gate.queue_exception(exctype)
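# Illustrative pairing (hypothetical worker thread): code that wraps its loop
# in `with thread_accept_exceptions(): ...` can be interrupted from another
# thread via `thread_raise(worker, ShutdownRequested)`; outside the gate the
# exception is queued and raised the next time the gate is entered.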
def sleep(duration):
'''
Sleeps for duration seconds in increments of 0.5 seconds.
Use this so that the sleep can be interrupted by thread_raise().
'''
import time
start = time.time()
while True:
elapsed = time.time() - start
if elapsed >= duration:
break
time.sleep(min(duration - elapsed, 0.5))
_jinja2_env = None
def jinja2_environment():
global _jinja2_env
if not _jinja2_env:
import jinja2, json
_jinja2_env = jinja2.Environment(
loader=jinja2.PackageLoader('brozzler', 'js-templates'))
_jinja2_env.filters['json'] = json.dumps
return _jinja2_env
import urlcanon
def _remove_query(url):
url.question_mark = b''
url.query = b''
# XXX chop off path after last slash??
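# Site-level canonicalizer: semantic canonicalization plus query removal, so
# URLs differing only in their query string map to the same site SURT.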
site_surt_canon = urlcanon.Canonicalizer(
urlcanon.semantic.steps + [_remove_query])
import doublethink
import datetime
EPOCH_UTC = datetime.datetime.utcfromtimestamp(0.0).replace(
tzinfo=doublethink.UTC)
from brozzler.worker import BrozzlerWorker
from brozzler.robots import is_permitted_by_robots
from brozzler.frontier import RethinkDbFrontier
from brozzler.browser import Browser, BrowserPool, BrowsingException
from brozzler.model import (
new_job, new_job_file, new_site, Job, Page, Site, InvalidJobConf)
from brozzler.cli import suggest_default_chrome_exe
__all__ = ['Page', 'Site', 'BrozzlerWorker', 'is_permitted_by_robots',
'RethinkDbFrontier', 'Browser', 'BrowserPool', 'BrowsingException',
'new_job', 'new_site', 'Job', 'new_job_file', 'InvalidJobConf',
'sleep', 'thread_accept_exceptions', 'thread_raise']
| 35.165414
| 81
| 0.665063
|
from pkg_resources import get_distribution as _get_distribution
__version__ = _get_distribution('brozzler').version
class ShutdownRequested(Exception):
pass
class NothingToClaim(Exception):
pass
class CrawlStopped(Exception):
pass
class ProxyError(Exception):
pass
class ReachedLimit(Exception):
def __init__(self, http_error=None, warcprox_meta=None, http_payload=None):
import json
if http_error:
if "warcprox-meta" in http_error.headers:
self.warcprox_meta = json.loads(
http_error.headers["warcprox-meta"])
else:
self.warcprox_meta = None
self.http_payload = http_error.read()
elif warcprox_meta:
self.warcprox_meta = warcprox_meta
self.http_payload = http_payload
def __repr__(self):
return "ReachedLimit(warcprox_meta=%r,http_payload=%r)" % (
self.warcprox_meta if hasattr(self, 'warcprox_meta') else None,
self.http_payload if hasattr(self, 'http_payload') else None)
def __str__(self):
return self.__repr__()
TRACE = 5
import logging
def _logging_trace(msg, *args, **kwargs):
logging.root.trace(msg, *args, **kwargs)
def _logger_trace(self, msg, *args, **kwargs):
if self.isEnabledFor(TRACE):
self._log(TRACE, msg, args, **kwargs)
logging.trace = _logging_trace
logging.Logger.trace = _logger_trace
logging._levelToName[TRACE] = 'TRACE'
logging._nameToLevel['TRACE'] = TRACE
_behaviors = None
def behaviors():
import os, yaml, string
global _behaviors
if _behaviors is None:
behaviors_yaml = os.path.join(
os.path.dirname(__file__), 'behaviors.yaml')
with open(behaviors_yaml) as fin:
            _behaviors = yaml.safe_load(fin)
return _behaviors
def behavior_script(url, template_parameters=None):
import re, logging
for behavior in behaviors():
if re.match(behavior['url_regex'], url):
parameters = dict()
if 'default_parameters' in behavior:
parameters.update(behavior['default_parameters'])
if template_parameters:
parameters.update(template_parameters)
template = jinja2_environment().get_template(
behavior['behavior_js_template'])
script = template.render(parameters)
logging.info(
'using template=%r populated with parameters=%r for %r',
behavior['behavior_js_template'], parameters, url)
return script
return None
class ThreadExceptionGate:
logger = logging.getLogger(__module__ + "." + __qualname__)
def __init__(self, thread):
self.thread = thread
self.ok_to_raise = threading.Event()
self.pending_exception = None
self.lock = threading.RLock()
def __enter__(self):
assert self.thread == threading.current_thread()
if self.pending_exception:
self.logger.info(
'raising pending exception %s', self.pending_exception)
tmp = self.pending_exception
self.pending_exception = None
raise tmp
else:
self.ok_to_raise.set()
return self
def __exit__(self, exc_type, exc_value, traceback):
assert self.thread == threading.current_thread()
self.ok_to_raise.clear()
return False
def queue_exception(self, e):
with self.lock:
if self.pending_exception:
                self.logger.warning(
'%r already pending for thread %r, discarding %r',
self.pending_exception, self.thread, e)
else:
self.pending_exception = e
def __repr__(self):
return '<ThreadExceptionGate(%s)>' % self.thread
import threading
_thread_exception_gates = {}
_thread_exception_gates_lock = threading.Lock()
def thread_exception_gate(thread=None):
if not thread:
thread = threading.current_thread()
with _thread_exception_gates_lock:
        if thread not in _thread_exception_gates:
_thread_exception_gates[thread] = ThreadExceptionGate(thread)
return _thread_exception_gates[thread]
thread_accept_exceptions = thread_exception_gate
def thread_raise(thread, exctype):
import ctypes, inspect, threading, logging
if not inspect.isclass(exctype):
raise TypeError(
'cannot raise %s, only exception types can be raised (not '
'instances)' % exctype)
gate = thread_exception_gate(thread)
with gate.lock:
if gate.ok_to_raise.is_set() and thread.is_alive():
gate.ok_to_raise.clear()
logging.info('raising %s in thread %s', exctype, thread)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
ctypes.c_long(thread.ident), ctypes.py_object(exctype))
if res == 0:
raise ValueError(
'invalid thread id? thread.ident=%s' % thread.ident)
elif res != 1:
                # if it returns a number greater than one, you're in trouble;
                # call it again with exc=NULL to revert the effect
ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, 0)
raise SystemError('PyThreadState_SetAsyncExc failed')
else:
logging.info('queueing %s for thread %s', exctype, thread)
gate.queue_exception(exctype)
def sleep(duration):
import time
start = time.time()
while True:
elapsed = time.time() - start
if elapsed >= duration:
break
time.sleep(min(duration - elapsed, 0.5))
_jinja2_env = None
def jinja2_environment():
global _jinja2_env
if not _jinja2_env:
import jinja2, json
_jinja2_env = jinja2.Environment(
loader=jinja2.PackageLoader('brozzler', 'js-templates'))
_jinja2_env.filters['json'] = json.dumps
return _jinja2_env
import urlcanon
def _remove_query(url):
url.question_mark = b''
url.query = b''
site_surt_canon = urlcanon.Canonicalizer(
urlcanon.semantic.steps + [_remove_query])
import doublethink
import datetime
EPOCH_UTC = datetime.datetime.utcfromtimestamp(0.0).replace(
tzinfo=doublethink.UTC)
from brozzler.worker import BrozzlerWorker
from brozzler.robots import is_permitted_by_robots
from brozzler.frontier import RethinkDbFrontier
from brozzler.browser import Browser, BrowserPool, BrowsingException
from brozzler.model import (
new_job, new_job_file, new_site, Job, Page, Site, InvalidJobConf)
from brozzler.cli import suggest_default_chrome_exe
__all__ = ['Page', 'Site', 'BrozzlerWorker', 'is_permitted_by_robots',
'RethinkDbFrontier', 'Browser', 'BrowserPool', 'BrowsingException',
'new_job', 'new_site', 'Job', 'new_job_file', 'InvalidJobConf',
'sleep', 'thread_accept_exceptions', 'thread_raise']
| true
| true
|
1c410abd240935e34593300877903802761cbc3e
| 2,902
|
py
|
Python
|
examples/views/persistent.py
|
Ryomen-Sukuna/discord.py
|
0bcb0d0e3ce395d42a5b1dae61b0090791ee018d
|
[
"MIT"
] | 1
|
2021-09-11T09:24:38.000Z
|
2021-09-11T09:24:38.000Z
|
examples/views/persistent.py
|
Ryomen-Sukuna/discord.py
|
0bcb0d0e3ce395d42a5b1dae61b0090791ee018d
|
[
"MIT"
] | 1
|
2022-02-19T18:25:19.000Z
|
2022-02-19T18:25:19.000Z
|
examples/views/persistent.py
|
Ryomen-Sukuna/discord.py
|
0bcb0d0e3ce395d42a5b1dae61b0090791ee018d
|
[
"MIT"
] | null | null | null |
# This example requires the 'message_content' privileged intent to function.
from discord.ext import commands
import discord
# Define a simple View that persists between bot restarts
# In order for a view to persist between restarts, it needs to meet the following conditions:
# 1) The timeout of the View has to be set to None
# 2) Every item in the View has to have a custom_id set
# It is recommended that the custom_id be sufficiently unique to
# prevent conflicts with other buttons the bot sends.
# For this example the custom_id is prefixed with the name of the bot.
# Note that custom_ids can only be up to 100 characters long.
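# The buttons below use ids like 'persistent_view:green', which satisfy both
# conditions: the View's timeout is None and every item has a stable custom_id.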
class PersistentView(discord.ui.View):
def __init__(self):
super().__init__(timeout=None)
@discord.ui.button(label='Green', style=discord.ButtonStyle.green, custom_id='persistent_view:green')
async def green(self, interaction: discord.Interaction, button: discord.ui.Button):
await interaction.response.send_message('This is green.', ephemeral=True)
@discord.ui.button(label='Red', style=discord.ButtonStyle.red, custom_id='persistent_view:red')
async def red(self, interaction: discord.Interaction, button: discord.ui.Button):
await interaction.response.send_message('This is red.', ephemeral=True)
@discord.ui.button(label='Grey', style=discord.ButtonStyle.grey, custom_id='persistent_view:grey')
async def grey(self, interaction: discord.Interaction, button: discord.ui.Button):
await interaction.response.send_message('This is grey.', ephemeral=True)
class PersistentViewBot(commands.Bot):
def __init__(self):
intents = discord.Intents.default()
intents.message_content = True
super().__init__(command_prefix=commands.when_mentioned_or('$'), intents=intents)
async def setup_hook(self) -> None:
# Register the persistent view for listening here.
# Note that this does not send the view to any message.
# In order to do this you need to first send a message with the View, which is shown below.
# If you have the message_id you can also pass it as a keyword argument, but for this example
# we don't have one.
self.add_view(PersistentView())
async def on_ready(self):
print(f'Logged in as {self.user} (ID: {self.user.id})')
print('------')
bot = PersistentViewBot()
@bot.command()
@commands.is_owner()
async def prepare(ctx: commands.Context):
"""Starts a persistent view."""
# In order for a persistent view to be listened to, it needs to be sent to an actual message.
# Call this method once just to store it somewhere.
# In a more complicated program you might fetch the message_id from a database for use later.
# However this is outside of the scope of this simple example.
await ctx.send("What's your favourite colour?", view=PersistentView())
bot.run('token')
| 43.313433
| 105
| 0.723639
|
from discord.ext import commands
import discord
class PersistentView(discord.ui.View):
def __init__(self):
super().__init__(timeout=None)
@discord.ui.button(label='Green', style=discord.ButtonStyle.green, custom_id='persistent_view:green')
async def green(self, interaction: discord.Interaction, button: discord.ui.Button):
await interaction.response.send_message('This is green.', ephemeral=True)
@discord.ui.button(label='Red', style=discord.ButtonStyle.red, custom_id='persistent_view:red')
async def red(self, interaction: discord.Interaction, button: discord.ui.Button):
await interaction.response.send_message('This is red.', ephemeral=True)
@discord.ui.button(label='Grey', style=discord.ButtonStyle.grey, custom_id='persistent_view:grey')
async def grey(self, interaction: discord.Interaction, button: discord.ui.Button):
await interaction.response.send_message('This is grey.', ephemeral=True)
class PersistentViewBot(commands.Bot):
def __init__(self):
intents = discord.Intents.default()
intents.message_content = True
super().__init__(command_prefix=commands.when_mentioned_or('$'), intents=intents)
async def setup_hook(self) -> None:
self.add_view(PersistentView())
async def on_ready(self):
print(f'Logged in as {self.user} (ID: {self.user.id})')
print('------')
bot = PersistentViewBot()
@bot.command()
@commands.is_owner()
async def prepare(ctx: commands.Context):
# In order for a persistent view to be listened to, it needs to be sent to an actual message.
# Call this method once just to store it somewhere.
# In a more complicated program you might fetch the message_id from a database for use later.
# However this is outside of the scope of this simple example.
await ctx.send("What's your favourite colour?", view=PersistentView())
bot.run('token')
| true
| true
|
1c410b65d56b03b162f07a7975e0be44700aa9d5
| 2,131
|
py
|
Python
|
blue/tests/test_daedalus.py
|
cglewis/Daedalus
|
1ccd1a35ea4f343cf45ff629785ced284ec98b81
|
[
"Apache-2.0"
] | 8
|
2021-07-15T23:26:57.000Z
|
2022-02-28T22:26:19.000Z
|
blue/tests/test_daedalus.py
|
cglewis/Daedalus
|
1ccd1a35ea4f343cf45ff629785ced284ec98b81
|
[
"Apache-2.0"
] | 102
|
2021-05-07T15:06:52.000Z
|
2022-03-31T10:49:08.000Z
|
blue/tests/test_daedalus.py
|
cglewis/Daedalus
|
1ccd1a35ea4f343cf45ff629785ced284ec98b81
|
[
"Apache-2.0"
] | 7
|
2021-03-09T19:17:59.000Z
|
2022-01-27T17:37:47.000Z
|
# -*- coding: utf-8 -*-
"""
Test module for daedalus.
@author: Charlie Lewis
"""
from daedalus.daedalus import Daedalus
def test_start_remove_dovesnap():
instance = Daedalus()
# hack conf_dir since it's not installed as a library
instance.set_config_dir(conf_dir='/..')
instance.start_dovesnap()
instance.remove_dovesnap()
instance.reset_cwd()
def test_build_images():
instance = Daedalus()
# hack conf_dir since it's not installed as a library
instance.set_config_dir(conf_dir='/..')
instance.build_dockers(srsran=True, ueransim=True,
open5gs=True, srsran_lime=True)
instance.reset_cwd()
def test_create_remove_networks():
instance = Daedalus()
# hack conf_dir since it's not installed as a library
instance.set_config_dir(conf_dir='/..')
instance.start_dovesnap()
instance.create_networks()
instance.remove_networks()
instance.remove_dovesnap()
instance.reset_cwd()
def test_start_no_services():
instance = Daedalus()
instance.start_services()
def test_remove_no_services():
instance = Daedalus()
instance.remove_services()
def test_start_remove_services():
instance = Daedalus()
# hack conf_dir since it's not installed as a library
instance.set_config_dir(conf_dir='/..')
instance.start_dovesnap()
instance.create_networks()
instance.compose_files = ['-f', 'core/epc.yml',
'-f', 'core/upn.yml', '-f', 'core/db.yml']
instance.start_services()
instance.remove_services()
instance.cleanup()
def test_main_questions():
instance = Daedalus()
instance.main_questions()
def test_global_number_questions():
instance = Daedalus()
instance.global_number_questions('enb')
def test_sdr_questions():
instance = Daedalus()
instance.sdr_questions('enb')
def test_imsi_questions():
instance = Daedalus()
instance.imsi_questions()
def test_running_questions():
instance = Daedalus()
instance.running_questions()
def test_check_commands():
instance = Daedalus()
instance.check_commands()
| 23.94382
| 72
| 0.691225
|
from daedalus.daedalus import Daedalus
def test_start_remove_dovesnap():
instance = Daedalus()
instance.set_config_dir(conf_dir='/..')
instance.start_dovesnap()
instance.remove_dovesnap()
instance.reset_cwd()
def test_build_images():
instance = Daedalus()
# hack conf_dir since it's not installed as a library
instance.set_config_dir(conf_dir='/..')
instance.build_dockers(srsran=True, ueransim=True,
open5gs=True, srsran_lime=True)
instance.reset_cwd()
def test_create_remove_networks():
instance = Daedalus()
instance.set_config_dir(conf_dir='/..')
instance.start_dovesnap()
instance.create_networks()
instance.remove_networks()
instance.remove_dovesnap()
instance.reset_cwd()
def test_start_no_services():
instance = Daedalus()
instance.start_services()
def test_remove_no_services():
instance = Daedalus()
instance.remove_services()
def test_start_remove_services():
instance = Daedalus()
# hack conf_dir since it's not installed as a library
instance.set_config_dir(conf_dir='/..')
instance.start_dovesnap()
instance.create_networks()
instance.compose_files = ['-f', 'core/epc.yml',
'-f', 'core/upn.yml', '-f', 'core/db.yml']
instance.start_services()
instance.remove_services()
instance.cleanup()
def test_main_questions():
instance = Daedalus()
instance.main_questions()
def test_global_number_questions():
instance = Daedalus()
instance.global_number_questions('enb')
def test_sdr_questions():
instance = Daedalus()
instance.sdr_questions('enb')
def test_imsi_questions():
instance = Daedalus()
instance.imsi_questions()
def test_running_questions():
instance = Daedalus()
instance.running_questions()
def test_check_commands():
instance = Daedalus()
instance.check_commands()
| true
| true
|
1c410cf9fec954b97729a2db7677deaef04366e0
| 746
|
py
|
Python
|
blender/arm/logicnode/postprocess_colorgrading_get_shadow_node.py
|
ValtoGameEngines/Armory
|
ad3d3c63e64e9225e62b414b7ec4dd9fb93fab32
|
[
"Zlib"
] | 1
|
2021-03-17T05:51:45.000Z
|
2021-03-17T05:51:45.000Z
|
blender/arm/logicnode/postprocess_colorgrading_get_shadow_node.py
|
ValtoGameEngines/Armory
|
ad3d3c63e64e9225e62b414b7ec4dd9fb93fab32
|
[
"Zlib"
] | null | null | null |
blender/arm/logicnode/postprocess_colorgrading_get_shadow_node.py
|
ValtoGameEngines/Armory
|
ad3d3c63e64e9225e62b414b7ec4dd9fb93fab32
|
[
"Zlib"
] | 1
|
2020-06-29T07:54:21.000Z
|
2020-06-29T07:54:21.000Z
|
import bpy
from bpy.props import *
from bpy.types import Node, NodeSocket
from arm.logicnode.arm_nodes import *
class ColorgradingGetShadowNode(Node, ArmLogicTreeNode):
'''Colorgrading Get Shadow node'''
bl_idname = 'LNColorgradingGetShadowNode'
bl_label = 'Colorgrading Get Shadow'
bl_icon = 'QUESTION'
def init(self, context):
self.outputs.new('NodeSocketFloat', 'ShadowMax')
self.outputs.new('NodeSocketVector', 'Saturation')
self.outputs.new('NodeSocketVector', 'Contrast')
self.outputs.new('NodeSocketVector', 'Gamma')
self.outputs.new('NodeSocketVector', 'Gain')
self.outputs.new('NodeSocketVector', 'Offset')
add_node(ColorgradingGetShadowNode, category='Postprocess')
| 37.3
| 59
| 0.718499
|
import bpy
from bpy.props import *
from bpy.types import Node, NodeSocket
from arm.logicnode.arm_nodes import *
class ColorgradingGetShadowNode(Node, ArmLogicTreeNode):
bl_idname = 'LNColorgradingGetShadowNode'
bl_label = 'Colorgrading Get Shadow'
bl_icon = 'QUESTION'
def init(self, context):
self.outputs.new('NodeSocketFloat', 'ShadowMax')
self.outputs.new('NodeSocketVector', 'Saturation')
self.outputs.new('NodeSocketVector', 'Contrast')
self.outputs.new('NodeSocketVector', 'Gamma')
self.outputs.new('NodeSocketVector', 'Gain')
self.outputs.new('NodeSocketVector', 'Offset')
add_node(ColorgradingGetShadowNode, category='Postprocess')
| true
| true
|
1c410e2cc351e81603190ff16d74ece20704ff16
| 256
|
py
|
Python
|
Problem1/Collections/Collections.deque().py
|
Joanna-O-Ben/ADM-HW1
|
0a914d4ab5462fa563980644d3f5d777af61aef9
|
[
"MIT"
] | null | null | null |
Problem1/Collections/Collections.deque().py
|
Joanna-O-Ben/ADM-HW1
|
0a914d4ab5462fa563980644d3f5d777af61aef9
|
[
"MIT"
] | null | null | null |
Problem1/Collections/Collections.deque().py
|
Joanna-O-Ben/ADM-HW1
|
0a914d4ab5462fa563980644d3f5d777af61aef9
|
[
"MIT"
] | null | null | null |
# Enter your code here. Read input from STDIN. Print output to STDOUT
from collections import deque
d = deque()
for _ in range(int(input())):
inp = input().split()
getattr(d, inp[0])(*[inp[1]] if len(inp) > 1 else [])
print(*[item for item in d])
| 28.444444
| 69
| 0.648438
|
from collections import deque
d = deque()
for _ in range(int(input())):
inp = input().split()
getattr(d, inp[0])(*[inp[1]] if len(inp) > 1 else [])
print(*[item for item in d])
| true
| true
|
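The solution above dispatches deque methods by name with getattr; the same pattern with hard-coded commands instead of stdin (commands here are illustrative):
from collections import deque
d = deque()
for cmd in ['append 1', 'appendleft 2', 'pop']:
    parts = cmd.split()
    getattr(d, parts[0])(*parts[1:])   # e.g. d.append('1'); arguments stay strings
print(*d)   # prints: 2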
1c410e5c21526e59865d845fbef59dfe155a5f3e
| 1,612
|
py
|
Python
|
MNIST/src/utils.py
|
AnesBenmerzoug/Machine-Learning-Projects
|
c52b3f55968c042a20299473fb124b75cc410ce0
|
[
"MIT"
] | 1
|
2020-05-02T18:50:11.000Z
|
2020-05-02T18:50:11.000Z
|
MNIST/src/utils.py
|
AnesBenmerzoug/Machine-Learning-Projects
|
c52b3f55968c042a20299473fb124b75cc410ce0
|
[
"MIT"
] | null | null | null |
MNIST/src/utils.py
|
AnesBenmerzoug/Machine-Learning-Projects
|
c52b3f55968c042a20299473fb124b75cc410ce0
|
[
"MIT"
] | null | null | null |
from torchvision.utils import make_grid
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
def imgshow(img):
plt.imshow(np.transpose(make_grid(img).numpy(), (1, 2, 0)))
plt.show()
def plotlosses(losses, title="", xlabel="", ylabel=""):
epochs = np.arange(losses.size) + 1
plt.plot(epochs, losses)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.show()
def plotaccuracy(accuracy, classes, title="", xlabel="", ylabel=""):
indices = np.arange(len(classes))
width = 0.35
bar = plt.bar(indices, accuracy, width)
for idx, rect in enumerate(bar):
height = rect.get_height()
plt.text(
rect.get_x() + rect.get_width() / 2.0,
height,
"{:.2f}".format(accuracy[idx]),
ha="center",
va="bottom",
)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.xticks(indices, classes)
plt.show()
def plotconfusion(confusion_matrix, classes, title="", xlabel="", ylabel=""):
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(confusion_matrix)
fig.colorbar(cax)
tick_marks = np.arange(len(classes))
ax.set_xticks(tick_marks)
ax.set_yticks(tick_marks)
ax.set_xticklabels([""] + list(classes), rotation=90)
ax.set_yticklabels([""] + list(classes))
# Force label at every tick
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.show()
| 27.793103
| 77
| 0.635856
|
from torchvision.utils import make_grid
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
def imgshow(img):
plt.imshow(np.transpose(make_grid(img).numpy(), (1, 2, 0)))
plt.show()
def plotlosses(losses, title="", xlabel="", ylabel=""):
epochs = np.arange(losses.size) + 1
plt.plot(epochs, losses)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.show()
def plotaccuracy(accuracy, classes, title="", xlabel="", ylabel=""):
indices = np.arange(len(classes))
width = 0.35
bar = plt.bar(indices, accuracy, width)
for idx, rect in enumerate(bar):
height = rect.get_height()
plt.text(
rect.get_x() + rect.get_width() / 2.0,
height,
"{:.2f}".format(accuracy[idx]),
ha="center",
va="bottom",
)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.xticks(indices, classes)
plt.show()
def plotconfusion(confusion_matrix, classes, title="", xlabel="", ylabel=""):
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(confusion_matrix)
fig.colorbar(cax)
tick_marks = np.arange(len(classes))
ax.set_xticks(tick_marks)
ax.set_yticks(tick_marks)
ax.set_xticklabels([""] + list(classes), rotation=90)
ax.set_yticklabels([""] + list(classes))
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.show()
| true
| true
|
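The helpers above are thin matplotlib wrappers; a hedged usage sketch with invented numbers:
import numpy as np
plotlosses(np.array([2.30, 1.12, 0.74, 0.51]), title='Training loss', xlabel='epoch', ylabel='loss')
plotaccuracy([97.1, 95.4, 96.8], ['0', '1', '2'], title='Per-class accuracy', xlabel='digit', ylabel='accuracy (%)')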
1c410eea0f136cd9dccd0299a60356e341fd9e0b
| 912
|
py
|
Python
|
indico/modules/categories/legacy.py
|
CrownedSilverFox/conference-platform
|
1858a2908763dc7e4c29d3157369e9aab6064933
|
[
"MIT"
] | 1
|
2021-02-24T10:20:14.000Z
|
2021-02-24T10:20:14.000Z
|
indico/modules/categories/legacy.py
|
CrownedSilverFox/conference-platform
|
1858a2908763dc7e4c29d3157369e9aab6064933
|
[
"MIT"
] | 2
|
2015-01-20T22:25:18.000Z
|
2020-07-20T15:27:20.000Z
|
indico/modules/categories/legacy.py
|
CrownedSilverFox/conference-platform
|
1858a2908763dc7e4c29d3157369e9aab6064933
|
[
"MIT"
] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from lxml import etree
class XMLCategorySerializer:
def __init__(self, category):
self.category = category
def serialize_category(self):
xml = self._serialize_category()
return etree.tostring(xml, pretty_print=True)
def _serialize_category(self):
response = etree.Element('response')
response.append(self._serialize_category_info(self.category))
return response
def _serialize_category_info(self, category):
category_info = etree.Element('categInfo')
etree.SubElement(category_info, 'title').text = category.title
etree.SubElement(category_info, 'id').text = str(category.id)
return category_info
| 31.448276
| 70
| 0.70614
|
from lxml import etree
class XMLCategorySerializer:
def __init__(self, category):
self.category = category
def serialize_category(self):
xml = self._serialize_category()
return etree.tostring(xml, pretty_print=True)
def _serialize_category(self):
response = etree.Element('response')
response.append(self._serialize_category_info(self.category))
return response
def _serialize_category_info(self, category):
category_info = etree.Element('categInfo')
etree.SubElement(category_info, 'title').text = category.title
etree.SubElement(category_info, 'id').text = str(category.id)
return category_info
| true
| true
|
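serialize_category only reads the category's title and id, so a stand-in object is enough to demonstrate the output; a hedged sketch:
from types import SimpleNamespace
category = SimpleNamespace(title='Physics', id=42)
print(XMLCategorySerializer(category).serialize_category().decode())
# <response>
#   <categInfo>
#     <title>Physics</title>
#     <id>42</id>
#   </categInfo>
# </response>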
1c410f0ad645bdcc2c4e1a3e1cfaf1433fdbc3b6
| 5,624
|
py
|
Python
|
tests/repository/test_convert.py
|
giganticode/langmodels
|
53462a755eb07cbbccdd179605185059634d6ac4
|
[
"MIT"
] | 9
|
2020-01-21T13:34:49.000Z
|
2022-03-18T02:34:30.000Z
|
tests/repository/test_convert.py
|
giganticode/langmodels
|
53462a755eb07cbbccdd179605185059634d6ac4
|
[
"MIT"
] | 6
|
2020-05-02T07:03:26.000Z
|
2022-03-11T23:48:38.000Z
|
tests/repository/test_convert.py
|
giganticode/langmodels
|
53462a755eb07cbbccdd179605185059634d6ac4
|
[
"MIT"
] | 3
|
2019-08-03T00:06:19.000Z
|
2020-05-07T00:37:19.000Z
|
import os
from sys import platform
from typing import Dict
import jsons
import pytest
from langmodels import project_dir
from langmodels.repository.convert import convert_dict
config_v002_gru_cosine = {"arch": {
"bidir": False, "clip": 0.3,
"adam_betas": [0.7, 0.99],
"reg_fn": {"alpha": 2.1, "beta": 1.1},
"drop": {"multiplier": 0.5, "out": 0.1, "oute": 0.02, "outh": 0.15, "outi": 0.25, "w": 0.2},
"emb_sz": 10, "n_hid": 10, "n_layers": 1, "out_bias": True, "tie_weights": True},
"base_model": None, "bptt": 10, "bs": 5, "config_version": "0.0.2-alpha.0",
"corpus": {"extensions": "java", "path": "/home/lv71161/hlibbabii/raw_datasets/dev"},
"prep_function": {"callable": "bpe", "params": ["10k"],
"options": {"no_str": False, "no_com": False, "no_spaces": True, "no_unicode": True, "max_str_length": 922337203}},
"training_procedure": {
"schedule": {"cyc_len": 3, "early_stop": {"patience": 3}, "max_epochs": 1, "max_lr": 0.0001},
"weight_decay": 1e-06}}
config_v002_lstm_rafael = {"arch": {
"bidir": False, "clip": 0.3,
"adam_betas": [0.9, 0.99],
"reg_fn": {"alpha": 2.1, "beta": 1.1}, "qrnn": False,
"drop": {"multiplier": 0.5, "out": 0.1, "oute": 0.02, "outh": 0.15, "outi": 0.25, "w": 0.2},
"emb_sz": 10, "n_hid": 10, "n_layers": 1, "out_bias": True, "tie_weights": True},
"base_model": None, "bptt": 10, "bs": 5, "config_version": "0.0.2-alpha.0",
"corpus": {"extensions": "java", "path": "/home/lv71161/hlibbabii/raw_datasets/dev"},
"prep_function": {"callable": "bpe", "params": ["10k"],
"options": {"no_str": False, "no_com": False, "no_spaces": True, "no_unicode": True, "max_str_length": 922337203}},
"training_procedure": {
"schedule": {"mult_coeff": 0.5, "max_epochs": 1, "init_lr": 0.0001, "patience": 3, "max_lr_reduction_times": 6},
"weight_decay": 1e-06}}
config_v003_gru_cosine = {"arch": {
"name": "gru",
"bidir": False,
"drop": {"multiplier": 0.5, "out": 0.1, "oute": 0.02, "outh": 0.15, "outi": 0.25, "w": 0.2},
"emb_sz": 10, "n_hid": 10, "n_layers": 1, "out_bias": True, "tie_weights": True},
"base_model": None, "bptt": 10, "bs": 5, "config_version": "0.0.3-alpha.0",
"corpus": {"extensions": "java", "path": "/home/lv71161/hlibbabii/raw_datasets/dev"},
"prep_function": {"callable": "bpe", "params": ["10k"],
"options": {"no_str": False, "no_com": False, "no_spaces": True, "no_unicode": True, "max_str_length": 922337203}},
"training": {
"gradient_clip": 0.3,
"activation_regularization": {"alpha": 2.1, "beta": 1.1},
"optimizer": {"name": "Adam", "betas": [0.7, 0.99]},
"schedule": {"name": "cosine", "cyc_len": 3, "early_stop": {"patience": 3}, "max_epochs": 1, "max_lr": 0.0001},
"files_per_epoch": 50000,
"weight_decay": 1e-06}}
config_v003_lstm_rafael = {"arch": {
"name": "lstm",
"bidir": False, "qrnn": False,
"drop": {"multiplier": 0.5, "out": 0.1, "oute": 0.02, "outh": 0.15, "outi": 0.25, "w": 0.2},
"emb_sz": 10, "n_hid": 10, "n_layers": 1, "out_bias": True, "tie_weights": True},
"base_model": None, "bptt": 10, "bs": 5, "config_version": "0.0.3-alpha.0",
"corpus": {"extensions": "java", "path": "/home/lv71161/hlibbabii/raw_datasets/dev"},
"prep_function": {"callable": "bpe", "params": ["10k"],
"options": {"no_str": False, "no_com": False, "no_spaces": True, "no_unicode": True, "max_str_length": 922337203}},
"training": {
"gradient_clip": 0.3,
"activation_regularization": {"alpha": 2.1, "beta": 1.1},
"optimizer": {"name": "Adam", "betas": [0.9, 0.99]},
"schedule": {"name": "rafael", "mult_coeff": 0.5, "max_epochs": 1, "init_lr": 0.0001, "patience": 3, "max_lr_reduction_times": 6},
"files_per_epoch": 50000,
"weight_decay": 1e-06}}
metrics_v002 = {"bin_entropy": 2.1455788479, "n_epochs": 6, "best_epoch": 5, "training_time_minutes_per_epoch": 1429, "trainable_params": 27726250, "size_on_disk_mb": 350, "config_version": "0.0.2-alpha.0"}
metrics_v003 = {"bin_entropy": 2.1455788479, "n_epochs": 6, "best_epoch": 5, "training_time_minutes_per_epoch": 1429, "trainable_params": 27726250, "size_on_disk_mb": 350, "config_version": "0.0.3-alpha.0"}
@pytest.mark.skipif(platform != "linux", reason="jq is complicated to install on OSx and Windows")
def test_003_to_002():
assert convert_dict(config_v003_gru_cosine, 'config', '0.0.2-alpha.0') == config_v002_gru_cosine
assert convert_dict(config_v003_lstm_rafael, 'config', '0.0.2-alpha.0') == config_v002_lstm_rafael
assert convert_dict(metrics_v003, 'metrics', '0.0.2-alpha.0') == metrics_v002
def _get_transformation_dict(version: str) -> Dict[str, str]:
path_to_tranformation_string = os.path.join(project_dir, 'converters', 'forward', f'{version}.jq')
with open(path_to_tranformation_string, 'r') as f:
serialized_transformation_dict = f.read()
transformation_dict = jsons.loads(serialized_transformation_dict)
return transformation_dict
@pytest.mark.skipif(platform != "linux", reason="jq is complicated to install on OSx and Windows")
def test_002_to_003():
from jq import jq
version = '0.0.2-alpha.0'
tranformation_string = _get_transformation_dict(version)
assert jq(tranformation_string['config']).transform(config_v002_gru_cosine) == config_v003_gru_cosine
assert jq(tranformation_string['config']).transform(config_v002_lstm_rafael) == config_v003_lstm_rafael
assert jq(tranformation_string['metrics']).transform(metrics_v002) == metrics_v003
| 52.560748
| 206
| 0.635846
|
import os
from sys import platform
from typing import Dict
import jsons
import pytest
from langmodels import project_dir
from langmodels.repository.convert import convert_dict
config_v002_gru_cosine = {"arch": {
"bidir": False, "clip": 0.3,
"adam_betas": [0.7, 0.99],
"reg_fn": {"alpha": 2.1, "beta": 1.1},
"drop": {"multiplier": 0.5, "out": 0.1, "oute": 0.02, "outh": 0.15, "outi": 0.25, "w": 0.2},
"emb_sz": 10, "n_hid": 10, "n_layers": 1, "out_bias": True, "tie_weights": True},
"base_model": None, "bptt": 10, "bs": 5, "config_version": "0.0.2-alpha.0",
"corpus": {"extensions": "java", "path": "/home/lv71161/hlibbabii/raw_datasets/dev"},
"prep_function": {"callable": "bpe", "params": ["10k"],
"options": {"no_str": False, "no_com": False, "no_spaces": True, "no_unicode": True, "max_str_length": 922337203}},
"training_procedure": {
"schedule": {"cyc_len": 3, "early_stop": {"patience": 3}, "max_epochs": 1, "max_lr": 0.0001},
"weight_decay": 1e-06}}
config_v002_lstm_rafael = {"arch": {
"bidir": False, "clip": 0.3,
"adam_betas": [0.9, 0.99],
"reg_fn": {"alpha": 2.1, "beta": 1.1}, "qrnn": False,
"drop": {"multiplier": 0.5, "out": 0.1, "oute": 0.02, "outh": 0.15, "outi": 0.25, "w": 0.2},
"emb_sz": 10, "n_hid": 10, "n_layers": 1, "out_bias": True, "tie_weights": True},
"base_model": None, "bptt": 10, "bs": 5, "config_version": "0.0.2-alpha.0",
"corpus": {"extensions": "java", "path": "/home/lv71161/hlibbabii/raw_datasets/dev"},
"prep_function": {"callable": "bpe", "params": ["10k"],
"options": {"no_str": False, "no_com": False, "no_spaces": True, "no_unicode": True, "max_str_length": 922337203}},
"training_procedure": {
"schedule": {"mult_coeff": 0.5, "max_epochs": 1, "init_lr": 0.0001, "patience": 3, "max_lr_reduction_times": 6},
"weight_decay": 1e-06}}
config_v003_gru_cosine = {"arch": {
"name": "gru",
"bidir": False,
"drop": {"multiplier": 0.5, "out": 0.1, "oute": 0.02, "outh": 0.15, "outi": 0.25, "w": 0.2},
"emb_sz": 10, "n_hid": 10, "n_layers": 1, "out_bias": True, "tie_weights": True},
"base_model": None, "bptt": 10, "bs": 5, "config_version": "0.0.3-alpha.0",
"corpus": {"extensions": "java", "path": "/home/lv71161/hlibbabii/raw_datasets/dev"},
"prep_function": {"callable": "bpe", "params": ["10k"],
"options": {"no_str": False, "no_com": False, "no_spaces": True, "no_unicode": True, "max_str_length": 922337203}},
"training": {
"gradient_clip": 0.3,
"activation_regularization": {"alpha": 2.1, "beta": 1.1},
"optimizer": {"name": "Adam", "betas": [0.7, 0.99]},
"schedule": {"name": "cosine", "cyc_len": 3, "early_stop": {"patience": 3}, "max_epochs": 1, "max_lr": 0.0001},
"files_per_epoch": 50000,
"weight_decay": 1e-06}}
config_v003_lstm_rafael = {"arch": {
"name": "lstm",
"bidir": False, "qrnn": False,
"drop": {"multiplier": 0.5, "out": 0.1, "oute": 0.02, "outh": 0.15, "outi": 0.25, "w": 0.2},
"emb_sz": 10, "n_hid": 10, "n_layers": 1, "out_bias": True, "tie_weights": True},
"base_model": None, "bptt": 10, "bs": 5, "config_version": "0.0.3-alpha.0",
"corpus": {"extensions": "java", "path": "/home/lv71161/hlibbabii/raw_datasets/dev"},
"prep_function": {"callable": "bpe", "params": ["10k"],
"options": {"no_str": False, "no_com": False, "no_spaces": True, "no_unicode": True, "max_str_length": 922337203}},
"training": {
"gradient_clip": 0.3,
"activation_regularization": {"alpha": 2.1, "beta": 1.1},
"optimizer": {"name": "Adam", "betas": [0.9, 0.99]},
"schedule": {"name": "rafael", "mult_coeff": 0.5, "max_epochs": 1, "init_lr": 0.0001, "patience": 3, "max_lr_reduction_times": 6},
"files_per_epoch": 50000,
"weight_decay": 1e-06}}
metrics_v002 = {"bin_entropy": 2.1455788479, "n_epochs": 6, "best_epoch": 5, "training_time_minutes_per_epoch": 1429, "trainable_params": 27726250, "size_on_disk_mb": 350, "config_version": "0.0.2-alpha.0"}
metrics_v003 = {"bin_entropy": 2.1455788479, "n_epochs": 6, "best_epoch": 5, "training_time_minutes_per_epoch": 1429, "trainable_params": 27726250, "size_on_disk_mb": 350, "config_version": "0.0.3-alpha.0"}
@pytest.mark.skipif(platform != "linux", reason="jq is complicated to install on OSx and Windows")
def test_003_to_002():
assert convert_dict(config_v003_gru_cosine, 'config', '0.0.2-alpha.0') == config_v002_gru_cosine
assert convert_dict(config_v003_lstm_rafael, 'config', '0.0.2-alpha.0') == config_v002_lstm_rafael
assert convert_dict(metrics_v003, 'metrics', '0.0.2-alpha.0') == metrics_v002
def _get_transformation_dict(version: str) -> Dict[str, str]:
path_to_tranformation_string = os.path.join(project_dir, 'converters', 'forward', f'{version}.jq')
with open(path_to_tranformation_string, 'r') as f:
serialized_transformation_dict = f.read()
transformation_dict = jsons.loads(serialized_transformation_dict)
return transformation_dict
@pytest.mark.skipif(platform != "linux", reason="jq is complicated to install on OSx and Windows")
def test_002_to_003():
from jq import jq
version = '0.0.2-alpha.0'
tranformation_string = _get_transformation_dict(version)
assert jq(tranformation_string['config']).transform(config_v002_gru_cosine) == config_v003_gru_cosine
assert jq(tranformation_string['config']).transform(config_v002_lstm_rafael) == config_v003_lstm_rafael
assert jq(tranformation_string['metrics']).transform(metrics_v002) == metrics_v003
| true
| true
|
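The converters exercised above are jq programs stored per version; a minimal sketch of the same transform-by-program idea, using the jq binding's API exactly as the test does (the program itself is illustrative, not one of the repository's converters):
from jq import jq
# move a top-level key, keeping everything else
program = '. + {"training": .training_procedure} | del(.training_procedure)'
jq(program).transform({'training_procedure': {'weight_decay': 1e-06}, 'bs': 5})
# -> {'bs': 5, 'training': {'weight_decay': 1e-06}}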
1c4110621ec10a0735d880f891bbb673f8ab720e
| 2,438
|
py
|
Python
|
pandapipes/test/run_tests.py
|
SteffenMeinecke/pandapipes
|
2d0631c053735e4116a145bae9975379135b9c36
|
[
"BSD-3-Clause"
] | null | null | null |
pandapipes/test/run_tests.py
|
SteffenMeinecke/pandapipes
|
2d0631c053735e4116a145bae9975379135b9c36
|
[
"BSD-3-Clause"
] | null | null | null |
pandapipes/test/run_tests.py
|
SteffenMeinecke/pandapipes
|
2d0631c053735e4116a145bae9975379135b9c36
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020-2022 by Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel, and University of Kassel. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
import os
import pytest
from pandapipes.test import test_path
try:
import coverage as cov
except ImportError:
pass
from pandapower.test.run_tests import _get_cpus
try:
import pplog as logging
# logger = logging.getLogger()
# for handler in logger.handlers:
# logger.removeHandler(handler)
except ImportError:
import logging
logger = logging.getLogger()
for handler in logger.handlers:
logger.removeHandler(handler)
logger.setLevel(logging.CRITICAL)
def _get_test_dir(pp_module=None):
# helper function to get the test dir and check if it exists
test_dir = test_path
if pp_module is not None and isinstance(pp_module, str):
test_dir = os.path.join(test_dir, pp_module)
if not os.path.isdir(test_dir):
raise ValueError("test_dir {} is not a dir".format(test_dir))
return test_dir
def run_tests(parallel=False, n_cpu=None, coverage=False):
"""
    Function to execute all tests in the pandapipes test directory.
:param parallel: If true and pytest-xdist is installed, tests are run in parallel
:type parallel: bool, default False
:param n_cpu: number of CPUs to run the tests on in parallel. Only relevant for parallel runs.
    :type n_cpu: int, default None
:param coverage: creates some coverage with coverage module
:type coverage: bool, default False
:return: No Output.
"""
test_dir = _get_test_dir()
if coverage:
cov_tracker = cov.Coverage()
cov_tracker.start()
if parallel:
if n_cpu is None:
n_cpu = _get_cpus()
err = pytest.main([test_dir, "-xs", "-n", str(n_cpu)])
        if err == 4:
            raise ModuleNotFoundError("Parallel testing not possible. Please make sure that "
                                      "pytest-xdist is installed correctly.")
elif err > 2:
logger.error("Testing not successfully finished.")
else:
pytest.main([test_dir, "-xs"])
if coverage:
cov_tracker.stop()
cov_tracker.save()
cov_tracker.html_report(ignore_errors=True)
if __name__ == "__main__":
run_tests()
| 30.098765
| 99
| 0.671452
|
import os
import pytest
from pandapipes.test import test_path
try:
import coverage as cov
except ImportError:
pass
from pandapower.test.run_tests import _get_cpus
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger()
for handler in logger.handlers:
logger.removeHandler(handler)
logger.setLevel(logging.CRITICAL)
def _get_test_dir(pp_module=None):
test_dir = test_path
if pp_module is not None and isinstance(pp_module, str):
test_dir = os.path.join(test_dir, pp_module)
if not os.path.isdir(test_dir):
raise ValueError("test_dir {} is not a dir".format(test_dir))
return test_dir
def run_tests(parallel=False, n_cpu=None, coverage=False):
test_dir = _get_test_dir()
if coverage:
cov_tracker = cov.Coverage()
cov_tracker.start()
if parallel:
if n_cpu is None:
n_cpu = _get_cpus()
err = pytest.main([test_dir, "-xs", "-n", str(n_cpu)])
        if err == 4:
            raise ModuleNotFoundError("Parallel testing not possible. Please make sure that "
                                      "pytest-xdist is installed correctly.")
elif err > 2:
logger.error("Testing not successfully finished.")
else:
pytest.main([test_dir, "-xs"])
if coverage:
cov_tracker.stop()
cov_tracker.save()
cov_tracker.html_report(ignore_errors=True)
if __name__ == "__main__":
run_tests()
| true
| true
|
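A hedged usage sketch of the entry point above (the CPU count is illustrative):
run_tests()                          # serial run of the whole suite
run_tests(parallel=True, n_cpu=4)    # parallel run, requires pytest-xdist
run_tests(coverage=True)             # also writes an HTML coverage report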
1c4111b17f5ddd93684d22bf76d2ddbef349695f
| 19,278
|
py
|
Python
|
melange/openstack/common/extensions.py
|
CiscoSystems/melange
|
d8ff17ecb9466b64dbd064710489ea62843e6636
|
[
"Apache-2.0"
] | 1
|
2016-05-25T13:48:06.000Z
|
2016-05-25T13:48:06.000Z
|
melange/openstack/common/extensions.py
|
openstack-attic/melange
|
6ec8d7068e09505614dfc5f3edccfbe26aa50fe4
|
[
"Apache-2.0"
] | null | null | null |
melange/openstack/common/extensions.py
|
openstack-attic/melange
|
6ec8d7068e09505614dfc5f3edccfbe26aa50fe4
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import imp
import os
import routes
import webob.dec
import webob.exc
import logging
from lxml import etree
from melange.openstack.common import exception
from melange.openstack.common import wsgi
LOG = logging.getLogger('extensions')
DEFAULT_XMLNS = "http://docs.openstack.org/"
XMLNS_ATOM = "http://www.w3.org/2005/Atom"
class ExtensionDescriptor(object):
"""Base class that defines the contract for extensions.
Note that you don't have to derive from this class to have a valid
extension; it is purely a convenience.
"""
def get_name(self):
"""The name of the extension.
e.g. 'Fox In Socks'
"""
raise NotImplementedError()
def get_alias(self):
"""The alias for the extension.
e.g. 'FOXNSOX'
"""
raise NotImplementedError()
def get_description(self):
"""Friendly description for the extension.
e.g. 'The Fox In Socks Extension'
"""
raise NotImplementedError()
def get_namespace(self):
"""The XML namespace for the extension.
e.g. 'http://www.fox.in.socks/api/ext/pie/v1.0'
"""
raise NotImplementedError()
def get_updated(self):
"""The timestamp when the extension was last updated.
e.g. '2011-01-22T13:25:27-06:00'
"""
        # NOTE(justinsb): Not sure what the purpose of this is, vs the XML NS
raise NotImplementedError()
def get_resources(self):
"""List of extensions.ResourceExtension extension objects.
Resources define new nouns, and are accessible through URLs.
"""
resources = []
return resources
def get_actions(self):
"""List of extensions.ActionExtension extension objects.
Actions are verbs callable from the API.
"""
actions = []
return actions
def get_request_extensions(self):
"""List of extensions.RequestException extension objects.
Request extensions are used to handle custom request data.
"""
request_exts = []
return request_exts
class ActionExtensionController(object):
def __init__(self, application):
self.application = application
self.action_handlers = {}
def add_action(self, action_name, handler):
self.action_handlers[action_name] = handler
def action(self, req, id, body):
for action_name, handler in self.action_handlers.iteritems():
if action_name in body:
return handler(body, req, id)
# no action handler found (bump to downstream application)
res = self.application
return res
class ActionExtensionResource(wsgi.Resource):
def __init__(self, application):
controller = ActionExtensionController(application)
wsgi.Resource.__init__(self, controller)
def add_action(self, action_name, handler):
self.controller.add_action(action_name, handler)
class RequestExtensionController(object):
def __init__(self, application):
self.application = application
self.handlers = []
def add_handler(self, handler):
self.handlers.append(handler)
def process(self, req, *args, **kwargs):
res = req.get_response(self.application)
# currently request handlers are un-ordered
for handler in self.handlers:
res = handler(req, res)
return res
class RequestExtensionResource(wsgi.Resource):
def __init__(self, application):
controller = RequestExtensionController(application)
wsgi.Resource.__init__(self, controller)
def add_handler(self, handler):
self.controller.add_handler(handler)
class ExtensionsResource(wsgi.Resource):
def __init__(self, extension_manager):
self.extension_manager = extension_manager
body_serializers = {'application/xml': ExtensionsXMLSerializer()}
serializer = wsgi.ResponseSerializer(body_serializers=body_serializers)
super(ExtensionsResource, self).__init__(self, None, serializer)
def _translate(self, ext):
ext_data = {}
ext_data['name'] = ext.get_name()
ext_data['alias'] = ext.get_alias()
ext_data['description'] = ext.get_description()
ext_data['namespace'] = ext.get_namespace()
ext_data['updated'] = ext.get_updated()
ext_data['links'] = [] # TODO(dprince): implement extension links
return ext_data
def index(self, req):
extensions = []
for _alias, ext in self.extension_manager.extensions.iteritems():
extensions.append(self._translate(ext))
return dict(extensions=extensions)
def show(self, req, id):
# NOTE(dprince): the extensions alias is used as the 'id' for show
ext = self.extension_manager.extensions.get(id, None)
if not ext:
raise webob.exc.HTTPNotFound(
_("Extension with alias %s does not exist") % id)
return dict(extension=self._translate(ext))
def delete(self, req, id):
raise webob.exc.HTTPNotFound()
def create(self, req):
raise webob.exc.HTTPNotFound()
class ExtensionMiddleware(wsgi.Middleware):
"""Extensions middleware for WSGI."""
@classmethod
def factory(cls, global_config, **local_config):
"""Paste factory."""
def _factory(app):
return cls(app, global_config, **local_config)
return _factory
def _action_ext_resources(self, application, ext_mgr, mapper):
"""Return a dict of ActionExtensionResource-s by collection."""
action_resources = {}
for action in ext_mgr.get_actions():
if not action.collection in action_resources.keys():
resource = ActionExtensionResource(application)
mapper.connect("/%s/:(id)/action.:(format)" %
action.collection,
action='action',
controller=resource,
conditions=dict(method=['POST']))
mapper.connect("/%s/:(id)/action" %
action.collection,
action='action',
controller=resource,
conditions=dict(method=['POST']))
action_resources[action.collection] = resource
return action_resources
def _request_ext_resources(self, application, ext_mgr, mapper):
"""Returns a dict of RequestExtensionResource-s by collection."""
request_ext_resources = {}
for req_ext in ext_mgr.get_request_extensions():
if not req_ext.key in request_ext_resources.keys():
resource = RequestExtensionResource(application)
mapper.connect(req_ext.url_route + '.:(format)',
action='process',
controller=resource,
conditions=req_ext.conditions)
mapper.connect(req_ext.url_route,
action='process',
controller=resource,
conditions=req_ext.conditions)
request_ext_resources[req_ext.key] = resource
return request_ext_resources
def __init__(self, application, config, ext_mgr=None):
ext_mgr = ext_mgr or ExtensionManager(
config['api_extensions_path'])
mapper = routes.Mapper()
# extended resources
for resource_ext in ext_mgr.get_resources():
LOG.debug(_('Extended resource: %s'), resource_ext.collection)
controller_resource = wsgi.Resource(resource_ext.controller,
resource_ext.deserializer,
resource_ext.serializer)
self._map_custom_collection_actions(resource_ext, mapper,
controller_resource)
kargs = dict(controller=controller_resource,
collection=resource_ext.collection_actions,
member=resource_ext.member_actions)
if resource_ext.parent:
kargs['parent_resource'] = resource_ext.parent
mapper.resource(resource_ext.collection,
resource_ext.collection, **kargs)
# extended actions
action_resources = self._action_ext_resources(application, ext_mgr,
mapper)
for action in ext_mgr.get_actions():
LOG.debug(_('Extended action: %s'), action.action_name)
resource = action_resources[action.collection]
resource.add_action(action.action_name, action.handler)
# extended requests
req_controllers = self._request_ext_resources(application, ext_mgr,
mapper)
for request_ext in ext_mgr.get_request_extensions():
LOG.debug(_('Extended request: %s'), request_ext.key)
controller = req_controllers[request_ext.key]
controller.add_handler(request_ext.handler)
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
mapper)
super(ExtensionMiddleware, self).__init__(application)
def _map_custom_collection_actions(self, resource_ext, mapper,
controller_resource):
for action, method in resource_ext.collection_actions.iteritems():
parent = resource_ext.parent
conditions = dict(method=[method])
path = "/%s/%s" % (resource_ext.collection, action)
path_prefix = ""
if parent:
path_prefix = "/%s/{%s_id}" % (parent["collection_name"],
parent["member_name"])
with mapper.submapper(controller=controller_resource,
action=action,
path_prefix=path_prefix,
conditions=conditions) as submap:
submap.connect(path)
submap.connect("%s.:(format)" % path)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
"""Route the incoming request with router."""
req.environ['extended.app'] = self.application
return self._router
@staticmethod
@webob.dec.wsgify(RequestClass=wsgi.Request)
def _dispatch(req):
"""Dispatch the request.
Returns the routed WSGI app's response or defers to the extended
application.
"""
match = req.environ['wsgiorg.routing_args'][1]
if not match:
return req.environ['extended.app']
app = match['controller']
return app
class ExtensionManager(object):
"""Load extensions from the configured extension path.
See nova/tests/api/openstack/extensions/foxinsocks/extension.py for an
example extension implementation.
"""
def __init__(self, path):
LOG.debug(_('Initializing extension manager.'))
self.path = path
self.extensions = {}
self._load_all_extensions()
def get_resources(self):
"""Returns a list of ResourceExtension objects."""
resources = []
extension_resource = ExtensionsResource(self)
res_ext = ResourceExtension('extensions',
extension_resource,
serializer=extension_resource.serializer)
resources.append(res_ext)
for alias, ext in self.extensions.iteritems():
try:
resources.extend(ext.get_resources())
except AttributeError:
                # NOTE(dprince): Extensions aren't required to have resource
# extensions
pass
return resources
def get_actions(self):
"""Returns a list of ActionExtension objects."""
actions = []
for alias, ext in self.extensions.iteritems():
try:
actions.extend(ext.get_actions())
except AttributeError:
                # NOTE(dprince): Extensions aren't required to have action
# extensions
pass
return actions
def get_request_extensions(self):
"""Returns a list of RequestExtension objects."""
request_exts = []
for alias, ext in self.extensions.iteritems():
try:
request_exts.extend(ext.get_request_extensions())
except AttributeError:
                # NOTE(dprince): Extensions aren't required to have request
# extensions
pass
return request_exts
def _check_extension(self, extension):
"""Checks for required methods in extension objects."""
try:
LOG.debug(_('Ext name: %s'), extension.get_name())
LOG.debug(_('Ext alias: %s'), extension.get_alias())
LOG.debug(_('Ext description: %s'), extension.get_description())
LOG.debug(_('Ext namespace: %s'), extension.get_namespace())
LOG.debug(_('Ext updated: %s'), extension.get_updated())
except AttributeError as ex:
LOG.exception(_("Exception loading extension: %s"), unicode(ex))
return False
return True
def _load_all_extensions(self):
"""Load extensions from the configured path.
Load extensions from the configured path. The extension name is
constructed from the module_name. If your extension module was named
widgets.py the extension class within that module should be
'Widgets'.
In addition, extensions are loaded from the 'contrib' directory.
See nova/tests/api/openstack/extensions/foxinsocks.py for an example
extension implementation.
"""
if os.path.exists(self.path):
self._load_all_extensions_from_path(self.path)
contrib_path = os.path.join(os.path.dirname(__file__), "contrib")
if os.path.exists(contrib_path):
self._load_all_extensions_from_path(contrib_path)
def _load_all_extensions_from_path(self, path):
for f in os.listdir(path):
LOG.debug(_('Loading extension file: %s'), f)
mod_name, file_ext = os.path.splitext(os.path.split(f)[-1])
ext_path = os.path.join(path, f)
if file_ext.lower() == '.py' and not mod_name.startswith('_'):
mod = imp.load_source(mod_name, ext_path)
ext_name = mod_name[0].upper() + mod_name[1:]
new_ext_class = getattr(mod, ext_name, None)
if not new_ext_class:
LOG.warn(_('Did not find expected name '
'"%(ext_name)s" in %(file)s'),
{'ext_name': ext_name,
'file': ext_path})
continue
new_ext = new_ext_class()
self.add_extension(new_ext)
def add_extension(self, ext):
# Do nothing if the extension doesn't check out
if not self._check_extension(ext):
return
alias = ext.get_alias()
LOG.debug(_('Loaded extension: %s'), alias)
if alias in self.extensions:
raise exception.Error("Found duplicate extension: %s" % alias)
self.extensions[alias] = ext
class RequestExtension(object):
"""Extend requests and responses of core nova OpenStack API resources.
Provide a way to add data to responses and handle custom request data
that is sent to core nova OpenStack API controllers.
"""
def __init__(self, method, url_route, handler):
self.url_route = url_route
self.handler = handler
self.conditions = dict(method=[method])
self.key = "%s-%s" % (method, url_route)
class ActionExtension(object):
"""Add custom actions to core nova OpenStack API resources."""
def __init__(self, collection, action_name, handler):
self.collection = collection
self.action_name = action_name
self.handler = handler
class ResourceExtension(object):
"""Add top level resources to the OpenStack API in nova."""
def __init__(self, collection, controller, parent=None,
collection_actions=None, member_actions=None,
deserializer=None, serializer=None):
if not collection_actions:
collection_actions = {}
if not member_actions:
member_actions = {}
self.collection = collection
self.controller = controller
self.parent = parent
self.collection_actions = collection_actions
self.member_actions = member_actions
self.deserializer = deserializer
self.serializer = serializer
class ExtensionsXMLSerializer(wsgi.XMLDictSerializer):
def __init__(self):
self.nsmap = {None: DEFAULT_XMLNS, 'atom': XMLNS_ATOM}
def show(self, ext_dict):
ext = etree.Element('extension', nsmap=self.nsmap)
self._populate_ext(ext, ext_dict['extension'])
return self._to_xml(ext)
def index(self, exts_dict):
exts = etree.Element('extensions', nsmap=self.nsmap)
for ext_dict in exts_dict['extensions']:
ext = etree.SubElement(exts, 'extension')
self._populate_ext(ext, ext_dict)
return self._to_xml(exts)
def _populate_ext(self, ext_elem, ext_dict):
"""Populate an extension xml element from a dict."""
ext_elem.set('name', ext_dict['name'])
ext_elem.set('namespace', ext_dict['namespace'])
ext_elem.set('alias', ext_dict['alias'])
ext_elem.set('updated', ext_dict['updated'])
desc = etree.Element('description')
desc.text = ext_dict['description']
ext_elem.append(desc)
for link in ext_dict.get('links', []):
elem = etree.SubElement(ext_elem, '{%s}link' % XMLNS_ATOM)
elem.set('rel', link['rel'])
elem.set('href', link['href'])
elem.set('type', link['type'])
return ext_elem
def _to_xml(self, root):
"""Convert the xml object to an xml string."""
return etree.tostring(root, encoding='UTF-8')
| 35.766234
| 79
| 0.605042
|
import imp
import os
import routes
import webob.dec
import webob.exc
import logging
from lxml import etree
from melange.openstack.common import exception
from melange.openstack.common import wsgi
LOG = logging.getLogger('extensions')
DEFAULT_XMLNS = "http://docs.openstack.org/"
XMLNS_ATOM = "http://www.w3.org/2005/Atom"
class ExtensionDescriptor(object):
def get_name(self):
raise NotImplementedError()
def get_alias(self):
raise NotImplementedError()
def get_description(self):
raise NotImplementedError()
def get_namespace(self):
raise NotImplementedError()
def get_updated(self):
raise NotImplementedError()
def get_resources(self):
resources = []
return resources
def get_actions(self):
actions = []
return actions
def get_request_extensions(self):
request_exts = []
return request_exts
class ActionExtensionController(object):
def __init__(self, application):
self.application = application
self.action_handlers = {}
def add_action(self, action_name, handler):
self.action_handlers[action_name] = handler
def action(self, req, id, body):
for action_name, handler in self.action_handlers.iteritems():
if action_name in body:
return handler(body, req, id)
res = self.application
return res
class ActionExtensionResource(wsgi.Resource):
def __init__(self, application):
controller = ActionExtensionController(application)
wsgi.Resource.__init__(self, controller)
def add_action(self, action_name, handler):
self.controller.add_action(action_name, handler)
class RequestExtensionController(object):
def __init__(self, application):
self.application = application
self.handlers = []
def add_handler(self, handler):
self.handlers.append(handler)
def process(self, req, *args, **kwargs):
res = req.get_response(self.application)
for handler in self.handlers:
res = handler(req, res)
return res
class RequestExtensionResource(wsgi.Resource):
def __init__(self, application):
controller = RequestExtensionController(application)
wsgi.Resource.__init__(self, controller)
def add_handler(self, handler):
self.controller.add_handler(handler)
class ExtensionsResource(wsgi.Resource):
def __init__(self, extension_manager):
self.extension_manager = extension_manager
body_serializers = {'application/xml': ExtensionsXMLSerializer()}
serializer = wsgi.ResponseSerializer(body_serializers=body_serializers)
super(ExtensionsResource, self).__init__(self, None, serializer)
def _translate(self, ext):
ext_data = {}
ext_data['name'] = ext.get_name()
ext_data['alias'] = ext.get_alias()
ext_data['description'] = ext.get_description()
ext_data['namespace'] = ext.get_namespace()
ext_data['updated'] = ext.get_updated()
ext_data['links'] = []
return ext_data
def index(self, req):
extensions = []
for _alias, ext in self.extension_manager.extensions.iteritems():
extensions.append(self._translate(ext))
return dict(extensions=extensions)
def show(self, req, id):
ext = self.extension_manager.extensions.get(id, None)
if not ext:
raise webob.exc.HTTPNotFound(
_("Extension with alias %s does not exist") % id)
return dict(extension=self._translate(ext))
def delete(self, req, id):
raise webob.exc.HTTPNotFound()
def create(self, req):
raise webob.exc.HTTPNotFound()
class ExtensionMiddleware(wsgi.Middleware):
@classmethod
def factory(cls, global_config, **local_config):
def _factory(app):
return cls(app, global_config, **local_config)
return _factory
def _action_ext_resources(self, application, ext_mgr, mapper):
action_resources = {}
for action in ext_mgr.get_actions():
if not action.collection in action_resources.keys():
resource = ActionExtensionResource(application)
mapper.connect("/%s/:(id)/action.:(format)" %
action.collection,
action='action',
controller=resource,
conditions=dict(method=['POST']))
mapper.connect("/%s/:(id)/action" %
action.collection,
action='action',
controller=resource,
conditions=dict(method=['POST']))
action_resources[action.collection] = resource
return action_resources
def _request_ext_resources(self, application, ext_mgr, mapper):
request_ext_resources = {}
for req_ext in ext_mgr.get_request_extensions():
if not req_ext.key in request_ext_resources.keys():
resource = RequestExtensionResource(application)
mapper.connect(req_ext.url_route + '.:(format)',
action='process',
controller=resource,
conditions=req_ext.conditions)
mapper.connect(req_ext.url_route,
action='process',
controller=resource,
conditions=req_ext.conditions)
request_ext_resources[req_ext.key] = resource
return request_ext_resources
def __init__(self, application, config, ext_mgr=None):
ext_mgr = ext_mgr or ExtensionManager(
config['api_extensions_path'])
mapper = routes.Mapper()
for resource_ext in ext_mgr.get_resources():
LOG.debug(_('Extended resource: %s'), resource_ext.collection)
controller_resource = wsgi.Resource(resource_ext.controller,
resource_ext.deserializer,
resource_ext.serializer)
self._map_custom_collection_actions(resource_ext, mapper,
controller_resource)
kargs = dict(controller=controller_resource,
collection=resource_ext.collection_actions,
member=resource_ext.member_actions)
if resource_ext.parent:
kargs['parent_resource'] = resource_ext.parent
mapper.resource(resource_ext.collection,
resource_ext.collection, **kargs)
action_resources = self._action_ext_resources(application, ext_mgr,
mapper)
for action in ext_mgr.get_actions():
LOG.debug(_('Extended action: %s'), action.action_name)
resource = action_resources[action.collection]
resource.add_action(action.action_name, action.handler)
req_controllers = self._request_ext_resources(application, ext_mgr,
mapper)
for request_ext in ext_mgr.get_request_extensions():
LOG.debug(_('Extended request: %s'), request_ext.key)
controller = req_controllers[request_ext.key]
controller.add_handler(request_ext.handler)
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
mapper)
super(ExtensionMiddleware, self).__init__(application)
def _map_custom_collection_actions(self, resource_ext, mapper,
controller_resource):
for action, method in resource_ext.collection_actions.iteritems():
parent = resource_ext.parent
conditions = dict(method=[method])
path = "/%s/%s" % (resource_ext.collection, action)
path_prefix = ""
if parent:
path_prefix = "/%s/{%s_id}" % (parent["collection_name"],
parent["member_name"])
with mapper.submapper(controller=controller_resource,
action=action,
path_prefix=path_prefix,
conditions=conditions) as submap:
submap.connect(path)
submap.connect("%s.:(format)" % path)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
req.environ['extended.app'] = self.application
return self._router
@staticmethod
@webob.dec.wsgify(RequestClass=wsgi.Request)
def _dispatch(req):
match = req.environ['wsgiorg.routing_args'][1]
if not match:
return req.environ['extended.app']
app = match['controller']
return app
class ExtensionManager(object):
def __init__(self, path):
LOG.debug(_('Initializing extension manager.'))
self.path = path
self.extensions = {}
self._load_all_extensions()
def get_resources(self):
resources = []
extension_resource = ExtensionsResource(self)
res_ext = ResourceExtension('extensions',
extension_resource,
serializer=extension_resource.serializer)
resources.append(res_ext)
for alias, ext in self.extensions.iteritems():
try:
resources.extend(ext.get_resources())
except AttributeError:
# extensions
pass
return resources
def get_actions(self):
actions = []
for alias, ext in self.extensions.iteritems():
try:
actions.extend(ext.get_actions())
except AttributeError:
                # NOTE(dprince): Extensions aren't required to have action
pass
return actions
def get_request_extensions(self):
request_exts = []
for alias, ext in self.extensions.iteritems():
try:
request_exts.extend(ext.get_request_extensions())
except AttributeError:
# extensions
pass
return request_exts
def _check_extension(self, extension):
try:
LOG.debug(_('Ext name: %s'), extension.get_name())
LOG.debug(_('Ext alias: %s'), extension.get_alias())
LOG.debug(_('Ext description: %s'), extension.get_description())
LOG.debug(_('Ext namespace: %s'), extension.get_namespace())
LOG.debug(_('Ext updated: %s'), extension.get_updated())
except AttributeError as ex:
LOG.exception(_("Exception loading extension: %s"), unicode(ex))
return False
return True
def _load_all_extensions(self):
if os.path.exists(self.path):
self._load_all_extensions_from_path(self.path)
contrib_path = os.path.join(os.path.dirname(__file__), "contrib")
if os.path.exists(contrib_path):
self._load_all_extensions_from_path(contrib_path)
def _load_all_extensions_from_path(self, path):
for f in os.listdir(path):
LOG.debug(_('Loading extension file: %s'), f)
mod_name, file_ext = os.path.splitext(os.path.split(f)[-1])
ext_path = os.path.join(path, f)
if file_ext.lower() == '.py' and not mod_name.startswith('_'):
mod = imp.load_source(mod_name, ext_path)
ext_name = mod_name[0].upper() + mod_name[1:]
new_ext_class = getattr(mod, ext_name, None)
if not new_ext_class:
LOG.warn(_('Did not find expected name '
'"%(ext_name)s" in %(file)s'),
{'ext_name': ext_name,
'file': ext_path})
continue
new_ext = new_ext_class()
self.add_extension(new_ext)
def add_extension(self, ext):
# Do nothing if the extension doesn't check out
if not self._check_extension(ext):
return
alias = ext.get_alias()
LOG.debug(_('Loaded extension: %s'), alias)
if alias in self.extensions:
raise exception.Error("Found duplicate extension: %s" % alias)
self.extensions[alias] = ext
class RequestExtension(object):
def __init__(self, method, url_route, handler):
self.url_route = url_route
self.handler = handler
self.conditions = dict(method=[method])
self.key = "%s-%s" % (method, url_route)
class ActionExtension(object):
def __init__(self, collection, action_name, handler):
self.collection = collection
self.action_name = action_name
self.handler = handler
class ResourceExtension(object):
def __init__(self, collection, controller, parent=None,
collection_actions=None, member_actions=None,
deserializer=None, serializer=None):
if not collection_actions:
collection_actions = {}
if not member_actions:
member_actions = {}
self.collection = collection
self.controller = controller
self.parent = parent
self.collection_actions = collection_actions
self.member_actions = member_actions
self.deserializer = deserializer
self.serializer = serializer
class ExtensionsXMLSerializer(wsgi.XMLDictSerializer):
def __init__(self):
self.nsmap = {None: DEFAULT_XMLNS, 'atom': XMLNS_ATOM}
def show(self, ext_dict):
ext = etree.Element('extension', nsmap=self.nsmap)
self._populate_ext(ext, ext_dict['extension'])
return self._to_xml(ext)
def index(self, exts_dict):
exts = etree.Element('extensions', nsmap=self.nsmap)
for ext_dict in exts_dict['extensions']:
ext = etree.SubElement(exts, 'extension')
self._populate_ext(ext, ext_dict)
return self._to_xml(exts)
def _populate_ext(self, ext_elem, ext_dict):
ext_elem.set('name', ext_dict['name'])
ext_elem.set('namespace', ext_dict['namespace'])
ext_elem.set('alias', ext_dict['alias'])
ext_elem.set('updated', ext_dict['updated'])
desc = etree.Element('description')
desc.text = ext_dict['description']
ext_elem.append(desc)
for link in ext_dict.get('links', []):
elem = etree.SubElement(ext_elem, '{%s}link' % XMLNS_ATOM)
elem.set('rel', link['rel'])
elem.set('href', link['href'])
elem.set('type', link['type'])
return ext_elem
def _to_xml(self, root):
return etree.tostring(root, encoding='UTF-8')
| true
| true
|
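_load_all_extensions_from_path above derives the class name from the module name (widgets.py -> Widgets); a minimal hedged sketch of such a module (all names are hypothetical, and WidgetsController stands in for a real WSGI controller):
# widgets.py, placed in the configured api_extensions_path
from melange.openstack.common.extensions import ResourceExtension
class Widgets(object):
    def get_name(self):
        return 'Widgets'
    def get_alias(self):
        return 'WIDGETS'
    def get_description(self):
        return 'Adds a top-level /widgets resource'
    def get_namespace(self):
        return 'http://example.com/api/ext/widgets/v1.0'
    def get_updated(self):
        return '2012-01-01T00:00:00-00:00'
    def get_resources(self):
        return [ResourceExtension('widgets', WidgetsController())]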
1c4112b6bb5005fd8c84c3af4197a60b1f213f99
| 960
|
py
|
Python
|
setup.py
|
hey-booster/heybooster-toolkit
|
e423a7d6e96df8440ea0599ef8dd5a0816ef0fa8
|
[
"MIT"
] | 10
|
2021-09-13T17:19:58.000Z
|
2021-12-12T21:03:59.000Z
|
setup.py
|
hey-booster/heybooster-toolkit
|
e423a7d6e96df8440ea0599ef8dd5a0816ef0fa8
|
[
"MIT"
] | 1
|
2021-09-17T12:38:42.000Z
|
2021-09-17T12:38:42.000Z
|
setup.py
|
hey-booster/heybooster-toolkit
|
e423a7d6e96df8440ea0599ef8dd5a0816ef0fa8
|
[
"MIT"
] | 2
|
2021-09-14T07:23:06.000Z
|
2021-09-15T22:15:06.000Z
|
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
with open("version.txt", "r") as version_file:
version = version_file.read()
setuptools.setup(
name="heybooster-toolkit",
version=version,
author="Heybooster",
author_email="hey@heybooster.ai",
description="Heybooster Toolkit",
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
url="https://github.com/hey-booster/heybooster-toolkit",
project_urls={
"Bug Tracker": "https://github.com/hey-booster/heybooster-toolkit/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
'pymongo==3.10.1',
'requests',
'raven==6.10.0'
],
python_requires=">=3.6",
)
| 28.235294
| 82
| 0.644792
|
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
with open("version.txt", "r") as version_file:
version = version_file.read()
setuptools.setup(
name="heybooster-toolkit",
version=version,
author="Heybooster",
author_email="hey@heybooster.ai",
description="Heybooster Toolkit",
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
url="https://github.com/hey-booster/heybooster-toolkit",
project_urls={
"Bug Tracker": "https://github.com/hey-booster/heybooster-toolkit/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
'pymongo==3.10.1',
'requests',
'raven==6.10.0'
],
python_requires=">=3.6",
)
| true
| true
|
1c4112fc94d01467ba5ebf1d3aeb0556086b69cf
| 2,261
|
py
|
Python
|
fonduer/snorkel/models/meta.py
|
leewaymay/839_fonduer
|
1692f018ef113d88dca4ede69cc2ead55b7b1003
|
[
"Apache-2.0"
] | 1
|
2018-05-31T02:44:00.000Z
|
2018-05-31T02:44:00.000Z
|
fonduer/snorkel/models/meta.py
|
leewaymay/839_fonduer
|
1692f018ef113d88dca4ede69cc2ead55b7b1003
|
[
"Apache-2.0"
] | null | null | null |
fonduer/snorkel/models/meta.py
|
leewaymay/839_fonduer
|
1692f018ef113d88dca4ede69cc2ead55b7b1003
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import *
import getpass
import os
from sqlalchemy import create_engine, event
from sqlalchemy.engine import Engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from urllib.parse import urlparse
# Sets connection string
snorkel_conn_string = os.environ['SNORKELDB'] if 'SNORKELDB' in os.environ and os.environ['SNORKELDB'] != '' \
else 'sqlite:///' + os.getcwd() + os.sep + 'snorkel.db'
# Modified by Zhewen
import sys
# Modified by Xiuyuan
DBNAME = snorkel_conn_string.split('/')[-1]
if 'linux' in sys.platform.lower():
DBUSER = 'postgres'
else:
DBUSER = os.environ.get('SNORKELDBUSER', getpass.getuser())
DBPORT = urlparse(snorkel_conn_string).port
# Sets global variable indicating whether we are using Postgres
snorkel_postgres = snorkel_conn_string.startswith('postgres')
# Automatically turns on foreign key enforcement for SQLite
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
if snorkel_conn_string.startswith('sqlite'):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
# Defines procedure for setting up a sessionmaker
def new_sessionmaker():
# Turning on autocommit for Postgres, see http://oddbird.net/2014/06/14/sqlalchemy-postgres-autocommit/
# Otherwise any e.g. query starts a transaction, locking tables... very bad for e.g. multiple notebooks
# open, multiple processes, etc.
if snorkel_postgres:
snorkel_engine = create_engine(snorkel_conn_string, isolation_level="AUTOCOMMIT")
else:
snorkel_engine = create_engine(snorkel_conn_string)
# New sessionmaker
SnorkelSession = sessionmaker(bind=snorkel_engine)
return SnorkelSession
# We initialize the engine within the models module because models' schema can depend on
# which data types are supported by the engine
SnorkelSession = new_sessionmaker()
snorkel_engine = SnorkelSession.kw['bind']
SnorkelBase = declarative_base(name='SnorkelBase', cls=object)
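# A minimal usage sketch (assumes this module is importable as
# snorkel.models.meta; `Document` is a hypothetical model, not part of this file):
#
#   from snorkel.models.meta import SnorkelBase, SnorkelSession, snorkel_engine
#   from sqlalchemy import Column, Integer, String
#
#   class Document(SnorkelBase):
#       __tablename__ = 'document'
#       id = Column(Integer, primary_key=True)
#       name = Column(String)
#
#   SnorkelBase.metadata.create_all(snorkel_engine)
#   session = SnorkelSession()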
| 35.328125
| 110
| 0.773109
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import *
import getpass
import os
from sqlalchemy import create_engine, event
from sqlalchemy.engine import Engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from urllib.parse import urlparse
snorkel_conn_string = os.environ['SNORKELDB'] if 'SNORKELDB' in os.environ and os.environ['SNORKELDB'] != '' \
else 'sqlite:///' + os.getcwd() + os.sep + 'snorkel.db'
import sys
DBNAME = snorkel_conn_string.split('/')[-1]
if 'linux' in sys.platform.lower():
DBUSER = 'postgres'
else:
DBUSER = os.environ.get('SNORKELDBUSER', getpass.getuser())
DBPORT = urlparse(snorkel_conn_string).port
snorkel_postgres = snorkel_conn_string.startswith('postgres')
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
if snorkel_conn_string.startswith('sqlite'):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
def new_sessionmaker():
if snorkel_postgres:
snorkel_engine = create_engine(snorkel_conn_string, isolation_level="AUTOCOMMIT")
else:
snorkel_engine = create_engine(snorkel_conn_string)
SnorkelSession = sessionmaker(bind=snorkel_engine)
return SnorkelSession
SnorkelSession = new_sessionmaker()
snorkel_engine = SnorkelSession.kw['bind']
SnorkelBase = declarative_base(name='SnorkelBase', cls=object)
| true
| true
|
1c4113e495f403e84f0a5a10135d8ddb907c297a
| 2,911
|
py
|
Python
|
manage.py
|
zuozh11/epub2cbz
|
2b24533d43924fb3503bbbdbf8bdc35c86d5e066
|
[
"MIT"
] | null | null | null |
manage.py
|
zuozh11/epub2cbz
|
2b24533d43924fb3503bbbdbf8bdc35c86d5e066
|
[
"MIT"
] | null | null | null |
manage.py
|
zuozh11/epub2cbz
|
2b24533d43924fb3503bbbdbf8bdc35c86d5e066
|
[
"MIT"
] | null | null | null |
import os
import shutil
import zipfile
from PIL import Image
from parent import console, progress
class FileManager(object):
"""
This class is used for file interactions.
It has the following methods:
    set_directory() --- Sets up the working directory.
    img_handler() --- Extracts from the compressed file to the output directory and processes the picture.
    package() --- Packages the output into a compressed file with a specific suffix.
    package_folder() --- Packages the output into a folder.
    To create an instance of this object, pass in the name of the epub file and the target path.
"""
def __init__(self, epub_file, path):
        # epub file
        self.epub_file = epub_file
        # the specified output path
        self.path = path
        self.zfile = None
        # name of the archive
        self.title = ''
        # working directory, removed when the task finishes
self.work_directory = ''
def set_directory(self, directory):
self.title = directory
self.work_directory = os.path.join(self.path, '.tempworkdir', directory)
def img_handler(self, file, name, rotate_flag):
        # extract into the working directory
        src = self.zfile.extract(file, self.work_directory)
        dst = os.path.join(self.work_directory, name)
        # move to the root working directory, renaming in sequence
        shutil.move(src, dst)
        if rotate_flag:
            # image processing: correct the image orientation
img = Image.open(dst)
w, h = img.size
if h > 2200:
img.transpose(Image.ROTATE_270).save(dst)
console.log('[Correct orientation]', '[bold red]-->[/bold red]', f'{self.title}/{name}')
img.close()
def package(self, suffix, task_id):
zippath = os.path.join(self.path, self.title) + suffix
with zipfile.ZipFile(zippath, mode='w', compression=zipfile.ZIP_STORED) as zf:
file_names = list(filter(lambda x: os.path.isfile(x),
[os.path.join(self.work_directory, x) for x in os.listdir(self.work_directory)]))
for fn in file_names:
zf.write(fn, arcname=os.path.split(fn)[1])
progress.advance(task_id, 30 / len(file_names))
os.chmod(zippath, 448)
def package_folder(self, task_id):
folder = os.path.join(self.path, self.title)
if not os.path.exists(folder):
os.mkdir(folder)
file_names = list(filter(lambda x: os.path.isfile(x),
[os.path.join(self.work_directory, x) for x in os.listdir(self.work_directory)]))
for fn in file_names:
shutil.move(fn, os.path.join(folder, os.path.split(fn)[1]))
progress.advance(task_id, 30 / len(file_names))
def __enter__(self):
self.zfile = zipfile.ZipFile(self.epub_file)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.zfile is not None:
self.zfile.close()
| 33.45977
| 118
| 0.59945
|
import os
import shutil
import zipfile
from PIL import Image
from parent import console, progress
class FileManager(object):
def __init__(self, epub_file, path):
self.epub_file = epub_file
self.path = path
self.zfile = None
self.title = ''
self.work_directory = ''
def set_directory(self, directory):
self.title = directory
self.work_directory = os.path.join(self.path, '.tempworkdir', directory)
def img_handler(self, file, name, rotate_flag):
src = self.zfile.extract(file, self.work_directory)
dst = os.path.join(self.work_directory, name)
shutil.move(src, dst)
if rotate_flag:
img = Image.open(dst)
w, h = img.size
if h > 2200:
img.transpose(Image.ROTATE_270).save(dst)
console.log('[Correct orientation]', '[bold red]-->[/bold red]', f'{self.title}/{name}')
img.close()
def package(self, suffix, task_id):
zippath = os.path.join(self.path, self.title) + suffix
with zipfile.ZipFile(zippath, mode='w', compression=zipfile.ZIP_STORED) as zf:
file_names = list(filter(lambda x: os.path.isfile(x),
[os.path.join(self.work_directory, x) for x in os.listdir(self.work_directory)]))
for fn in file_names:
zf.write(fn, arcname=os.path.split(fn)[1])
progress.advance(task_id, 30 / len(file_names))
os.chmod(zippath, 448)
def package_folder(self, task_id):
folder = os.path.join(self.path, self.title)
if not os.path.exists(folder):
os.mkdir(folder)
file_names = list(filter(lambda x: os.path.isfile(x),
[os.path.join(self.work_directory, x) for x in os.listdir(self.work_directory)]))
for fn in file_names:
shutil.move(fn, os.path.join(folder, os.path.split(fn)[1]))
progress.advance(task_id, 30 / len(file_names))
def __enter__(self):
self.zfile = zipfile.ZipFile(self.epub_file)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.zfile is not None:
self.zfile.close()
| true
| true
|
1c4114bff0fe7e0c88d7679111a14cc2daab0ad8
| 5,122
|
py
|
Python
|
adlib/tests/adversaries/k_insertion_test.py
|
xyvivian/adlib
|
79a93baa8aa542080bbf55734168eb89317df83c
|
[
"MIT"
] | null | null | null |
adlib/tests/adversaries/k_insertion_test.py
|
xyvivian/adlib
|
79a93baa8aa542080bbf55734168eb89317df83c
|
[
"MIT"
] | null | null | null |
adlib/tests/adversaries/k_insertion_test.py
|
xyvivian/adlib
|
79a93baa8aa542080bbf55734168eb89317df83c
|
[
"MIT"
] | null | null | null |
# k_insertion_test.py
# Tests the k-insertion implementation
# Matthew Sedam
from adlib.adversaries.k_insertion import KInsertion
from adlib.learners import SimpleLearner
from adlib.utils.common import calculate_correct_percentages
from copy import deepcopy
from data_reader.dataset import EmailDataset
from data_reader.operations import load_dataset
from sklearn import svm
import sys
import time
def test_k_insertion():
"""
Use as follows:
python3 adlib/tests/adversaries/k_insertion_test.py NUMBER-TO-ADD
"""
print()
print('###################################################################')
print('START k-insertion attack.\n')
begin = time.time()
# Data processing unit
# The path is an index of 400 testing samples(raw email data).
dataset = EmailDataset(path='./data_reader/data/raw/trec05p-1/test-400',
binary=False, raw=True)
training_data, predict_data = dataset.split({'train': 20, 'test': 80})
training_data = load_dataset(training_data)
predict_data = load_dataset(predict_data)
print('Training sample size: ', len(training_data), '/400\n', sep='')
    # sys.argv[1] is NUMBER-TO-ADD when given on the command line
    if len(sys.argv) > 1:
        number_to_add = int(sys.argv[1])
    else:
        number_to_add = int(0.25 * len(training_data))
# Setting the default learner
# Test simple learner svm
learning_model = svm.SVC(probability=True, kernel='linear')
learner = SimpleLearner(learning_model, training_data)
learner.train()
original_pred_labels = learner.predict(training_data)
before_attack_label = original_pred_labels[0]
orig_learner = deepcopy(learner)
# Do the attack
attacker = KInsertion(learner,
training_data[0],
number_to_add=number_to_add,
verbose=True)
attack_data = attacker.attack(training_data)
# Retrain the model with poisoned data
learning_model = svm.SVC(probability=True, kernel='linear')
learner = SimpleLearner(learning_model, attack_data)
learner.train()
print('Number of added instances: ', len(attack_data) - len(training_data))
############################################################################
# Calculate statistics with training data
attack_pred_labels = learner.predict(training_data) # predict w/ orig label
after_attack_label = attack_pred_labels[0]
(orig_precent_correct,
attack_precent_correct,
difference) = calculate_correct_percentages(original_pred_labels,
attack_pred_labels,
training_data)
print('###################################################################')
print('Predictions with training dataset:')
print('Original correct percentage: ', orig_precent_correct, '%')
print('Attack correct percentage: ', attack_precent_correct, '%')
print('Difference: ', difference, '%')
############################################################################
# Calculate statistics with predict data (other half of dataset)
original_pred_labels = orig_learner.predict(predict_data)
attack_pred_labels = learner.predict(predict_data)
(orig_precent_correct,
attack_precent_correct,
difference) = calculate_correct_percentages(original_pred_labels,
attack_pred_labels,
predict_data)
print('###################################################################')
print('Predictions with other half of dataset:')
print('Original correct percentage: ', orig_precent_correct, '%')
print('Attack correct percentage: ', attack_precent_correct, '%')
print('Difference: ', difference, '%')
############################################################################
# Calculate statistics with predict data (other half of dataset)
print('###################################################################')
print('Selected instance true label: ', training_data[0].get_label())
print('Selected instance predicted label BEFORE attack: ',
before_attack_label)
print('Selected instance predicted label AFTER attack: ',
after_attack_label)
############################################################################
# Output loss calculations
print('###################################################################')
print('poison_instance loss before attack: ',
round(attacker.poison_loss_before, 4))
print('poison_instance loss after attack: ',
round(attacker.poison_loss_after, 4))
print('poison_instance loss difference: ',
round(attacker.poison_loss_after - attacker.poison_loss_before, 4))
end = time.time()
print('\nTotal time: ', round(begin - end, 2), 's', '\n', sep='')
print('\nEND k-insertion attack.')
print('###################################################################')
print()
if __name__ == '__main__':
test_k_insertion()
| 37.661765
| 80
| 0.572237
|
from adlib.adversaries.k_insertion import KInsertion
from adlib.learners import SimpleLearner
from adlib.utils.common import calculate_correct_percentages
from copy import deepcopy
from data_reader.dataset import EmailDataset
from data_reader.operations import load_dataset
from sklearn import svm
import sys
import time
def test_k_insertion():
print()
print('###################################################################')
print('START k-insertion attack.\n')
begin = time.time()
dataset = EmailDataset(path='./data_reader/data/raw/trec05p-1/test-400',
binary=False, raw=True)
training_data, predict_data = dataset.split({'train': 20, 'test': 80})
training_data = load_dataset(training_data)
predict_data = load_dataset(predict_data)
print('Training sample size: ', len(training_data), '/400\n', sep='')
    if len(sys.argv) > 1:
        number_to_add = int(sys.argv[1])
    else:
        number_to_add = int(0.25 * len(training_data))
learning_model = svm.SVC(probability=True, kernel='linear')
learner = SimpleLearner(learning_model, training_data)
learner.train()
original_pred_labels = learner.predict(training_data)
before_attack_label = original_pred_labels[0]
orig_learner = deepcopy(learner)
attacker = KInsertion(learner,
training_data[0],
number_to_add=number_to_add,
verbose=True)
attack_data = attacker.attack(training_data)
learning_model = svm.SVC(probability=True, kernel='linear')
learner = SimpleLearner(learning_model, attack_data)
learner.train()
print('Number of added instances: ', len(attack_data) - len(training_data))
| true
| true
|
1c411506fb72d22c7e7a358de91454523fcc8674
| 3,271
|
py
|
Python
|
bot/utils/i18n.py
|
Clutter-Development/Clutter
|
6b725c016a439958caaa7d88bacae8e2b11ca272
|
[
"CC0-1.0"
] | 6
|
2022-02-04T17:11:19.000Z
|
2022-03-05T09:14:39.000Z
|
bot/utils/i18n.py
|
Clutter-Development/Clutter
|
6b725c016a439958caaa7d88bacae8e2b11ca272
|
[
"CC0-1.0"
] | 2
|
2022-02-08T16:53:42.000Z
|
2022-02-19T07:44:19.000Z
|
bot/utils/i18n.py
|
Clutter-Development/Clutter
|
6b725c016a439958caaa7d88bacae8e2b11ca272
|
[
"CC0-1.0"
] | 2
|
2022-02-18T21:28:57.000Z
|
2022-02-23T17:08:18.000Z
|
from __future__ import annotations
import os
from typing import TYPE_CHECKING
import json5
from .database import find_in_dict
from .errors import UnknownTranslationString
if TYPE_CHECKING:
import discord
from core.bot import Clutter
from discord.ext import commands
class I18N:
def __init__(self, bot: Clutter, lang_file_dir: str, /):
self._db = bot.db
self.languages = {}
self.fallback = bot.default_language
for lang_file in os.listdir(lang_file_dir):
if lang_file.endswith(".json5"):
with open(os.path.join(lang_file_dir, lang_file)) as f:
self.languages[lang_file[:-6]] = json5.load(f)
def translate_with_locale(self, language: str, text: str, /) -> str:
"""Translate a string with a locale. If the translation is not found, the fallback translation is used. If the fallback translation is not found, an error is raised.
Args:
language (str): The language to use.
text (str): The string code to get translation of.
Raises:
            UnknownTranslationString: If the fallback translation is not found.
Returns:
str: The translated string.
"""
path = text.split(".")
value = find_in_dict(
self.languages[language],
path,
default=find_in_dict(self.languages[self.fallback], path),
)
if value is None:
            raise UnknownTranslationString(f"Could not find translation for {text}")
return value
async def __call__(
self,
ctx: discord.Message | discord.Interaction | commands.Context,
text: str,
/,
*,
use_guild: bool = False,
) -> str:
"""Translates a string code.
Args:
ctx (discord.Message | discord.Interaction | commands.Context): The language context to use.
            text (str): The string code to get translation of.
            use_guild (bool, optional): Whether to use only the guild language and ignore the user's language. Defaults to False.
Returns:
str: The translated string.
"""
is_interaction = isinstance(ctx, discord.Interaction)
async def determine_guild_language() -> str:
if (is_interaction and not ctx.guild_id) or (not is_interaction and not ctx.guild): # type: ignore
return self.fallback
g_locale = ctx.guild_locale if is_interaction else ctx.guild.preferred_locale # type: ignore
return await self._db.get(
f"guilds.{ctx.guild_id if is_interaction else ctx.guild.id}.language", default=g_locale or self.fallback # type: ignore
)
guild_exists = bool(ctx.guild_id if is_interaction else ctx.guild)
if use_guild and guild_exists:
lang = await determine_guild_language()
else:
user_locale = ctx.locale if is_interaction else None
lang = await self._db.get(
f"users.{ctx.user.id if is_interaction else ctx.author.id}.language",
default=user_locale or await determine_guild_language(),
)
return self.translate_with_locale(lang, text)
| 36.344444
| 173
| 0.626414
|
from __future__ import annotations
import os
from typing import TYPE_CHECKING
import json5
from .database import find_in_dict
from .errors import UnknownTranslationString
if TYPE_CHECKING:
import discord
from core.bot import Clutter
from discord.ext import commands
class I18N:
def __init__(self, bot: Clutter, lang_file_dir: str, /):
self._db = bot.db
self.languages = {}
self.fallback = bot.default_language
for lang_file in os.listdir(lang_file_dir):
if lang_file.endswith(".json5"):
with open(os.path.join(lang_file_dir, lang_file)) as f:
self.languages[lang_file[:-6]] = json5.load(f)
def translate_with_locale(self, language: str, text: str, /) -> str:
path = text.split(".")
value = find_in_dict(
self.languages[language],
path,
default=find_in_dict(self.languages[self.fallback], path),
)
if value is None:
            raise UnknownTranslationString(f"Could not find translation for {text}")
return value
async def __call__(
self,
ctx: discord.Message | discord.Interaction | commands.Context,
text: str,
/,
*,
use_guild: bool = False,
) -> str:
is_interaction = isinstance(ctx, discord.Interaction)
async def determine_guild_language() -> str:
if (is_interaction and not ctx.guild_id) or (not is_interaction and not ctx.guild):
return self.fallback
g_locale = ctx.guild_locale if is_interaction else ctx.guild.preferred_locale
return await self._db.get(
f"guilds.{ctx.guild_id if is_interaction else ctx.guild.id}.language", default=g_locale or self.fallback
)
guild_exists = bool(ctx.guild_id if is_interaction else ctx.guild)
if use_guild and guild_exists:
lang = await determine_guild_language()
else:
user_locale = ctx.locale if is_interaction else None
lang = await self._db.get(
f"users.{ctx.user.id if is_interaction else ctx.author.id}.language",
default=user_locale or await determine_guild_language(),
)
return self.translate_with_locale(lang, text)
| true
| true
|
1c411535f505744d40d152a4e11b8664eae771a6
| 1,576
|
py
|
Python
|
src/compas_pgs/ui/Rhino/3GS/dev/PGS__session_save_cmd.py
|
BlockResearchGroup/compas-3GS
|
a1f7be3a364f93bb7560688c0e7acee8f86c535f
|
[
"MIT"
] | 2
|
2021-11-03T23:22:33.000Z
|
2021-11-03T23:22:41.000Z
|
src/compas_pgs/ui/Rhino/3GS/dev/PGS__session_save_cmd.py
|
BlockResearchGroup/compas-3GS
|
a1f7be3a364f93bb7560688c0e7acee8f86c535f
|
[
"MIT"
] | null | null | null |
src/compas_pgs/ui/Rhino/3GS/dev/PGS__session_save_cmd.py
|
BlockResearchGroup/compas-3GS
|
a1f7be3a364f93bb7560688c0e7acee8f86c535f
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
import json
import compas_rhino
from compas.utilities import DataEncoder
from compas_pgs.rhino import get_system
from compas_pgs.rhino import get_scene
from compas_pgs.rhino import select_filepath_save
__commandname__ = "PGS__session_save"
HERE = compas_rhino.get_document_dirname()
def RunCommand(is_interactive):
system = get_system()
if not system:
return
scene = get_scene()
if not scene:
return
dirname = system['session.dirname']
filename = system['session.filename']
extension = system['session.extension']
filepath = select_filepath_save(dirname, extension)
if not filepath:
return
dirname, basename = os.path.split(filepath)
filename, _ = os.path.splitext(basename)
filepath = os.path.join(dirname, filename + '.' + extension)
session = {
"data": {"form": None, "force": None},
"settings": scene.settings,
}
form = scene.get('form')[0]
if form:
session['data']['form'] = form.datastructure.to_data()
force = scene.get('force')[0]
if force:
session['data']['force'] = force.datastructure.to_data()
with open(filepath, 'w+') as f:
json.dump(session, f, cls=DataEncoder)
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
RunCommand(True)
| 22.84058
| 80
| 0.610406
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
import json
import compas_rhino
from compas.utilities import DataEncoder
from compas_pgs.rhino import get_system
from compas_pgs.rhino import get_scene
from compas_pgs.rhino import select_filepath_save
__commandname__ = "PGS__session_save"
HERE = compas_rhino.get_document_dirname()
def RunCommand(is_interactive):
system = get_system()
if not system:
return
scene = get_scene()
if not scene:
return
dirname = system['session.dirname']
filename = system['session.filename']
extension = system['session.extension']
filepath = select_filepath_save(dirname, extension)
if not filepath:
return
dirname, basename = os.path.split(filepath)
filename, _ = os.path.splitext(basename)
filepath = os.path.join(dirname, filename + '.' + extension)
session = {
"data": {"form": None, "force": None},
"settings": scene.settings,
}
form = scene.get('form')[0]
if form:
session['data']['form'] = form.datastructure.to_data()
force = scene.get('force')[0]
if force:
session['data']['force'] = force.datastructure.to_data()
with open(filepath, 'w+') as f:
json.dump(session, f, cls=DataEncoder)
if __name__ == '__main__':
RunCommand(True)
| true
| true
|
1c411633507834ebf441912e41c4aa9c0df844f1
| 311
|
py
|
Python
|
ocpmodels/datasets/embeddings/__init__.py
|
Irlirion/ocp
|
6fb3e794eef31559db990300198eca20f41d8f37
|
[
"MIT",
"BSD-3-Clause"
] | 242
|
2020-10-14T11:10:43.000Z
|
2022-03-29T07:50:18.000Z
|
ocpmodels/datasets/embeddings/__init__.py
|
Irlirion/ocp
|
6fb3e794eef31559db990300198eca20f41d8f37
|
[
"MIT",
"BSD-3-Clause"
] | 100
|
2020-10-13T23:27:04.000Z
|
2022-03-23T16:50:26.000Z
|
ocpmodels/datasets/embeddings/__init__.py
|
Irlirion/ocp
|
6fb3e794eef31559db990300198eca20f41d8f37
|
[
"MIT",
"BSD-3-Clause"
] | 86
|
2020-10-15T05:56:28.000Z
|
2022-03-16T16:11:45.000Z
|
__all__ = [
"ATOMIC_RADII",
"KHOT_EMBEDDINGS",
"CONTINUOUS_EMBEDDINGS",
"QMOF_KHOT_EMBEDDINGS",
]
from .atomic_radii import ATOMIC_RADII
from .continuous_embeddings import CONTINUOUS_EMBEDDINGS
from .khot_embeddings import KHOT_EMBEDDINGS
from .qmof_khot_embeddings import QMOF_KHOT_EMBEDDINGS
| 25.916667
| 56
| 0.807074
|
__all__ = [
"ATOMIC_RADII",
"KHOT_EMBEDDINGS",
"CONTINUOUS_EMBEDDINGS",
"QMOF_KHOT_EMBEDDINGS",
]
from .atomic_radii import ATOMIC_RADII
from .continuous_embeddings import CONTINUOUS_EMBEDDINGS
from .khot_embeddings import KHOT_EMBEDDINGS
from .qmof_khot_embeddings import QMOF_KHOT_EMBEDDINGS
| true
| true
|
1c41168c3a3d2953b6fe2d3e077ac4ba4cac7f89
| 1,075
|
py
|
Python
|
src/deep_learning/train.py
|
Nirvana23333sun/fingerprint-verfication
|
b2a89c0cea50ca57f602fbca41e935486b301c71
|
[
"Apache-2.0"
] | 2
|
2021-07-15T08:49:26.000Z
|
2021-09-10T22:23:04.000Z
|
src/deep_learning/train.py
|
Nirvana23333sun/fingerprint-verfication
|
b2a89c0cea50ca57f602fbca41e935486b301c71
|
[
"Apache-2.0"
] | null | null | null |
src/deep_learning/train.py
|
Nirvana23333sun/fingerprint-verfication
|
b2a89c0cea50ca57f602fbca41e935486b301c71
|
[
"Apache-2.0"
] | 1
|
2021-07-15T08:49:37.000Z
|
2021-07-15T08:49:37.000Z
|
import torch.optim as optim
class Train:
def __init__(self, data_sampler, model, criterion):
self.data_sampler = data_sampler
self.criterion = criterion
self.model = model
def train(self, iterations, lr):
optimizer = optim.Adam(self.model.parameters(), lr)
avg_loss = 0
for e in range(iterations):
siamese_1, siamese_2, label = self.data_sampler.sample()
siamese_1, siamese_2, label = siamese_1.cuda(), siamese_2.cuda(), label.cuda()
optimizer.zero_grad()
output1, output2 = self.model(siamese_1, siamese_2)
loss = self.criterion(output1, output2, label)
avg_loss = avg_loss + float(loss.item())
loss.backward()
optimizer.step()
if e % 50 == 49:
loss = avg_loss / 50
print("Step {} - lr {} - loss: {}".format(e, lr, loss))
avg_loss = 0
# error = self.siamese_nn.loss_func(2 ** 8)
# self.siamese_nn.append(error.detach())
| 31.617647
| 90
| 0.56
|
import torch.optim as optim
class Train:
def __init__(self, data_sampler, model, criterion):
self.data_sampler = data_sampler
self.criterion = criterion
self.model = model
def train(self, iterations, lr):
optimizer = optim.Adam(self.model.parameters(), lr)
avg_loss = 0
for e in range(iterations):
siamese_1, siamese_2, label = self.data_sampler.sample()
siamese_1, siamese_2, label = siamese_1.cuda(), siamese_2.cuda(), label.cuda()
optimizer.zero_grad()
output1, output2 = self.model(siamese_1, siamese_2)
loss = self.criterion(output1, output2, label)
avg_loss = avg_loss + float(loss.item())
loss.backward()
optimizer.step()
if e % 50 == 49:
loss = avg_loss / 50
print("Step {} - lr {} - loss: {}".format(e, lr, loss))
avg_loss = 0
| true
| true
|
1c4117059d47c41f5773d5f9d6f4ab916bf1a0b9
| 21,462
|
py
|
Python
|
main_unified_inverter.py
|
joycenerd/GenRep
|
9cc21dd4b81d6649659308c42c192b0be73a040d
|
[
"MIT"
] | null | null | null |
main_unified_inverter.py
|
joycenerd/GenRep
|
9cc21dd4b81d6649659308c42c192b0be73a040d
|
[
"MIT"
] | null | null | null |
main_unified_inverter.py
|
joycenerd/GenRep
|
9cc21dd4b81d6649659308c42c192b0be73a040d
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import numpy as np
import os
import sys
import argparse
import time
import math
import torchvision.utils as vutils
import tensorboard_logger as tb_logger
import torch
import torch.backends.cudnn as cudnn
from torchvision import transforms, datasets
from util import TwoCropTransform, AverageMeter, GansetDataset, GansteerDataset
from util import adjust_learning_rate, warmup_learning_rate, accuracy
from util import set_optimizer, save_model
from networks.resnet_big import SupConResNet, SupCEResNet, SupInverterResNet, UnsupInverterResNet
from losses import SupConLoss, SupInverterLoss, UnsupInverterLoss
import oyaml as yaml
try:
import apex
from apex import amp, optimizers
except ImportError:
pass
def parse_option():
parser = argparse.ArgumentParser('argument for training')
parser.add_argument('--encoding_type', type=str, default='contrastive',
choices=['contrastive', 'crossentropy', 'inverter'])
parser.add_argument('--print_freq', type=int, default=10,
help='print frequency')
parser.add_argument('--save_freq', type=int, default=20,
help='save frequency')
parser.add_argument('--batch_size', type=int, default=256,
help='batch_size')
parser.add_argument('--num_workers', type=int, default=16,
help='num of workers to use')
parser.add_argument('--epochs', type=int, default=200,
help='number of training epochs')
parser.add_argument('--showimg', action='store_true', help='display image in tensorboard')
parser.add_argument('--removeimtf', action='store_true', help='on/off transformations for inverter')
# optimization
parser.add_argument('--learning_rate', type=float, default=0.03,
help='learning rate')
parser.add_argument('--lr_decay_epochs', type=str, default='120,160',
help='where to decay lr, can be a list')
parser.add_argument('--lr_decay_rate', type=float, default=0.1,
help='decay rate for learning rate')
parser.add_argument('--weight_decay', type=float, default=1e-4,
help='weight decay')
parser.add_argument('--momentum', type=float, default=0.9,
help='momentum')
# model dataset
parser.add_argument('--model', type=str, default='resnet50')
parser.add_argument('--dataset', type=str, default='biggan',
choices=['biggan', 'cifar10', 'cifar100', 'imagenet100', 'imagenet100K', 'imagenet'], help='dataset')
## Ali: todo: this should be based on opt.encoding type and remove the default (revisit every default) and name of the model for saving
# method
parser.add_argument('--numcontrast', type=int, default=20,
help='num of workers to use')
parser.add_argument('--method', type=str, default='SimCLR',
choices=['SupCon', 'SimCLR'], help='choose method')
parser.add_argument('--walk_method', type=str, choices=['none', 'random', 'steer', 'pca'], help='choose method')
# temperature
parser.add_argument('--temp', type=float, default=0.1,
help='temperature for loss function')
# other setting
parser.add_argument('--cosine', action='store_true', help='using cosine annealing')
parser.add_argument('--syncBN', action='store_true',
help='using synchronized batch normalization')
parser.add_argument('--warm', action='store_true',
help='warm-up for large batch training')
parser.add_argument('--trial', type=str, default='0',
help='id for recording multiple runs')
# specifying folders
parser.add_argument('-d', '--data_folder', type=str,
default='/data/scratch-oc40/jahanian/ganclr_results/ImageNet100',
help='the data folder')
parser.add_argument('-s', '--cache_folder', type=str,
default='/data/scratch-oc40/jahanian/ganclr_results/',
help='the saving folder')
opt = parser.parse_args()
# set the path according to the environment
opt.data_folder = opt.data_folder
if opt.encoding_type == 'crossentropy':
opt.method = 'SupCE'
opt.model_path = os.path.join(opt.cache_folder, 'SupCE/{}_models'.format(opt.dataset))
opt.tb_path = os.path.join(opt.cache_folder, 'SupCE/{}_tensorboard'.format(opt.dataset))
elif opt.encoding_type == 'inverter':
if opt.method == 'SupCon':
opt.method = 'SupInv'
elif opt.method == 'SimCLR':
opt.method = 'UnsupInv'
opt.model_path = os.path.join(opt.cache_folder, '{}/{}_models'.format(opt.method, opt.dataset))
opt.tb_path = os.path.join(opt.cache_folder, '{}/{}_tensorboard'.format(opt.method, opt.dataset))
else:
opt.model_path = os.path.join(opt.cache_folder, '{}/{}_models'.format(opt.method, opt.dataset))
opt.tb_path = os.path.join(opt.cache_folder, '{}/{}_tensorboard'.format(opt.method, opt.dataset))
iterations = opt.lr_decay_epochs.split(',')
opt.lr_decay_epochs = list([])
for it in iterations:
opt.lr_decay_epochs.append(int(it))
opt.model_name = '{}_{}_{}_ncontrast.{}_lr_{}_decay_{}_bsz_{}_temp_{}_trial_{}'.\
format(opt.method, opt.dataset, opt.model, opt.numcontrast, opt.learning_rate,
opt.weight_decay, opt.batch_size, opt.temp, opt.trial)
if opt.encoding_type == 'inverter':
opt.model_name = '{}_{}_{}_lr_{}_decay_{}_bsz_{}_temp_{}_trial_{}'.\
format(opt.method, opt.dataset, opt.model, opt.learning_rate,
opt.weight_decay, opt.batch_size, opt.temp, opt.trial)
if opt.cosine:
opt.model_name = '{}_cosine'.format(opt.model_name)
opt.model_name = '{}_{}'.format(opt.model_name, os.path.basename(opt.data_folder))
# warm-up for large-batch training,
if opt.batch_size > 256:
opt.warm = True
if opt.warm:
opt.model_name = '{}_warm'.format(opt.model_name)
opt.warmup_from = 0.01
opt.warm_epochs = 10
if opt.cosine:
eta_min = opt.learning_rate * (opt.lr_decay_rate ** 3)
opt.warmup_to = eta_min + (opt.learning_rate - eta_min) * (
1 + math.cos(math.pi * opt.warm_epochs / opt.epochs)) / 2
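            # this target is the cosine schedule evaluated at epoch warm_epochs:
            # eta_min + (lr - eta_min) * (1 + cos(pi * warm_epochs / epochs)) / 2;
            # the warm-up then ramps from warmup_from toward this value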
else:
opt.warmup_to = opt.learning_rate
opt.tb_folder = os.path.join(opt.tb_path, opt.model_name)
if not os.path.isdir(opt.tb_folder):
os.makedirs(opt.tb_folder)
opt.save_folder = os.path.join(opt.model_path, opt.model_name)
if not os.path.isdir(opt.save_folder):
os.makedirs(opt.save_folder)
if opt.dataset == 'biggan' or opt.dataset == 'imagenet100' or opt.dataset == 'imagenet100K' or opt.dataset == 'imagenet':
# or 256 as you like
opt.img_size = 128
opt.n_cls = 1000
elif opt.dataset == 'cifar10' or opt.dataset == 'cifar100':
opt.img_size = 32
return opt
def set_loader(opt):
# construct data loader
if opt.dataset == 'cifar10':
mean = (0.4914, 0.4822, 0.4465)
std = (0.2023, 0.1994, 0.2010)
elif opt.dataset == 'cifar100':
mean = (0.5071, 0.4867, 0.4408)
std = (0.2675, 0.2565, 0.2761)
elif opt.dataset == 'biggan' or opt.dataset == 'imagenet100' or opt.dataset == 'imagenet100K' or opt.dataset == 'imagenet':
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
else:
raise ValueError('dataset not supported: {}'.format(opt.dataset))
normalize = transforms.Normalize(mean=mean, std=std)
opt.mean = mean
opt.std = std
if opt.removeimtf:
print('>>> removeimtf is ON.')
train_transform = transforms.Compose([
transforms.CenterCrop(size=int(opt.img_size*0.875)),
transforms.ToTensor(),
normalize,
])
else:
train_transform = transforms.Compose([
transforms.RandomResizedCrop(size=int(opt.img_size*0.875), scale=(0.2, 1.)),
transforms.RandomHorizontalFlip(),
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)
], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.ToTensor(),
normalize,
])
# train_transform = transforms.Compose([
# transforms.RandomResizedCrop(size=int(opt.img_size*0.875), scale=(0.2, 1.)),
# transforms.RandomHorizontalFlip(),
# transforms.RandomApply([
# transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)
# ], p=0.8),
# transforms.RandomGrayscale(p=0.2),
# transforms.ToTensor(),
# normalize,
# ])
if opt.dataset == 'cifar10':
train_dataset = datasets.CIFAR10(root=opt.data_folder,
transform=TwoCropTransform(train_transform),
download=True)
elif opt.dataset == 'cifar100':
train_dataset = datasets.CIFAR100(root=opt.data_folder,
transform=TwoCropTransform(train_transform),
download=True)
elif opt.dataset == 'imagenet100' or opt.dataset == 'imagenet100K' or opt.dataset == 'imagenet':
train_dataset = datasets.ImageFolder(root=os.path.join(opt.data_folder, 'train'),
transform=TwoCropTransform(train_transform))
elif opt.dataset == 'biggan':
if opt.walk_method == 'random':
train_dataset = GansetDataset(root_dir=os.path.join(opt.data_folder, 'train'),
transform=train_transform, numcontrast=opt.numcontrast,
method=opt.method)
elif opt.walk_method == 'steer':
train_dataset = GansteerDataset(root_dir=os.path.join(opt.data_folder, 'train'),
transform=train_transform, numcontrast=opt.numcontrast,
method=opt.method)
elif opt.walk_method == 'none':
train_dataset = datasets.ImageFolder(root=os.path.join(opt.data_folder, 'train'),
transform=TwoCropTransform(train_transform))
## Ali: ToDo: elif opt.walk_method == 'pca'...
else:
raise ValueError(opt.dataset)
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=opt.batch_size, shuffle=(train_sampler is None),
num_workers=opt.num_workers, pin_memory=True, sampler=train_sampler)
return train_loader
def set_model(opt):
if opt.encoding_type == 'contrastive':
model = SupConResNet(name=opt.model, img_size=int(opt.img_size*0.875))
criterion = SupConLoss(temperature=opt.temp)
elif opt.encoding_type == 'crossentropy':
model = SupCEResNet(name=opt.model, num_classes=opt.n_cls, img_size=int(opt.img_size*0.875))
criterion = torch.nn.CrossEntropyLoss()
elif opt.encoding_type == 'inverter':
if opt.method == 'SupInv':
model = SupInverterResNet(name=opt.model, img_size=int(opt.img_size*0.875))
criterion = SupInverterLoss()
elif opt.method == 'UnsupInv':
model = UnsupInverterResNet(name=opt.model, img_size=int(opt.img_size*0.875))
criterion = UnsupInverterLoss()
# enable synchronized Batch Normalization
if opt.syncBN:
model = apex.parallel.convert_syncbn_model(model)
if torch.cuda.is_available():
if torch.cuda.device_count() > 1:
model.encoder = torch.nn.DataParallel(model.encoder)
model = model.cuda()
criterion = criterion.cuda()
cudnn.benchmark = True
return model, criterion
def train(train_loader, model, criterion, optimizer, epoch, opt):
"""one epoch training"""
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
    # top1 is only updated for crossentropy training, which needs an AverageMeter
    top1 = AverageMeter() if opt.encoding_type == 'crossentropy' else 0  ##Ali ToDo: deal with this top1
end = time.time()
## Ali: Todo: this data loading depends on if we generate positive on the fly or load them. if loading then we have data[0] and data[1]
## so we need to check for opt.encoding_type (currently it's good for SupCon with gan data, i.e., positive pairs are from gan)
# for idx, (images, labels) in enumerate(train_loader):
# data_time.update(time.time() - end)
print("Start train")
# t1 = time.time()
for idx, data in enumerate(train_loader):
# print('batch loading time', time.time() - t1)
# t2 = time.time()
if len(data) == 2:
images = data[0]
labels = data[1]
elif len(data) == 3:
images = data[:2]
labels = data[2]
elif len(data) == 4:
images = data[:2]
labels = data[2]
labels_class = data[3]
elif len(data) == 5:
images = data[:2]
labels = data[2]
labels_class = data[3]
z_vect = data[4]
else:
raise NotImplementedError
data_time.update(time.time() - end)
if opt.encoding_type == 'crossentropy':
# We only pick one of images
images = images[1]
elif opt.encoding_type == 'inverter':
# We only pick one of images and that's anchor
# images = images[0] # <== this is for pairing z_anchor and anchor
images = images[1] # <== this is for pairing z_anchor and anchor
            # also only pick z_vect[0] ToDo: if always the case just send z anchor (channel 0) from loader, but make sure image is also images[0]
z_vect = z_vect[0].cuda(non_blocking=True)
elif opt.encoding_type == 'contrastive':
ims = images[0]
anchors = images[1]
images = torch.cat([images[0].unsqueeze(1), images[1].unsqueeze(1)],
dim=1)
# print('2) images shape', images.shape)
images = images.view(-1, 3, int(opt.img_size*0.875), int(opt.img_size*0.875)).cuda(non_blocking=True)
# print('3) images shape', images.shape)
labels = labels.cuda(non_blocking=True)
bsz = labels.shape[0]
# warm-up learning rate
warmup_learning_rate(opt, epoch, idx, len(train_loader), optimizer)
# compute loss
if opt.encoding_type == 'contrastive':
features = model(images)
features = features.view(bsz, 2, -1)
if opt.method == 'SupCon':
loss = criterion(features, labels)
elif opt.method == 'SimCLR':
loss = criterion(features)
else:
raise ValueError('contrastive method not supported: {}'.
format(opt.method))
elif opt.encoding_type == 'crossentropy':
output = model(images)
loss = criterion(output, labels)
acc1, acc5 = accuracy(output, labels, topk=(1, 5))
top1.update(acc1[0], bsz)
elif opt.encoding_type == 'inverter':
top1 = 0
#print('images.shape:', images.shape)
output = model(images.cuda()) # images.shape: [256, 3, 112, 112]
if opt.method == 'SupInv':
loss, loss_z, loss_y = criterion(output, z_vect, labels) #how many images as images? output is z and y and z_vect[0] is zs and z_vect[1] are y and are shape [256, 128] and [256, 1000]
elif opt.method == 'UnsupInv':
loss = criterion(output, z_vect) #how many images as images? output is z and y and z_vect[0] is zs and z_vect[1] are y and are shape [256, 128] and [256, 1000]
## Ali: ToDo deal with this top1
# acc1, acc5 = accuracy(output, labels, topk=(1, 5))
# top1.update(acc1[0], bsz)
top1 = 0
else:
raise NotImplementedError
# t3 = time.time()
# print('{}- spent time before loss update: {}'.format(idx, t3 - t2))
# update metric
losses.update(loss.item(), bsz)
# SGD
optimizer.zero_grad()
loss.backward()
optimizer.step()
# t4 = time.time()
# print('{}- spent after before loss update: {}'.format(idx, t4 - t3))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print info
if (idx + 1) % opt.print_freq == 0:
if opt.encoding_type == 'crossentropy':
print('Train: [{0}][{1}/{2}]\t'
'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
'loss {loss.val:.3f} ({loss.avg:.3f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch, idx + 1, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1))
else:
print('Train: [{0}][{1}/{2}]\t'
'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
'loss {loss.val:.3f} ({loss.avg:.3f})'.format(
epoch, idx + 1, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses))
sys.stdout.flush()
# t1 = time.time()
other_metrics = {}
if opt.encoding_type == 'crossentropy':
other_metrics['top1_acc'] = top1.avg
elif opt.encoding_type == 'contrastive':
if opt.showimg:
other_metrics['image'] = [ims[:8], anchors[:8]]
elif opt.encoding_type == 'inverter':
if opt.showimg:
other_metrics['image'] = [images[:8]]
if opt.method == 'SupInv':
return losses.avg, other_metrics, loss_z, loss_y
elif opt.method == 'UnsupInv':
return losses.avg, other_metrics, loss
else:
return losses.avg, other_metrics
def main():
opt = parse_option()
with open(os.path.join(opt.save_folder, 'optE.yml'), 'w') as f:
yaml.dump(vars(opt), f, default_flow_style=False)
# build data loader
# opt.encoding_type tells us how to get training data
train_loader = set_loader(opt)
# build model and criterion
# opt.encoding_type tells us what to put as the head; choices are:
# contrastive -> mlp or linear
# crossentropy -> one linear for pred_y
# inverter -> one linear for pred_z and one linear for pred_y
model, criterion = set_model(opt)
# build optimizer
optimizer = set_optimizer(opt, model)
# tensorboard
logger = tb_logger.Logger(logdir=opt.tb_folder, flush_secs=2)
# training routine
for epoch in range(1, opt.epochs + 1):
adjust_learning_rate(opt, optimizer, epoch)
# train for one epoch
time1 = time.time()
if opt.method == 'SupInv':
loss, other_metrics,loss_z, loss_y = train(train_loader, model, criterion, optimizer, epoch, opt)
logger.log_value('total loss', loss, epoch)
logger.log_value('loss_z', loss_z, epoch)
logger.log_value('loss_y', loss_y, epoch)
elif opt.method == 'UnsupInv':
loss, other_metrics,loss = train(train_loader, model, criterion, optimizer, epoch, opt)
logger.log_value('loss_z', loss, epoch)
else:
loss, other_metrics = train(train_loader, model, criterion, optimizer, epoch, opt)
logger.log_value('loss', loss, epoch)
time2 = time.time()
print('epoch {}, total time {:.2f}'.format(epoch, time2 - time1))
# tensorboard logger
# logger.log_value('loss', loss, epoch)
logger.log_value('learning_rate', optimizer.param_groups[0]['lr'], epoch)
for metric_name, metric_value in other_metrics.items():
if metric_name == 'image':
images = metric_value
anchors = images[0]
otherims = images[1]
bs = anchors.shape[0]
grid_images = vutils.make_grid(
torch.cat((anchors, otherims)), nrow=bs)
grid_images *= np.array(opt.std)[:, None, None]
grid_images += np.array(opt.mean)[:, None, None]
grid_images = (255*grid_images.cpu().numpy()).astype(np.uint8)
grid_images = grid_images[None, :].transpose(0,2,3,1)
logger.log_images(metric_name, grid_images, epoch)
else:
logger.log_value(metric_name, metric_value, epoch)
if epoch % opt.save_freq == 0:
save_file = os.path.join(
opt.save_folder, 'ckpt_epoch_{epoch}.pth'.format(epoch=epoch))
save_model(model, optimizer, opt, epoch, save_file)
# save the last model
save_file = os.path.join(
opt.save_folder, 'last.pth')
save_model(model, optimizer, opt, opt.epochs, save_file)
if __name__ == '__main__':
main()
| 42.165029
| 200
| 0.590951
|
from __future__ import print_function
import numpy as np
import os
import sys
import argparse
import time
import math
import torchvision.utils as vutils
import tensorboard_logger as tb_logger
import torch
import torch.backends.cudnn as cudnn
from torchvision import transforms, datasets
from util import TwoCropTransform, AverageMeter, GansetDataset, GansteerDataset
from util import adjust_learning_rate, warmup_learning_rate, accuracy
from util import set_optimizer, save_model
from networks.resnet_big import SupConResNet, SupCEResNet, SupInverterResNet, UnsupInverterResNet
from losses import SupConLoss, SupInverterLoss, UnsupInverterLoss
import oyaml as yaml
try:
import apex
from apex import amp, optimizers
except ImportError:
pass
def parse_option():
parser = argparse.ArgumentParser('argument for training')
parser.add_argument('--encoding_type', type=str, default='contrastive',
choices=['contrastive', 'crossentropy', 'inverter'])
parser.add_argument('--print_freq', type=int, default=10,
help='print frequency')
parser.add_argument('--save_freq', type=int, default=20,
help='save frequency')
parser.add_argument('--batch_size', type=int, default=256,
help='batch_size')
parser.add_argument('--num_workers', type=int, default=16,
help='num of workers to use')
parser.add_argument('--epochs', type=int, default=200,
help='number of training epochs')
parser.add_argument('--showimg', action='store_true', help='display image in tensorboard')
parser.add_argument('--removeimtf', action='store_true', help='on/off transformations for inverter')
parser.add_argument('--learning_rate', type=float, default=0.03,
help='learning rate')
parser.add_argument('--lr_decay_epochs', type=str, default='120,160',
help='where to decay lr, can be a list')
parser.add_argument('--lr_decay_rate', type=float, default=0.1,
help='decay rate for learning rate')
parser.add_argument('--weight_decay', type=float, default=1e-4,
help='weight decay')
parser.add_argument('--momentum', type=float, default=0.9,
help='momentum')
parser.add_argument('--model', type=str, default='resnet50')
parser.add_argument('--dataset', type=str, default='biggan',
choices=['biggan', 'cifar10', 'cifar100', 'imagenet100', 'imagenet100K', 'imagenet'], help='dataset')
    parser.add_argument('--numcontrast', type=int, default=20,
                        help='num of workers to use')
    parser.add_argument('--method', type=str, default='SimCLR',
choices=['SupCon', 'SimCLR'], help='choose method')
parser.add_argument('--walk_method', type=str, choices=['none', 'random', 'steer', 'pca'], help='choose method')
parser.add_argument('--temp', type=float, default=0.1,
help='temperature for loss function')
parser.add_argument('--cosine', action='store_true', help='using cosine annealing')
parser.add_argument('--syncBN', action='store_true',
help='using synchronized batch normalization')
parser.add_argument('--warm', action='store_true',
help='warm-up for large batch training')
parser.add_argument('--trial', type=str, default='0',
help='id for recording multiple runs')
parser.add_argument('-d', '--data_folder', type=str,
default='/data/scratch-oc40/jahanian/ganclr_results/ImageNet100',
help='the data folder')
parser.add_argument('-s', '--cache_folder', type=str,
default='/data/scratch-oc40/jahanian/ganclr_results/',
help='the saving folder')
opt = parser.parse_args()
opt.data_folder = opt.data_folder
if opt.encoding_type == 'crossentropy':
opt.method = 'SupCE'
opt.model_path = os.path.join(opt.cache_folder, 'SupCE/{}_models'.format(opt.dataset))
opt.tb_path = os.path.join(opt.cache_folder, 'SupCE/{}_tensorboard'.format(opt.dataset))
elif opt.encoding_type == 'inverter':
if opt.method == 'SupCon':
opt.method = 'SupInv'
elif opt.method == 'SimCLR':
opt.method = 'UnsupInv'
opt.model_path = os.path.join(opt.cache_folder, '{}/{}_models'.format(opt.method, opt.dataset))
opt.tb_path = os.path.join(opt.cache_folder, '{}/{}_tensorboard'.format(opt.method, opt.dataset))
else:
opt.model_path = os.path.join(opt.cache_folder, '{}/{}_models'.format(opt.method, opt.dataset))
opt.tb_path = os.path.join(opt.cache_folder, '{}/{}_tensorboard'.format(opt.method, opt.dataset))
iterations = opt.lr_decay_epochs.split(',')
opt.lr_decay_epochs = list([])
for it in iterations:
opt.lr_decay_epochs.append(int(it))
opt.model_name = '{}_{}_{}_ncontrast.{}_lr_{}_decay_{}_bsz_{}_temp_{}_trial_{}'.\
format(opt.method, opt.dataset, opt.model, opt.numcontrast, opt.learning_rate,
opt.weight_decay, opt.batch_size, opt.temp, opt.trial)
if opt.encoding_type == 'inverter':
opt.model_name = '{}_{}_{}_lr_{}_decay_{}_bsz_{}_temp_{}_trial_{}'.\
format(opt.method, opt.dataset, opt.model, opt.learning_rate,
opt.weight_decay, opt.batch_size, opt.temp, opt.trial)
if opt.cosine:
opt.model_name = '{}_cosine'.format(opt.model_name)
opt.model_name = '{}_{}'.format(opt.model_name, os.path.basename(opt.data_folder))
if opt.batch_size > 256:
opt.warm = True
if opt.warm:
opt.model_name = '{}_warm'.format(opt.model_name)
opt.warmup_from = 0.01
opt.warm_epochs = 10
if opt.cosine:
eta_min = opt.learning_rate * (opt.lr_decay_rate ** 3)
opt.warmup_to = eta_min + (opt.learning_rate - eta_min) * (
1 + math.cos(math.pi * opt.warm_epochs / opt.epochs)) / 2
else:
opt.warmup_to = opt.learning_rate
opt.tb_folder = os.path.join(opt.tb_path, opt.model_name)
if not os.path.isdir(opt.tb_folder):
os.makedirs(opt.tb_folder)
opt.save_folder = os.path.join(opt.model_path, opt.model_name)
if not os.path.isdir(opt.save_folder):
os.makedirs(opt.save_folder)
if opt.dataset == 'biggan' or opt.dataset == 'imagenet100' or opt.dataset == 'imagenet100K' or opt.dataset == 'imagenet':
opt.img_size = 128
opt.n_cls = 1000
elif opt.dataset == 'cifar10' or opt.dataset == 'cifar100':
opt.img_size = 32
return opt
def set_loader(opt):
if opt.dataset == 'cifar10':
mean = (0.4914, 0.4822, 0.4465)
std = (0.2023, 0.1994, 0.2010)
elif opt.dataset == 'cifar100':
mean = (0.5071, 0.4867, 0.4408)
std = (0.2675, 0.2565, 0.2761)
elif opt.dataset == 'biggan' or opt.dataset == 'imagenet100' or opt.dataset == 'imagenet100K' or opt.dataset == 'imagenet':
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
else:
raise ValueError('dataset not supported: {}'.format(opt.dataset))
normalize = transforms.Normalize(mean=mean, std=std)
opt.mean = mean
opt.std = std
if opt.removeimtf:
print('>>> removeimtf is ON.')
train_transform = transforms.Compose([
transforms.CenterCrop(size=int(opt.img_size*0.875)),
transforms.ToTensor(),
normalize,
])
else:
train_transform = transforms.Compose([
transforms.RandomResizedCrop(size=int(opt.img_size*0.875), scale=(0.2, 1.)),
transforms.RandomHorizontalFlip(),
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)
], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.ToTensor(),
normalize,
])
if opt.dataset == 'cifar10':
train_dataset = datasets.CIFAR10(root=opt.data_folder,
transform=TwoCropTransform(train_transform),
download=True)
elif opt.dataset == 'cifar100':
train_dataset = datasets.CIFAR100(root=opt.data_folder,
transform=TwoCropTransform(train_transform),
download=True)
elif opt.dataset == 'imagenet100' or opt.dataset == 'imagenet100K' or opt.dataset == 'imagenet':
train_dataset = datasets.ImageFolder(root=os.path.join(opt.data_folder, 'train'),
transform=TwoCropTransform(train_transform))
elif opt.dataset == 'biggan':
if opt.walk_method == 'random':
train_dataset = GansetDataset(root_dir=os.path.join(opt.data_folder, 'train'),
transform=train_transform, numcontrast=opt.numcontrast,
method=opt.method)
elif opt.walk_method == 'steer':
train_dataset = GansteerDataset(root_dir=os.path.join(opt.data_folder, 'train'),
transform=train_transform, numcontrast=opt.numcontrast,
method=opt.method)
elif opt.walk_method == 'none':
train_dataset = datasets.ImageFolder(root=os.path.join(opt.data_folder, 'train'),
transform=TwoCropTransform(train_transform))
    else:
        raise ValueError(opt.dataset)
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=opt.batch_size, shuffle=(train_sampler is None),
num_workers=opt.num_workers, pin_memory=True, sampler=train_sampler)
return train_loader
def set_model(opt):
if opt.encoding_type == 'contrastive':
model = SupConResNet(name=opt.model, img_size=int(opt.img_size*0.875))
criterion = SupConLoss(temperature=opt.temp)
elif opt.encoding_type == 'crossentropy':
model = SupCEResNet(name=opt.model, num_classes=opt.n_cls, img_size=int(opt.img_size*0.875))
criterion = torch.nn.CrossEntropyLoss()
elif opt.encoding_type == 'inverter':
if opt.method == 'SupInv':
model = SupInverterResNet(name=opt.model, img_size=int(opt.img_size*0.875))
criterion = SupInverterLoss()
elif opt.method == 'UnsupInv':
model = UnsupInverterResNet(name=opt.model, img_size=int(opt.img_size*0.875))
criterion = UnsupInverterLoss()
if opt.syncBN:
model = apex.parallel.convert_syncbn_model(model)
if torch.cuda.is_available():
if torch.cuda.device_count() > 1:
model.encoder = torch.nn.DataParallel(model.encoder)
model = model.cuda()
criterion = criterion.cuda()
cudnn.benchmark = True
return model, criterion
def train(train_loader, model, criterion, optimizer, epoch, opt):
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
    top1 = AverageMeter() if opt.encoding_type == 'crossentropy' else 0
    end = time.time()
    print("Start train")
    for idx, data in enumerate(train_loader):
if len(data) == 2:
images = data[0]
labels = data[1]
elif len(data) == 3:
images = data[:2]
labels = data[2]
elif len(data) == 4:
images = data[:2]
labels = data[2]
labels_class = data[3]
elif len(data) == 5:
images = data[:2]
labels = data[2]
labels_class = data[3]
z_vect = data[4]
else:
raise NotImplementedError
data_time.update(time.time() - end)
if opt.encoding_type == 'crossentropy':
# We only pick one of images
images = images[1]
elif opt.encoding_type == 'inverter':
            # We only pick one of images and that's anchor
            images = images[1]
            z_vect = z_vect[0].cuda(non_blocking=True)
elif opt.encoding_type == 'contrastive':
ims = images[0]
anchors = images[1]
images = torch.cat([images[0].unsqueeze(1), images[1].unsqueeze(1)],
dim=1)
images = images.view(-1, 3, int(opt.img_size*0.875), int(opt.img_size*0.875)).cuda(non_blocking=True)
labels = labels.cuda(non_blocking=True)
bsz = labels.shape[0]
warmup_learning_rate(opt, epoch, idx, len(train_loader), optimizer)
if opt.encoding_type == 'contrastive':
features = model(images)
features = features.view(bsz, 2, -1)
if opt.method == 'SupCon':
loss = criterion(features, labels)
elif opt.method == 'SimCLR':
loss = criterion(features)
else:
raise ValueError('contrastive method not supported: {}'.
format(opt.method))
elif opt.encoding_type == 'crossentropy':
output = model(images)
loss = criterion(output, labels)
acc1, acc5 = accuracy(output, labels, topk=(1, 5))
top1.update(acc1[0], bsz)
elif opt.encoding_type == 'inverter':
top1 = 0
output = model(images.cuda())
if opt.method == 'SupInv':
loss, loss_z, loss_y = criterion(output, z_vect, labels)
elif opt.method == 'UnsupInv':
loss = criterion(output, z_vect)
top1 = 0
else:
raise NotImplementedError
losses.update(loss.item(), bsz)
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
if (idx + 1) % opt.print_freq == 0:
if opt.encoding_type == 'crossentropy':
print('Train: [{0}][{1}/{2}]\t'
'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
'loss {loss.val:.3f} ({loss.avg:.3f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch, idx + 1, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1))
else:
print('Train: [{0}][{1}/{2}]\t'
'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
'loss {loss.val:.3f} ({loss.avg:.3f})'.format(
epoch, idx + 1, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses))
sys.stdout.flush()
other_metrics = {}
if opt.encoding_type == 'crossentropy':
other_metrics['top1_acc'] = top1.avg
elif opt.encoding_type == 'contrastive':
if opt.showimg:
other_metrics['image'] = [ims[:8], anchors[:8]]
elif opt.encoding_type == 'inverter':
if opt.showimg:
other_metrics['image'] = [images[:8]]
if opt.method == 'SupInv':
return losses.avg, other_metrics, loss_z, loss_y
elif opt.method == 'UnsupInv':
return losses.avg, other_metrics, loss
else:
return losses.avg, other_metrics
def main():
opt = parse_option()
with open(os.path.join(opt.save_folder, 'optE.yml'), 'w') as f:
yaml.dump(vars(opt), f, default_flow_style=False)
train_loader = set_loader(opt)
model, criterion = set_model(opt)
optimizer = set_optimizer(opt, model)
logger = tb_logger.Logger(logdir=opt.tb_folder, flush_secs=2)
for epoch in range(1, opt.epochs + 1):
adjust_learning_rate(opt, optimizer, epoch)
time1 = time.time()
if opt.method == 'SupInv':
loss, other_metrics,loss_z, loss_y = train(train_loader, model, criterion, optimizer, epoch, opt)
logger.log_value('total loss', loss, epoch)
logger.log_value('loss_z', loss_z, epoch)
logger.log_value('loss_y', loss_y, epoch)
elif opt.method == 'UnsupInv':
loss, other_metrics,loss = train(train_loader, model, criterion, optimizer, epoch, opt)
logger.log_value('loss_z', loss, epoch)
else:
loss, other_metrics = train(train_loader, model, criterion, optimizer, epoch, opt)
logger.log_value('loss', loss, epoch)
time2 = time.time()
print('epoch {}, total time {:.2f}'.format(epoch, time2 - time1))
logger.log_value('learning_rate', optimizer.param_groups[0]['lr'], epoch)
for metric_name, metric_value in other_metrics.items():
if metric_name == 'image':
images = metric_value
anchors = images[0]
otherims = images[1]
bs = anchors.shape[0]
grid_images = vutils.make_grid(
torch.cat((anchors, otherims)), nrow=bs)
grid_images *= np.array(opt.std)[:, None, None]
grid_images += np.array(opt.mean)[:, None, None]
grid_images = (255*grid_images.cpu().numpy()).astype(np.uint8)
grid_images = grid_images[None, :].transpose(0,2,3,1)
logger.log_images(metric_name, grid_images, epoch)
else:
logger.log_value(metric_name, metric_value, epoch)
if epoch % opt.save_freq == 0:
save_file = os.path.join(
opt.save_folder, 'ckpt_epoch_{epoch}.pth'.format(epoch=epoch))
save_model(model, optimizer, opt, epoch, save_file)
save_file = os.path.join(
opt.save_folder, 'last.pth')
save_model(model, optimizer, opt, opt.epochs, save_file)
if __name__ == '__main__':
main()
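The loop above drives AverageMeter-style trackers (`losses`, `batch_time`, `data_time`, `top1`) through `update(value, n)` and reads `.val`/`.avg`; a minimal sketch of such a meter, assuming only that interface (the project's own definition may differ):

class AverageMeter:
    # tracks the latest value plus a running average, as used by
    # `losses.update(loss.item(), bsz)` and `losses.avg` above
    def __init__(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0
    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count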
| true
| true
|
1c41170638461112b76310613551c74f5b962a30
| 2,150
|
py
|
Python
|
tensorpack/tfutils/symbolic_functions.py
|
Neovairis/tensorpack
|
ca0969089847c37a893a8e99317214c5899278db
|
[
"Apache-2.0"
] | 1
|
2020-07-06T20:27:02.000Z
|
2020-07-06T20:27:02.000Z
|
tensorpack/tfutils/symbolic_functions.py
|
jrdeco560/tensorpack
|
242dc71cafb9642e68a2bfb58bcf6ad45ccbb35c
|
[
"Apache-2.0"
] | 7
|
2019-12-16T21:58:30.000Z
|
2022-02-10T00:17:01.000Z
|
tensorpack/tfutils/symbolic_functions.py
|
jrdeco560/tensorpack
|
242dc71cafb9642e68a2bfb58bcf6ad45ccbb35c
|
[
"Apache-2.0"
] | 2
|
2019-09-04T00:02:29.000Z
|
2020-07-06T20:27:04.000Z
|
# -*- coding: utf-8 -*-
# File: symbolic_functions.py
import tensorflow as tf
from ..compat import tfv1
from ..utils.develop import deprecated
__all__ = ['print_stat', 'rms']
def print_stat(x, message=None):
""" A simple print Op that might be easier to use than :meth:`tf.Print`.
Use it like: ``x = print_stat(x, message='This is x')``.
"""
if message is None:
message = x.op.name
lst = [tf.shape(x), tf.reduce_mean(x)]
if x.dtype.is_floating:
lst.append(rms(x))
return tf.Print(x, lst + [x], summarize=20,
message=message, name='print_' + x.op.name)
# for internal use only
def rms(x, name=None):
"""
Returns:
root mean square of tensor x.
"""
if name is None:
name = x.op.name + '/rms'
with tfv1.name_scope(None): # name already contains the scope
return tf.sqrt(tf.reduce_mean(tf.square(x)), name=name)
return tf.sqrt(tf.reduce_mean(tf.square(x)), name=name)
# doesn't hurt to leave it here
@deprecated("Please implement it by yourself.", "2018-04-28")
def psnr(prediction, ground_truth, maxp=None, name='psnr'):
"""`Peak Signal to Noise Ratio <https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio>`_.
.. math::
PSNR = 20 \cdot \log_{10}(MAX_p) - 10 \cdot \log_{10}(MSE)
Args:
prediction: a :class:`tf.Tensor` representing the prediction signal.
ground_truth: another :class:`tf.Tensor` with the same shape.
        maxp: maximum possible pixel value of the image (255 in 8-bit images)
Returns:
A scalar tensor representing the PSNR
"""
    if maxp is not None:
        maxp = float(maxp)
def log10(x):
with tf.name_scope("log10"):
numerator = tf.log(x)
denominator = tf.log(tf.constant(10, dtype=numerator.dtype))
return numerator / denominator
mse = tf.reduce_mean(tf.square(prediction - ground_truth))
if maxp is None:
psnr = tf.multiply(log10(mse), -10., name=name)
else:
psnr = tf.multiply(log10(mse), -10.)
psnr = tf.add(tf.multiply(20., log10(maxp)), psnr, name=name)
return psnr
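For an eager sanity check of the deprecated graph-mode `psnr` above, the same formula can be evaluated in NumPy; a minimal reference sketch:

import numpy as np

def psnr_np(prediction, ground_truth, maxp=255.0):
    # PSNR = 20 * log10(maxp) - 10 * log10(MSE)
    pred = np.asarray(prediction, dtype=np.float64)
    gt = np.asarray(ground_truth, dtype=np.float64)
    mse = np.mean((pred - gt) ** 2)
    if mse == 0:
        return float('inf')
    return 20.0 * np.log10(maxp) - 10.0 * np.log10(mse)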
| 29.452055
| 96
| 0.61814
|
import tensorflow as tf
from ..compat import tfv1
from ..utils.develop import deprecated
__all__ = ['print_stat', 'rms']
def print_stat(x, message=None):
if message is None:
message = x.op.name
lst = [tf.shape(x), tf.reduce_mean(x)]
if x.dtype.is_floating:
lst.append(rms(x))
return tf.Print(x, lst + [x], summarize=20,
message=message, name='print_' + x.op.name)
def rms(x, name=None):
if name is None:
name = x.op.name + '/rms'
with tfv1.name_scope(None):
return tf.sqrt(tf.reduce_mean(tf.square(x)), name=name)
return tf.sqrt(tf.reduce_mean(tf.square(x)), name=name)
@deprecated("Please implement it by yourself.", "2018-04-28")
def psnr(prediction, ground_truth, maxp=None, name='psnr'):
maxp = float(maxp)
def log10(x):
with tf.name_scope("log10"):
numerator = tf.log(x)
denominator = tf.log(tf.constant(10, dtype=numerator.dtype))
return numerator / denominator
mse = tf.reduce_mean(tf.square(prediction - ground_truth))
if maxp is None:
psnr = tf.multiply(log10(mse), -10., name=name)
else:
psnr = tf.multiply(log10(mse), -10.)
psnr = tf.add(tf.multiply(20., log10(maxp)), psnr, name=name)
return psnr
| true
| true
|
1c41174e348735a3adcfa40669ba4be4956df176
| 1,656
|
py
|
Python
|
thresh_global.py
|
Edward-HE/SDUWH_DIP_2020
|
b0435590e0fb86d37964ad800806e31b9b4757f4
|
[
"MIT"
] | 1
|
2021-07-17T01:12:12.000Z
|
2021-07-17T01:12:12.000Z
|
thresh_global.py
|
Edward-HE/SDUWH_DIP_2020
|
b0435590e0fb86d37964ad800806e31b9b4757f4
|
[
"MIT"
] | null | null | null |
thresh_global.py
|
Edward-HE/SDUWH_DIP_2020
|
b0435590e0fb86d37964ad800806e31b9b4757f4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'thresh_global.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_thresh_global(object):
def setupUi(self, thresh_global):
thresh_global.setObjectName("thresh_global")
thresh_global.resize(400, 300)
self.buttonBox = QtWidgets.QDialogButtonBox(thresh_global)
self.buttonBox.setGeometry(QtCore.QRect(-50, 240, 341, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.label = QtWidgets.QLabel(thresh_global)
self.label.setGeometry(QtCore.QRect(60, 120, 91, 20))
self.label.setObjectName("label")
self.lineEdit = QtWidgets.QLineEdit(thresh_global)
self.lineEdit.setGeometry(QtCore.QRect(180, 120, 113, 21))
self.lineEdit.setObjectName("lineEdit")
self.retranslateUi(thresh_global)
self.buttonBox.accepted.connect(thresh_global.accept)
self.buttonBox.rejected.connect(thresh_global.reject)
QtCore.QMetaObject.connectSlotsByName(thresh_global)
def retranslateUi(self, thresh_global):
_translate = QtCore.QCoreApplication.translate
thresh_global.setWindowTitle(_translate("thresh_global", "Dialog"))
self.label.setText(_translate("thresh_global", "请输入阈值:"))
| 42.461538
| 106
| 0.726449
|
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_thresh_global(object):
def setupUi(self, thresh_global):
thresh_global.setObjectName("thresh_global")
thresh_global.resize(400, 300)
self.buttonBox = QtWidgets.QDialogButtonBox(thresh_global)
self.buttonBox.setGeometry(QtCore.QRect(-50, 240, 341, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.label = QtWidgets.QLabel(thresh_global)
self.label.setGeometry(QtCore.QRect(60, 120, 91, 20))
self.label.setObjectName("label")
self.lineEdit = QtWidgets.QLineEdit(thresh_global)
self.lineEdit.setGeometry(QtCore.QRect(180, 120, 113, 21))
self.lineEdit.setObjectName("lineEdit")
self.retranslateUi(thresh_global)
self.buttonBox.accepted.connect(thresh_global.accept)
self.buttonBox.rejected.connect(thresh_global.reject)
QtCore.QMetaObject.connectSlotsByName(thresh_global)
def retranslateUi(self, thresh_global):
_translate = QtCore.QCoreApplication.translate
thresh_global.setWindowTitle(_translate("thresh_global", "Dialog"))
self.label.setText(_translate("thresh_global", "请输入阈值:"))
| true
| true
|
1c4117f965afeacf608b6fb6210ad877a225ed13
| 2,499
|
py
|
Python
|
static/brythonlib/cs1graphics/text.py
|
pythonpad/vue-pythonpad-runner
|
52decba9607b3b7b050ee0bf6dd4ef07ae644587
|
[
"MIT"
] | 3
|
2021-01-26T16:18:45.000Z
|
2021-09-15T00:57:12.000Z
|
static/brythonlib/cs1graphics/text.py
|
pythonpad/vue-pythonpad-runner
|
52decba9607b3b7b050ee0bf6dd4ef07ae644587
|
[
"MIT"
] | null | null | null |
static/brythonlib/cs1graphics/text.py
|
pythonpad/vue-pythonpad-runner
|
52decba9607b3b7b050ee0bf6dd4ef07ae644587
|
[
"MIT"
] | 2
|
2021-01-26T16:18:47.000Z
|
2021-10-21T20:45:20.000Z
|
from .drawable import Drawable
from .point import Point
PT_PIXEL_RATIO = 1.32
class Text(Drawable):
def __init__(self, message='', fontsize=12, centerPt=None):
if not isinstance(message, str):
raise TypeError('message must be a string')
if not isinstance(fontsize, (int, float)):
raise TypeError('fontsize must be numeric')
if fontsize <= 0:
raise ValueError('fontsize must be positive')
if centerPt and not isinstance(centerPt, Point):
raise TypeError('center must be a Point')
Drawable.__init__(self)
self.text = message
self.size = fontsize
self.color = [0, 0, 0]
self.initx = -self.getRenderedWidth() / 2
self.inity = -self.getRenderedHeight() / 2
if centerPt:
self.move(centerPt.getX(), centerPt.getY())
def __deepcopy__(self, memo={}):
drawable = super().__deepcopy__()
drawable.text = self.text
drawable.size = self.size
drawable.color = self.color
drawable.initx = self.initx
drawable.inity = self.inity
return drawable
def clone(self):
return self.__deepcopy__()
def draw(self):
d = Drawable.draw(self)
d['type'] = 'text'
d['initx'] = self.initx
d['inity'] = self.inity
d['text'] = self.text
d['size'] = self.size * PT_PIXEL_RATIO
d['color'] = self.color
return d
def getRenderedWidth(self):
lines = self.text.split('\n')
maxlen = max([len(line) for line in lines])
return self.size * PT_PIXEL_RATIO * 0.5 * maxlen
def getRenderedHeight(self):
lines = self.text.split('\n')
return self.size * PT_PIXEL_RATIO * len(lines)
def getDimensions(self):
return self.getRenderedWidth(), self.getRenderedHeight()
def getFontColor(self):
return self.color
def getFontSize(self):
return self.size
def getMessage(self):
return self.text
def setFontColor(self, color):
self.color = color
self.update()
def setFontSize(self, fontsize):
self.size = fontsize
self.initx = -self.getRenderedWidth() / 2
self.inity = -self.getRenderedHeight() / 2
self.update()
def setMessage(self, message):
self.text = message
self.initx = -self.getRenderedWidth() / 2
self.inity = -self.getRenderedHeight() / 2
self.update()
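Both extent methods above use a fixed-width glyph model: each line is `fontsize * PT_PIXEL_RATIO` pixels tall and each character half that wide. A standalone sketch of the same arithmetic, for illustration only:

def rendered_dims(message, fontsize, ratio=1.32):
    lines = message.split('\n')
    width = fontsize * ratio * 0.5 * max(len(line) for line in lines)
    height = fontsize * ratio * len(lines)
    return width, height

print(rendered_dims('hello\nworld!', 12))  # roughly (47.52, 31.68)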
| 29.75
| 64
| 0.595038
|
from .drawable import Drawable
from .point import Point
PT_PIXEL_RATIO = 1.32
class Text(Drawable):
def __init__(self, message='', fontsize=12, centerPt=None):
if not isinstance(message, str):
raise TypeError('message must be a string')
if not isinstance(fontsize, (int, float)):
raise TypeError('fontsize must be numeric')
if fontsize <= 0:
raise ValueError('fontsize must be positive')
if centerPt and not isinstance(centerPt, Point):
raise TypeError('center must be a Point')
Drawable.__init__(self)
self.text = message
self.size = fontsize
self.color = [0, 0, 0]
self.initx = -self.getRenderedWidth() / 2
self.inity = -self.getRenderedHeight() / 2
if centerPt:
self.move(centerPt.getX(), centerPt.getY())
def __deepcopy__(self, memo={}):
drawable = super().__deepcopy__()
drawable.text = self.text
drawable.size = self.size
drawable.color = self.color
drawable.initx = self.initx
drawable.inity = self.inity
return drawable
def clone(self):
return self.__deepcopy__()
def draw(self):
d = Drawable.draw(self)
d['type'] = 'text'
d['initx'] = self.initx
d['inity'] = self.inity
d['text'] = self.text
d['size'] = self.size * PT_PIXEL_RATIO
d['color'] = self.color
return d
def getRenderedWidth(self):
lines = self.text.split('\n')
maxlen = max([len(line) for line in lines])
return self.size * PT_PIXEL_RATIO * 0.5 * maxlen
def getRenderedHeight(self):
lines = self.text.split('\n')
return self.size * PT_PIXEL_RATIO * len(lines)
def getDimensions(self):
return self.getRenderedWidth(), self.getRenderedHeight()
def getFontColor(self):
return self.color
def getFontSize(self):
return self.size
def getMessage(self):
return self.text
def setFontColor(self, color):
self.color = color
self.update()
def setFontSize(self, fontsize):
self.size = fontsize
self.initx = -self.getRenderedWidth() / 2
self.inity = -self.getRenderedHeight() / 2
self.update()
def setMessage(self, message):
self.text = message
self.initx = -self.getRenderedWidth() / 2
self.inity = -self.getRenderedHeight() / 2
self.update()
| true
| true
|
1c41183278974b02e6fb9500ddd99eac4fc9c1ad
| 486
|
py
|
Python
|
main.py
|
doc22940/Visualize-Realtime-Data-Stream-Chart-in-Flask
|
e629ca011b8d54ab1a0c74271ea238b7fe1e459e
|
[
"MIT"
] | 1
|
2020-03-06T18:30:15.000Z
|
2020-03-06T18:30:15.000Z
|
main.py
|
doc22940/Visualize-Realtime-Data-Stream-Chart-in-Flask
|
e629ca011b8d54ab1a0c74271ea238b7fe1e459e
|
[
"MIT"
] | null | null | null |
main.py
|
doc22940/Visualize-Realtime-Data-Stream-Chart-in-Flask
|
e629ca011b8d54ab1a0c74271ea238b7fe1e459e
|
[
"MIT"
] | null | null | null |
from flask_handler import start_flask_application, app
import webbrowser
import threading
import functools
from config_handler import ConfigHandler
"""
Start Program with this file by running "python3 start.py"
"""
[HOST, PORT] = ConfigHandler().get_all("Website") # pylint: disable=unbalanced-tuple-unpacking
url = "http://"+HOST+":{0}".format(PORT)
if __name__ == '__main__':
threading.Timer(1, functools.partial( webbrowser.open, url )).start()
start_flask_application()
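The one-second `threading.Timer` gives the server time to bind before the browser requests the page; the pattern is independent of Flask, as this self-contained stdlib sketch shows (host and port are hypothetical):

import functools
import threading
import webbrowser
from http.server import HTTPServer, SimpleHTTPRequestHandler

demo_url = 'http://127.0.0.1:8000'
threading.Timer(1, functools.partial(webbrowser.open, demo_url)).start()
HTTPServer(('127.0.0.1', 8000), SimpleHTTPRequestHandler).serve_forever()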
| 27
| 94
| 0.755144
|
from flask_handler import start_flask_application, app
import webbrowser
import threading
import functools
from config_handler import ConfigHandler
[HOST, PORT] = ConfigHandler().get_all("Website")
url = "http://"+HOST+":{0}".format(PORT)
if __name__ == '__main__':
threading.Timer(1, functools.partial( webbrowser.open, url )).start()
start_flask_application()
| true
| true
|
1c41185a4314796bf23baa1446d282fa789f7ad5
| 1,016
|
py
|
Python
|
src/insertDelete/set.py
|
rajitbanerjee/leetcode
|
720fcdd88d371e2d6592ceec8370a6760a77bb89
|
[
"CC0-1.0"
] | null | null | null |
src/insertDelete/set.py
|
rajitbanerjee/leetcode
|
720fcdd88d371e2d6592ceec8370a6760a77bb89
|
[
"CC0-1.0"
] | null | null | null |
src/insertDelete/set.py
|
rajitbanerjee/leetcode
|
720fcdd88d371e2d6592ceec8370a6760a77bb89
|
[
"CC0-1.0"
] | 1
|
2021-04-28T18:17:55.000Z
|
2021-04-28T18:17:55.000Z
|
import random
class RandomizedSet:
def __init__(self):
self.dc = {}
def insert(self, val: int) -> bool:
"""
Inserts a value to the set. Returns true if the set did not already contain the specified element.
"""
if val in self.dc:
return False
else:
self.dc[val] = 1
return True
def remove(self, val: int) -> bool:
"""
Removes a value from the set. Returns true if the set contained the specified element.
"""
if val in self.dc:
del self.dc[val]
return True
else:
return False
def getRandom(self) -> int:
"""
Get a random element from the set.
"""
return random.choice(list(self.dc))
# Your RandomizedSet object will be instantiated and called as such:
# obj = RandomizedSet()
# param_1 = obj.insert(val)
# param_2 = obj.remove(val)
# param_3 = obj.getRandom()
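Note that `getRandom` above is O(n): `list(self.dc)` copies every key on each call. The usual O(1)-average solution to this problem pairs a list with a value-to-index map; a sketch of that alternative:

import random

class RandomizedSetO1:
    def __init__(self):
        self.items = []  # values, enabling O(1) random choice
        self.pos = {}    # value -> index into self.items
    def insert(self, val: int) -> bool:
        if val in self.pos:
            return False
        self.pos[val] = len(self.items)
        self.items.append(val)
        return True
    def remove(self, val: int) -> bool:
        i = self.pos.pop(val, None)
        if i is None:
            return False
        last = self.items.pop()
        if i < len(self.items):  # fill the hole with the former last element
            self.items[i] = last
            self.pos[last] = i
        return True
    def getRandom(self) -> int:
        return random.choice(self.items)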
| 24.780488
| 107
| 0.538386
|
import random
class RandomizedSet:
def __init__(self):
self.dc = {}
def insert(self, val: int) -> bool:
if val in self.dc:
return False
else:
self.dc[val] = 1
return True
def remove(self, val: int) -> bool:
if val in self.dc:
del self.dc[val]
return True
else:
return False
def getRandom(self) -> int:
return random.choice(list(self.dc))
| true
| true
|
1c411886ac7d3a4d304e2c9264da253cd882e4e2
| 18,675
|
py
|
Python
|
mypy/server/astmerge.py
|
amikrop/mypy
|
c701d3a8c9696440f005a23ab620a8b6f12ce416
|
[
"PSF-2.0"
] | 35
|
2016-03-30T09:25:14.000Z
|
2022-03-12T10:53:11.000Z
|
mypy/server/astmerge.py
|
amikrop/mypy
|
c701d3a8c9696440f005a23ab620a8b6f12ce416
|
[
"PSF-2.0"
] | 20
|
2021-05-03T18:02:23.000Z
|
2022-03-12T12:01:04.000Z
|
env/lib/python3.9/site-packages/mypy/server/astmerge.py
|
simotwo/AbileneParadox-ddd
|
c85961efb37aba43c0d99ed1c36d083507e2b2d3
|
[
"MIT"
] | 6
|
2016-01-29T04:33:27.000Z
|
2019-11-03T19:19:43.000Z
|
"""Merge a new version of a module AST and symbol table to older versions of those.
When the source code of a module has a change in fine-grained incremental mode,
we build a new AST from the updated source. However, other parts of the program
may have direct references to parts of the old AST (namely, those nodes exposed
in the module symbol table). The merge operation changes the identities of new
AST nodes that have a correspondence in the old AST to the old ones so that
existing cross-references in other modules will continue to point to the correct
nodes. Also internal cross-references within the new AST are replaced. AST nodes
that aren't externally visible will get new, distinct object identities. This
applies to most expression and statement nodes, for example.
We perform this merge operation so that we don't have to update all
external references (which would be slow and fragile) or always perform
translation when looking up references (which would be hard to retrofit).
The AST merge operation is performed after semantic analysis. Semantic
analysis has to deal with potentially multiple aliases to certain AST
nodes (in particular, MypyFile nodes). Type checking assumes that we
don't have multiple variants of a single AST node visible to the type
checker.
Discussion of some notable special cases:
* If a node is replaced with a different kind of node (say, a function is
replaced with a class), we don't perform the merge. Fine-grained dependencies
will be used to rebind all references to the node.
* If a function is replaced with another function with an identical signature,
call sites continue to point to the same object (by identity) and don't need
to be reprocessed. Similarly, if a class is replaced with a class that is
sufficiently similar (MRO preserved, etc.), class references don't need any
processing. A typical incremental update to a file only changes a few
externally visible things in a module, and this means that often only few
external references need any processing, even if the modified module is large.
* A no-op update of a module should not require any processing outside the
module, since all relevant object identities are preserved.
* The AST diff operation (mypy.server.astdiff) and the top-level fine-grained
incremental logic (mypy.server.update) handle the cases where the new AST has
differences from the old one that may need to be propagated to elsewhere in the
program.
See the main entry point merge_asts for more details.
"""
from typing import Dict, List, cast, TypeVar, Optional
from mypy.nodes import (
MypyFile, SymbolTable, Block, AssignmentStmt, NameExpr, MemberExpr, RefExpr, TypeInfo,
FuncDef, ClassDef, NamedTupleExpr, SymbolNode, Var, Statement, SuperExpr, NewTypeExpr,
OverloadedFuncDef, LambdaExpr, TypedDictExpr, EnumCallExpr, FuncBase, TypeAliasExpr, CallExpr,
CastExpr, TypeAlias,
MDEF
)
from mypy.traverser import TraverserVisitor
from mypy.types import (
Type, SyntheticTypeVisitor, Instance, AnyType, NoneType, CallableType, ErasedType, DeletedType,
TupleType, TypeType, TypeVarType, TypedDictType, UnboundType, UninhabitedType, UnionType,
Overloaded, TypeVarDef, TypeList, CallableArgument, EllipsisType, StarType, LiteralType,
RawExpressionType, PartialType, PlaceholderType, TypeAliasType
)
from mypy.util import get_prefix, replace_object_state
from mypy.typestate import TypeState
def merge_asts(old: MypyFile, old_symbols: SymbolTable,
new: MypyFile, new_symbols: SymbolTable) -> None:
"""Merge a new version of a module AST to a previous version.
The main idea is to preserve the identities of externally visible
nodes in the old AST (that have a corresponding node in the new AST).
All old node state (outside identity) will come from the new AST.
When this returns, 'old' will refer to the merged AST, but 'new_symbols'
will be the new symbol table. 'new' and 'old_symbols' will no longer be
valid.
"""
assert new.fullname == old.fullname
# Find the mapping from new to old node identities for all nodes
# whose identities should be preserved.
replacement_map = replacement_map_from_symbol_table(
old_symbols, new_symbols, prefix=old.fullname)
# Also replace references to the new MypyFile node.
replacement_map[new] = old
# Perform replacements to everywhere within the new AST (not including symbol
# tables).
node = replace_nodes_in_ast(new, replacement_map)
assert node is old
# Also replace AST node references in the *new* symbol table (we'll
# continue to use the new symbol table since it has all the new definitions
# that have no correspondence in the old AST).
replace_nodes_in_symbol_table(new_symbols, replacement_map)
def replacement_map_from_symbol_table(
old: SymbolTable, new: SymbolTable, prefix: str) -> Dict[SymbolNode, SymbolNode]:
"""Create a new-to-old object identity map by comparing two symbol table revisions.
Both symbol tables must refer to revisions of the same module id. The symbol tables
are compared recursively (recursing into nested class symbol tables), but only within
the given module prefix. Don't recurse into other modules accessible through the symbol
table.
"""
replacements = {} # type: Dict[SymbolNode, SymbolNode]
for name, node in old.items():
if (name in new and (node.kind == MDEF
or node.node and get_prefix(node.node.fullname) == prefix)):
new_node = new[name]
if (type(new_node.node) == type(node.node) # noqa
and new_node.node and node.node and
new_node.node.fullname == node.node.fullname and
new_node.kind == node.kind):
replacements[new_node.node] = node.node
if isinstance(node.node, TypeInfo) and isinstance(new_node.node, TypeInfo):
type_repl = replacement_map_from_symbol_table(
node.node.names,
new_node.node.names,
prefix)
replacements.update(type_repl)
return replacements
def replace_nodes_in_ast(node: SymbolNode,
replacements: Dict[SymbolNode, SymbolNode]) -> SymbolNode:
"""Replace all references to replacement map keys within an AST node, recursively.
Also replace the *identity* of any nodes that have replacements. Return the
*replaced* version of the argument node (which may have a different identity, if
it's included in the replacement map).
"""
visitor = NodeReplaceVisitor(replacements)
node.accept(visitor)
return replacements.get(node, node)
SN = TypeVar('SN', bound=SymbolNode)
class NodeReplaceVisitor(TraverserVisitor):
"""Transform some nodes to new identities in an AST.
Only nodes that live in the symbol table may be
    replaced, which simplifies the implementation somewhat. Also
replace all references to the old identities.
"""
def __init__(self, replacements: Dict[SymbolNode, SymbolNode]) -> None:
self.replacements = replacements
def visit_mypy_file(self, node: MypyFile) -> None:
node = self.fixup(node)
node.defs = self.replace_statements(node.defs)
super().visit_mypy_file(node)
def visit_block(self, node: Block) -> None:
super().visit_block(node)
node.body = self.replace_statements(node.body)
def visit_func_def(self, node: FuncDef) -> None:
node = self.fixup(node)
self.process_base_func(node)
super().visit_func_def(node)
def visit_overloaded_func_def(self, node: OverloadedFuncDef) -> None:
self.process_base_func(node)
super().visit_overloaded_func_def(node)
def visit_class_def(self, node: ClassDef) -> None:
# TODO additional things?
node.info = self.fixup_and_reset_typeinfo(node.info)
node.defs.body = self.replace_statements(node.defs.body)
info = node.info
for tv in node.type_vars:
self.process_type_var_def(tv)
if info:
if info.is_named_tuple:
self.process_synthetic_type_info(info)
else:
self.process_type_info(info)
super().visit_class_def(node)
def process_base_func(self, node: FuncBase) -> None:
self.fixup_type(node.type)
node.info = self.fixup(node.info)
if node.unanalyzed_type:
# Unanalyzed types can have AST node references
self.fixup_type(node.unanalyzed_type)
def process_type_var_def(self, tv: TypeVarDef) -> None:
for value in tv.values:
self.fixup_type(value)
self.fixup_type(tv.upper_bound)
def visit_assignment_stmt(self, node: AssignmentStmt) -> None:
self.fixup_type(node.type)
super().visit_assignment_stmt(node)
# Expressions
def visit_name_expr(self, node: NameExpr) -> None:
self.visit_ref_expr(node)
def visit_member_expr(self, node: MemberExpr) -> None:
if node.def_var:
node.def_var = self.fixup(node.def_var)
self.visit_ref_expr(node)
super().visit_member_expr(node)
def visit_ref_expr(self, node: RefExpr) -> None:
if node.node is not None:
node.node = self.fixup(node.node)
if isinstance(node.node, Var):
# The Var node may be an orphan and won't otherwise be processed.
node.node.accept(self)
def visit_namedtuple_expr(self, node: NamedTupleExpr) -> None:
super().visit_namedtuple_expr(node)
node.info = self.fixup_and_reset_typeinfo(node.info)
self.process_synthetic_type_info(node.info)
def visit_cast_expr(self, node: CastExpr) -> None:
super().visit_cast_expr(node)
self.fixup_type(node.type)
def visit_super_expr(self, node: SuperExpr) -> None:
super().visit_super_expr(node)
if node.info is not None:
node.info = self.fixup(node.info)
def visit_call_expr(self, node: CallExpr) -> None:
super().visit_call_expr(node)
if isinstance(node.analyzed, SymbolNode):
node.analyzed = self.fixup(node.analyzed)
def visit_newtype_expr(self, node: NewTypeExpr) -> None:
if node.info:
node.info = self.fixup_and_reset_typeinfo(node.info)
self.process_synthetic_type_info(node.info)
self.fixup_type(node.old_type)
super().visit_newtype_expr(node)
def visit_lambda_expr(self, node: LambdaExpr) -> None:
node.info = self.fixup(node.info)
super().visit_lambda_expr(node)
def visit_typeddict_expr(self, node: TypedDictExpr) -> None:
super().visit_typeddict_expr(node)
node.info = self.fixup_and_reset_typeinfo(node.info)
self.process_synthetic_type_info(node.info)
def visit_enum_call_expr(self, node: EnumCallExpr) -> None:
node.info = self.fixup_and_reset_typeinfo(node.info)
self.process_synthetic_type_info(node.info)
super().visit_enum_call_expr(node)
def visit_type_alias_expr(self, node: TypeAliasExpr) -> None:
self.fixup_type(node.type)
super().visit_type_alias_expr(node)
# Others
def visit_var(self, node: Var) -> None:
node.info = self.fixup(node.info)
self.fixup_type(node.type)
super().visit_var(node)
def visit_type_alias(self, node: TypeAlias) -> None:
self.fixup_type(node.target)
super().visit_type_alias(node)
# Helpers
def fixup(self, node: SN) -> SN:
if node in self.replacements:
new = self.replacements[node]
replace_object_state(new, node)
return cast(SN, new)
return node
def fixup_and_reset_typeinfo(self, node: TypeInfo) -> TypeInfo:
"""Fix-up type info and reset subtype caches.
This needs to be called at least once per each merged TypeInfo, as otherwise we
may leak stale caches.
"""
if node in self.replacements:
# The subclass relationships may change, so reset all caches relevant to the
# old MRO.
new = cast(TypeInfo, self.replacements[node])
TypeState.reset_all_subtype_caches_for(new)
return self.fixup(node)
def fixup_type(self, typ: Optional[Type]) -> None:
if typ is not None:
typ.accept(TypeReplaceVisitor(self.replacements))
def process_type_info(self, info: Optional[TypeInfo]) -> None:
if info is None:
return
self.fixup_type(info.declared_metaclass)
self.fixup_type(info.metaclass_type)
self.fixup_type(info._promote)
self.fixup_type(info.tuple_type)
self.fixup_type(info.typeddict_type)
info.defn.info = self.fixup(info)
replace_nodes_in_symbol_table(info.names, self.replacements)
for i, item in enumerate(info.mro):
info.mro[i] = self.fixup(info.mro[i])
for i, base in enumerate(info.bases):
self.fixup_type(info.bases[i])
def process_synthetic_type_info(self, info: TypeInfo) -> None:
# Synthetic types (types not created using a class statement) don't
# have bodies in the AST so we need to iterate over their symbol
# tables separately, unlike normal classes.
self.process_type_info(info)
for name, node in info.names.items():
if node.node:
node.node.accept(self)
def replace_statements(self, nodes: List[Statement]) -> List[Statement]:
result = []
for node in nodes:
if isinstance(node, SymbolNode):
node = self.fixup(node)
result.append(node)
return result
class TypeReplaceVisitor(SyntheticTypeVisitor[None]):
"""Similar to NodeReplaceVisitor, but for type objects.
Note: this visitor may sometimes visit unanalyzed types
    such as 'UnboundType' and 'RawExpressionType'. For example, see
NodeReplaceVisitor.process_base_func.
"""
def __init__(self, replacements: Dict[SymbolNode, SymbolNode]) -> None:
self.replacements = replacements
def visit_instance(self, typ: Instance) -> None:
typ.type = self.fixup(typ.type)
for arg in typ.args:
arg.accept(self)
if typ.last_known_value:
typ.last_known_value.accept(self)
def visit_type_alias_type(self, typ: TypeAliasType) -> None:
assert typ.alias is not None
typ.alias = self.fixup(typ.alias)
for arg in typ.args:
arg.accept(self)
def visit_any(self, typ: AnyType) -> None:
pass
def visit_none_type(self, typ: NoneType) -> None:
pass
def visit_callable_type(self, typ: CallableType) -> None:
for arg in typ.arg_types:
arg.accept(self)
typ.ret_type.accept(self)
if typ.definition:
# No need to fixup since this is just a cross-reference.
typ.definition = self.replacements.get(typ.definition, typ.definition)
# Fallback can be None for callable types that haven't been semantically analyzed.
if typ.fallback is not None:
typ.fallback.accept(self)
for tv in typ.variables:
if isinstance(tv, TypeVarDef):
tv.upper_bound.accept(self)
for value in tv.values:
value.accept(self)
def visit_overloaded(self, t: Overloaded) -> None:
for item in t.items():
item.accept(self)
# Fallback can be None for overloaded types that haven't been semantically analyzed.
if t.fallback is not None:
t.fallback.accept(self)
def visit_erased_type(self, t: ErasedType) -> None:
# This type should exist only temporarily during type inference
raise RuntimeError
def visit_deleted_type(self, typ: DeletedType) -> None:
pass
def visit_partial_type(self, typ: PartialType) -> None:
raise RuntimeError
def visit_tuple_type(self, typ: TupleType) -> None:
for item in typ.items:
item.accept(self)
# Fallback can be None for implicit tuple types that haven't been semantically analyzed.
if typ.partial_fallback is not None:
typ.partial_fallback.accept(self)
def visit_type_type(self, typ: TypeType) -> None:
typ.item.accept(self)
def visit_type_var(self, typ: TypeVarType) -> None:
typ.upper_bound.accept(self)
for value in typ.values:
value.accept(self)
def visit_typeddict_type(self, typ: TypedDictType) -> None:
for value_type in typ.items.values():
value_type.accept(self)
typ.fallback.accept(self)
def visit_raw_expression_type(self, t: RawExpressionType) -> None:
pass
def visit_literal_type(self, typ: LiteralType) -> None:
typ.fallback.accept(self)
def visit_unbound_type(self, typ: UnboundType) -> None:
for arg in typ.args:
arg.accept(self)
def visit_type_list(self, typ: TypeList) -> None:
for item in typ.items:
item.accept(self)
def visit_callable_argument(self, typ: CallableArgument) -> None:
typ.typ.accept(self)
def visit_ellipsis_type(self, typ: EllipsisType) -> None:
pass
def visit_star_type(self, typ: StarType) -> None:
typ.type.accept(self)
def visit_uninhabited_type(self, typ: UninhabitedType) -> None:
pass
def visit_union_type(self, typ: UnionType) -> None:
for item in typ.items:
item.accept(self)
def visit_placeholder_type(self, t: PlaceholderType) -> None:
for item in t.args:
item.accept(self)
# Helpers
def fixup(self, node: SN) -> SN:
if node in self.replacements:
new = self.replacements[node]
return cast(SN, new)
return node
def replace_nodes_in_symbol_table(symbols: SymbolTable,
replacements: Dict[SymbolNode, SymbolNode]) -> None:
for name, node in symbols.items():
if node.node:
if node.node in replacements:
new = replacements[node.node]
old = node.node
replace_object_state(new, old)
node.node = new
if isinstance(node.node, (Var, TypeAlias)):
# Handle them here just in case these aren't exposed through the AST.
node.node.accept(NodeReplaceVisitor(replacements))
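The identity-preserving trick the module docstring describes can be reduced to a toy, self-contained sketch (hypothetical names; the real machinery is `replace_object_state` plus the visitors above): keep the old object's identity but adopt the new object's state, so cross-references held elsewhere remain valid.

class Sym:
    def __init__(self, name, payload):
        self.name = name
        self.payload = payload

def adopt_state(new, old):
    old.__dict__ = new.__dict__.copy()  # old identity, new state

old = Sym('f', 1)
external_ref = old       # e.g. a reference held by another module
new = Sym('f', 2)        # node rebuilt from updated source
adopt_state(new, old)
assert external_ref is old and external_ref.payload == 2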
| 39.734043
| 99
| 0.673039
|
from typing import Dict, List, cast, TypeVar, Optional
from mypy.nodes import (
MypyFile, SymbolTable, Block, AssignmentStmt, NameExpr, MemberExpr, RefExpr, TypeInfo,
FuncDef, ClassDef, NamedTupleExpr, SymbolNode, Var, Statement, SuperExpr, NewTypeExpr,
OverloadedFuncDef, LambdaExpr, TypedDictExpr, EnumCallExpr, FuncBase, TypeAliasExpr, CallExpr,
CastExpr, TypeAlias,
MDEF
)
from mypy.traverser import TraverserVisitor
from mypy.types import (
Type, SyntheticTypeVisitor, Instance, AnyType, NoneType, CallableType, ErasedType, DeletedType,
TupleType, TypeType, TypeVarType, TypedDictType, UnboundType, UninhabitedType, UnionType,
Overloaded, TypeVarDef, TypeList, CallableArgument, EllipsisType, StarType, LiteralType,
RawExpressionType, PartialType, PlaceholderType, TypeAliasType
)
from mypy.util import get_prefix, replace_object_state
from mypy.typestate import TypeState
def merge_asts(old: MypyFile, old_symbols: SymbolTable,
new: MypyFile, new_symbols: SymbolTable) -> None:
assert new.fullname == old.fullname
replacement_map = replacement_map_from_symbol_table(
old_symbols, new_symbols, prefix=old.fullname)
replacement_map[new] = old
node = replace_nodes_in_ast(new, replacement_map)
assert node is old
    # Also replace AST node references in the *new* symbol table (we'll
    # continue to use the new symbol table since it has all the new definitions
    # that have no correspondence in the old AST).
replace_nodes_in_symbol_table(new_symbols, replacement_map)
def replacement_map_from_symbol_table(
old: SymbolTable, new: SymbolTable, prefix: str) -> Dict[SymbolNode, SymbolNode]:
replacements = {} # type: Dict[SymbolNode, SymbolNode]
for name, node in old.items():
if (name in new and (node.kind == MDEF
or node.node and get_prefix(node.node.fullname) == prefix)):
new_node = new[name]
if (type(new_node.node) == type(node.node) # noqa
and new_node.node and node.node and
new_node.node.fullname == node.node.fullname and
new_node.kind == node.kind):
replacements[new_node.node] = node.node
if isinstance(node.node, TypeInfo) and isinstance(new_node.node, TypeInfo):
type_repl = replacement_map_from_symbol_table(
node.node.names,
new_node.node.names,
prefix)
replacements.update(type_repl)
return replacements
def replace_nodes_in_ast(node: SymbolNode,
replacements: Dict[SymbolNode, SymbolNode]) -> SymbolNode:
visitor = NodeReplaceVisitor(replacements)
node.accept(visitor)
return replacements.get(node, node)
SN = TypeVar('SN', bound=SymbolNode)
class NodeReplaceVisitor(TraverserVisitor):
def __init__(self, replacements: Dict[SymbolNode, SymbolNode]) -> None:
self.replacements = replacements
def visit_mypy_file(self, node: MypyFile) -> None:
node = self.fixup(node)
node.defs = self.replace_statements(node.defs)
super().visit_mypy_file(node)
def visit_block(self, node: Block) -> None:
super().visit_block(node)
node.body = self.replace_statements(node.body)
def visit_func_def(self, node: FuncDef) -> None:
node = self.fixup(node)
self.process_base_func(node)
super().visit_func_def(node)
def visit_overloaded_func_def(self, node: OverloadedFuncDef) -> None:
self.process_base_func(node)
super().visit_overloaded_func_def(node)
def visit_class_def(self, node: ClassDef) -> None:
# TODO additional things?
node.info = self.fixup_and_reset_typeinfo(node.info)
node.defs.body = self.replace_statements(node.defs.body)
info = node.info
for tv in node.type_vars:
self.process_type_var_def(tv)
if info:
if info.is_named_tuple:
self.process_synthetic_type_info(info)
else:
self.process_type_info(info)
super().visit_class_def(node)
def process_base_func(self, node: FuncBase) -> None:
self.fixup_type(node.type)
node.info = self.fixup(node.info)
if node.unanalyzed_type:
# Unanalyzed types can have AST node references
self.fixup_type(node.unanalyzed_type)
def process_type_var_def(self, tv: TypeVarDef) -> None:
for value in tv.values:
self.fixup_type(value)
self.fixup_type(tv.upper_bound)
def visit_assignment_stmt(self, node: AssignmentStmt) -> None:
self.fixup_type(node.type)
super().visit_assignment_stmt(node)
# Expressions
def visit_name_expr(self, node: NameExpr) -> None:
self.visit_ref_expr(node)
def visit_member_expr(self, node: MemberExpr) -> None:
if node.def_var:
node.def_var = self.fixup(node.def_var)
self.visit_ref_expr(node)
super().visit_member_expr(node)
def visit_ref_expr(self, node: RefExpr) -> None:
if node.node is not None:
node.node = self.fixup(node.node)
if isinstance(node.node, Var):
# The Var node may be an orphan and won't otherwise be processed.
node.node.accept(self)
def visit_namedtuple_expr(self, node: NamedTupleExpr) -> None:
super().visit_namedtuple_expr(node)
node.info = self.fixup_and_reset_typeinfo(node.info)
self.process_synthetic_type_info(node.info)
def visit_cast_expr(self, node: CastExpr) -> None:
super().visit_cast_expr(node)
self.fixup_type(node.type)
def visit_super_expr(self, node: SuperExpr) -> None:
super().visit_super_expr(node)
if node.info is not None:
node.info = self.fixup(node.info)
def visit_call_expr(self, node: CallExpr) -> None:
super().visit_call_expr(node)
if isinstance(node.analyzed, SymbolNode):
node.analyzed = self.fixup(node.analyzed)
def visit_newtype_expr(self, node: NewTypeExpr) -> None:
if node.info:
node.info = self.fixup_and_reset_typeinfo(node.info)
self.process_synthetic_type_info(node.info)
self.fixup_type(node.old_type)
super().visit_newtype_expr(node)
def visit_lambda_expr(self, node: LambdaExpr) -> None:
node.info = self.fixup(node.info)
super().visit_lambda_expr(node)
def visit_typeddict_expr(self, node: TypedDictExpr) -> None:
super().visit_typeddict_expr(node)
node.info = self.fixup_and_reset_typeinfo(node.info)
self.process_synthetic_type_info(node.info)
def visit_enum_call_expr(self, node: EnumCallExpr) -> None:
node.info = self.fixup_and_reset_typeinfo(node.info)
self.process_synthetic_type_info(node.info)
super().visit_enum_call_expr(node)
def visit_type_alias_expr(self, node: TypeAliasExpr) -> None:
self.fixup_type(node.type)
super().visit_type_alias_expr(node)
def visit_var(self, node: Var) -> None:
node.info = self.fixup(node.info)
self.fixup_type(node.type)
super().visit_var(node)
def visit_type_alias(self, node: TypeAlias) -> None:
self.fixup_type(node.target)
super().visit_type_alias(node)
def fixup(self, node: SN) -> SN:
if node in self.replacements:
new = self.replacements[node]
replace_object_state(new, node)
return cast(SN, new)
return node
def fixup_and_reset_typeinfo(self, node: TypeInfo) -> TypeInfo:
if node in self.replacements:
new = cast(TypeInfo, self.replacements[node])
TypeState.reset_all_subtype_caches_for(new)
return self.fixup(node)
def fixup_type(self, typ: Optional[Type]) -> None:
if typ is not None:
typ.accept(TypeReplaceVisitor(self.replacements))
def process_type_info(self, info: Optional[TypeInfo]) -> None:
if info is None:
return
self.fixup_type(info.declared_metaclass)
self.fixup_type(info.metaclass_type)
self.fixup_type(info._promote)
self.fixup_type(info.tuple_type)
self.fixup_type(info.typeddict_type)
info.defn.info = self.fixup(info)
replace_nodes_in_symbol_table(info.names, self.replacements)
for i, item in enumerate(info.mro):
info.mro[i] = self.fixup(info.mro[i])
for i, base in enumerate(info.bases):
self.fixup_type(info.bases[i])
def process_synthetic_type_info(self, info: TypeInfo) -> None:
        # Synthetic types (types not created using a class statement) don't
        # have bodies in the AST so we need to iterate over their symbol
        # tables separately, unlike normal classes.
self.process_type_info(info)
for name, node in info.names.items():
if node.node:
node.node.accept(self)
def replace_statements(self, nodes: List[Statement]) -> List[Statement]:
result = []
for node in nodes:
if isinstance(node, SymbolNode):
node = self.fixup(node)
result.append(node)
return result
class TypeReplaceVisitor(SyntheticTypeVisitor[None]):
def __init__(self, replacements: Dict[SymbolNode, SymbolNode]) -> None:
self.replacements = replacements
def visit_instance(self, typ: Instance) -> None:
typ.type = self.fixup(typ.type)
for arg in typ.args:
arg.accept(self)
if typ.last_known_value:
typ.last_known_value.accept(self)
def visit_type_alias_type(self, typ: TypeAliasType) -> None:
assert typ.alias is not None
typ.alias = self.fixup(typ.alias)
for arg in typ.args:
arg.accept(self)
def visit_any(self, typ: AnyType) -> None:
pass
def visit_none_type(self, typ: NoneType) -> None:
pass
def visit_callable_type(self, typ: CallableType) -> None:
for arg in typ.arg_types:
arg.accept(self)
typ.ret_type.accept(self)
if typ.definition:
# No need to fixup since this is just a cross-reference.
typ.definition = self.replacements.get(typ.definition, typ.definition)
# Fallback can be None for callable types that haven't been semantically analyzed.
if typ.fallback is not None:
typ.fallback.accept(self)
for tv in typ.variables:
if isinstance(tv, TypeVarDef):
tv.upper_bound.accept(self)
for value in tv.values:
value.accept(self)
def visit_overloaded(self, t: Overloaded) -> None:
for item in t.items():
item.accept(self)
if t.fallback is not None:
t.fallback.accept(self)
def visit_erased_type(self, t: ErasedType) -> None:
# This type should exist only temporarily during type inference
raise RuntimeError
def visit_deleted_type(self, typ: DeletedType) -> None:
pass
def visit_partial_type(self, typ: PartialType) -> None:
raise RuntimeError
def visit_tuple_type(self, typ: TupleType) -> None:
for item in typ.items:
item.accept(self)
# Fallback can be None for implicit tuple types that haven't been semantically analyzed.
if typ.partial_fallback is not None:
typ.partial_fallback.accept(self)
def visit_type_type(self, typ: TypeType) -> None:
typ.item.accept(self)
def visit_type_var(self, typ: TypeVarType) -> None:
typ.upper_bound.accept(self)
for value in typ.values:
value.accept(self)
def visit_typeddict_type(self, typ: TypedDictType) -> None:
for value_type in typ.items.values():
value_type.accept(self)
typ.fallback.accept(self)
def visit_raw_expression_type(self, t: RawExpressionType) -> None:
pass
def visit_literal_type(self, typ: LiteralType) -> None:
typ.fallback.accept(self)
def visit_unbound_type(self, typ: UnboundType) -> None:
for arg in typ.args:
arg.accept(self)
def visit_type_list(self, typ: TypeList) -> None:
for item in typ.items:
item.accept(self)
def visit_callable_argument(self, typ: CallableArgument) -> None:
typ.typ.accept(self)
def visit_ellipsis_type(self, typ: EllipsisType) -> None:
pass
def visit_star_type(self, typ: StarType) -> None:
typ.type.accept(self)
def visit_uninhabited_type(self, typ: UninhabitedType) -> None:
pass
def visit_union_type(self, typ: UnionType) -> None:
for item in typ.items:
item.accept(self)
def visit_placeholder_type(self, t: PlaceholderType) -> None:
for item in t.args:
item.accept(self)
def fixup(self, node: SN) -> SN:
if node in self.replacements:
new = self.replacements[node]
return cast(SN, new)
return node
def replace_nodes_in_symbol_table(symbols: SymbolTable,
replacements: Dict[SymbolNode, SymbolNode]) -> None:
for name, node in symbols.items():
if node.node:
if node.node in replacements:
new = replacements[node.node]
old = node.node
replace_object_state(new, old)
node.node = new
if isinstance(node.node, (Var, TypeAlias)):
node.node.accept(NodeReplaceVisitor(replacements))
| true
| true
|
1c411891a0597cb0401fdb6c761e6a64c989874a
| 12,476
|
py
|
Python
|
anchorecli/cli/account.py
|
bollwyvl/anchore-cli
|
83cf52e54a19e06b31b6aeffcf06e08c5c81a497
|
[
"Apache-2.0"
] | null | null | null |
anchorecli/cli/account.py
|
bollwyvl/anchore-cli
|
83cf52e54a19e06b31b6aeffcf06e08c5c81a497
|
[
"Apache-2.0"
] | null | null | null |
anchorecli/cli/account.py
|
bollwyvl/anchore-cli
|
83cf52e54a19e06b31b6aeffcf06e08c5c81a497
|
[
"Apache-2.0"
] | null | null | null |
import sys
import os
import re
import json
import click
import anchorecli.clients.apiexternal
config = {}
whoami = {}
@click.group(name='account', short_help='Account operations')
@click.pass_obj
def account(ctx_config):
global config, whoami
config = ctx_config
try:
anchorecli.cli.utils.check_access(config)
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'account', {}, err))
sys.exit(2)
try:
ret = anchorecli.clients.apiexternal.get_account(config)
if ret['success']:
whoami['account'] = ret['payload']
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'account', {}, err))
sys.exit(2)
try:
ret = anchorecli.clients.apiexternal.get_user(config)
if ret['success']:
whoami['user'] = ret['payload']
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'account', {}, err))
sys.exit(2)
@account.command(name='whoami', short_help="Get current account/user information")
def account_whoami():
    global whoami
ecode = 0
print(anchorecli.cli.utils.format_output(config, 'account_whoami', {}, whoami))
anchorecli.cli.utils.doexit(ecode)
@account.command(name='add', short_help="Add a new account (with no populated users by default)")
@click.argument('account_name', nargs=1, required=True)
@click.option('--email', help="Optional email address to associate with account")
def add(account_name, email):
"""
ACCOUNT_NAME: name of new account to create
EMAIL: email address associated with account (optional)
"""
ecode = 0
try:
ret = anchorecli.clients.apiexternal.add_account(config, account_name=account_name, email=email)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'account_add', {}, ret['payload']))
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'account_add', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
@account.command(name='get', short_help="Get account information")
@click.argument('account_name', nargs=1, required=True)
def get(account_name):
"""
ACCOUNT_NAME: name of new account to create
"""
ecode = 0
try:
ret = anchorecli.clients.apiexternal.get_account(config, account_name=account_name)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'account_get', {}, ret['payload']))
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'account_get', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
@account.command(name='list', short_help="List information about all accounts (admin only)")
def list_accounts():
"""
"""
ecode = 0
try:
ret = anchorecli.clients.apiexternal.list_accounts(config)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'account_list', {}, ret['payload']))
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'account_list', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
@account.command(name='del', short_help="Delete an account (must be disabled first)")
@click.argument('account_name', nargs=1, required=True)
@click.option('--dontask', is_flag=True, help="Do not prompt for confirmation of account deletion")
def delete(account_name, dontask):
    """
    ACCOUNT_NAME: name of account to delete (must be disabled first)
    """
    global input
ecode = 0
answer = "n"
if dontask:
answer = "y"
else:
try:
input = raw_input
except NameError:
pass
try:
answer = input("This operation is irreversible. Really delete account {} along with *all* users and resources associated with this account? (y/N)".format(account_name))
except:
answer = "n"
if answer.lower() == "y":
try:
ret = anchorecli.clients.apiexternal.del_account(config, account_name=account_name)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'account_delete', {}, ret['payload']))
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'account_delete', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
@account.command(name='enable', short_help="Enable a disabled account")
@click.argument('account_name', nargs=1, required=True)
def enable(account_name):
"""
ACCOUNT_NAME: name of account to enable
"""
ecode = 0
try:
ret = anchorecli.clients.apiexternal.enable_account(config, account_name=account_name)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'account_enable', {}, ret['payload']))
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'account_enable', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
@account.command(name='disable', short_help="Disable an enabled account")
@click.argument('account_name', nargs=1, required=True)
def disable(account_name):
"""
ACCOUNT_NAME: name of account to disable
"""
ecode = 0
try:
ret = anchorecli.clients.apiexternal.disable_account(config, account_name=account_name)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'account_disable', {}, ret['payload']))
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'account_disable', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
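# Usage sketch, derived from the click decorators above and assuming the
# installed console entrypoint is `anchore-cli`:
#
#   anchore-cli account add myaccount --email admin@example.com
#   anchore-cli account get myaccount
#   anchore-cli account disable myaccount
#   anchore-cli account del myaccount --dontask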
# user suboperation
whoami = {}
@account.group(name="user", short_help="Account user operations")
def user():
global config, whoami
@user.command(name="add", short_help="Add a new user")
@click.argument('user_name', nargs=1, required=True)
@click.argument('user_password', nargs=1, required=True)
@click.option('--account', help="Optional account name")
def user_add(user_name, user_password, account):
    """
    ACCOUNT: optional name of the account to act as
    """
    global whoami
if not account:
account = whoami.get('account', {}).get('name', None)
ecode = 0
try:
        # validate: 6-128 characters with no whitespace (as the error message states)
        if not re.match(r"\S{6,128}$", user_password):
            raise Exception("Please enter a password at least 6 characters long that contains no spaces.")
ret = anchorecli.clients.apiexternal.add_user(config, account_name=account, user_name=user_name, user_password=user_password)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'user_add', {}, ret['payload']))
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'user_add', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
@user.command(name="del", short_help="Delete a user")
@click.argument('user_name', nargs=1, required=True)
@click.option('--account', help="Optional account name")
def user_delete(user_name, account):
    """
    ACCOUNT: optional name of the account to act as
    """
    global whoami
if not account:
account = whoami.get('account', {}).get('name', None)
ecode = 0
try:
ret = anchorecli.clients.apiexternal.del_user(config, account_name=account, user_name=user_name)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'user_delete', {}, ret['payload']))
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'user_delete', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
@user.command(name="get", short_help="Get information about a user")
@click.argument('user_name', nargs=1, required=True)
@click.option('--account', help="Optional account name")
def user_get(user_name, account):
    """
    ACCOUNT: optional name of the account to act as
    """
    global whoami
if not account:
account = whoami.get('account', {}).get('name', None)
ecode = 0
try:
ret = anchorecli.clients.apiexternal.get_user(config, account_name=account, user_name=user_name)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'user_get', {}, ret['payload']))
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'user_get', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
@user.command(name="list", short_help="Get a list of account users")
@click.option('--account', help="Optional account name")
def user_list(account):
    """
    ACCOUNT: optional name of the account to act as
    """
    global whoami
if not account:
account = whoami.get('account', {}).get('name', None)
ecode = 0
try:
ret = anchorecli.clients.apiexternal.list_users(config, account_name=account)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'user_list', {}, ret['payload']))
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'user_list', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
@user.command(name="setpassword", short_help="(Re)set a user's password credential")
@click.argument('user_password', nargs=1, required=True)
@click.option('--username', help="Optional user name")
@click.option('--account', help="Optional account name")
def user_setpassword(user_password, username, account):
    """
    ACCOUNT: optional name of the account to act as
    """
    global whoami
if not account:
account = whoami.get('account', {}).get('name', None)
if not username:
username = whoami.get('user', {}).get('username', None)
ecode = 0
try:
ret = anchorecli.clients.apiexternal.update_user_password(config, account_name=account, user_name=username, user_password=user_password)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'user_setpassword', {}, ret['payload']))
print ("NOTE: Be sure to change the password you're using for this client if you have reset your own password")
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'user_setpassword', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
| 32.071979
| 180
| 0.644117
|
import sys
import os
import re
import json
import click
import anchorecli.clients.apiexternal
config = {}
whoami = {}
@click.group(name='account', short_help='Account operations')
@click.pass_obj
def account(ctx_config):
global config, whoami
config = ctx_config
try:
anchorecli.cli.utils.check_access(config)
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'account', {}, err))
sys.exit(2)
try:
ret = anchorecli.clients.apiexternal.get_account(config)
if ret['success']:
whoami['account'] = ret['payload']
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'account', {}, err))
sys.exit(2)
try:
ret = anchorecli.clients.apiexternal.get_user(config)
if ret['success']:
whoami['user'] = ret['payload']
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'account', {}, err))
sys.exit(2)
@account.command(name='whoami', short_help="Get current account/user information")
def account_whoami():
    global whoami
ecode = 0
print(anchorecli.cli.utils.format_output(config, 'account_whoami', {}, whoami))
anchorecli.cli.utils.doexit(ecode)
@account.command(name='add', short_help="Add a new account (with no populated users by default)")
@click.argument('account_name', nargs=1, required=True)
@click.option('--email', help="Optional email address to associate with account")
def add(account_name, email):
ecode = 0
try:
ret = anchorecli.clients.apiexternal.add_account(config, account_name=account_name, email=email)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'account_add', {}, ret['payload']))
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'account_add', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
@account.command(name='get', short_help="Get account information")
@click.argument('account_name', nargs=1, required=True)
def get(account_name):
ecode = 0
try:
ret = anchorecli.clients.apiexternal.get_account(config, account_name=account_name)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'account_get', {}, ret['payload']))
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'account_get', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
@account.command(name='list', short_help="List information about all accounts (admin only)")
def list_accounts():
ecode = 0
try:
ret = anchorecli.clients.apiexternal.list_accounts(config)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'account_list', {}, ret['payload']))
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'account_list', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
@account.command(name='del', short_help="Delete an account (must be disabled first)")
@click.argument('account_name', nargs=1, required=True)
@click.option('--dontask', is_flag=True, help="Do not prompt for confirmation of account deletion")
def delete(account_name, dontask):
global input
ecode = 0
answer = "n"
if dontask:
answer = "y"
else:
try:
input = raw_input
except NameError:
pass
try:
answer = input("This operation is irreversible. Really delete account {} along with *all* users and resources associated with this account? (y/N)".format(account_name))
except:
answer = "n"
if answer.lower() == "y":
try:
ret = anchorecli.clients.apiexternal.del_account(config, account_name=account_name)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'account_delete', {}, ret['payload']))
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'account_delete', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
@account.command(name='enable', short_help="Enable a disabled account")
@click.argument('account_name', nargs=1, required=True)
def enable(account_name):
ecode = 0
try:
ret = anchorecli.clients.apiexternal.enable_account(config, account_name=account_name)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'account_enable', {}, ret['payload']))
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'account_enable', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
@account.command(name='disable', short_help="Disable an enabled account")
@click.argument('account_name', nargs=1, required=True)
def disable(account_name):
ecode = 0
try:
ret = anchorecli.clients.apiexternal.disable_account(config, account_name=account_name)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'account_disable', {}, ret['payload']))
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'account_disable', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
whoami = {}
@account.group(name="user", short_help="Account user operations")
def user():
global config, whoami
@user.command(name="add", short_help="Add a new user")
@click.argument('user_name', nargs=1, required=True)
@click.argument('user_password', nargs=1, required=True)
@click.option('--account', help="Optional account name")
def user_add(user_name, user_password, account):
global whoami
if not account:
account = whoami.get('account', {}).get('name', None)
ecode = 0
try:
if not re.match(".{6,128}$", user_password):
raise Exception("Please enter a password at least 6 characters long that contains no spaces.")
ret = anchorecli.clients.apiexternal.add_user(config, account_name=account, user_name=user_name, user_password=user_password)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'user_add', {}, ret['payload']))
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'user_add', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
@user.command(name="del", short_help="Delete a user")
@click.argument('user_name', nargs=1, required=True)
@click.option('--account', help="Optional account name")
def user_delete(user_name, account):
global whoami
if not account:
account = whoami.get('account', {}).get('name', None)
ecode = 0
try:
ret = anchorecli.clients.apiexternal.del_user(config, account_name=account, user_name=user_name)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'user_delete', {}, ret['payload']))
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'user_delete', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
@user.command(name="get", short_help="Get information about a user")
@click.argument('user_name', nargs=1, required=True)
@click.option('--account', help="Optional account name")
def user_get(user_name, account):
global whoami
if not account:
account = whoami.get('account', {}).get('name', None)
ecode = 0
try:
ret = anchorecli.clients.apiexternal.get_user(config, account_name=account, user_name=user_name)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'user_get', {}, ret['payload']))
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'user_get', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
@user.command(name="list", short_help="Get a list of account users")
@click.option('--account', help="Optional account name")
def user_list(account):
global whoami
if not account:
account = whoami.get('account', {}).get('name', None)
ecode = 0
try:
ret = anchorecli.clients.apiexternal.list_users(config, account_name=account)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'user_list', {}, ret['payload']))
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'user_list', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
@user.command(name="setpassword", short_help="(Re)set a user's password credential")
@click.argument('user_password', nargs=1, required=True)
@click.option('--username', help="Optional user name")
@click.option('--account', help="Optional account name")
def user_setpassword(user_password, username, account):
global whoami
if not account:
account = whoami.get('account', {}).get('name', None)
if not username:
username = whoami.get('user', {}).get('username', None)
ecode = 0
try:
ret = anchorecli.clients.apiexternal.update_user_password(config, account_name=account, user_name=username, user_password=user_password)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'user_setpassword', {}, ret['payload']))
print ("NOTE: Be sure to change the password you're using for this client if you have reset your own password")
else:
raise Exception( json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'user_setpassword', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
| true
| true
|
1c4118f2bcb0d51cc3a6797155944b89df7fa2d2
| 1,080
|
py
|
Python
|
setup.py
|
MoritzR/fints2ledger
|
ad02fae91c0c56507a85091553c1b9de8c2225e0
|
[
"MIT"
] | 17
|
2018-12-28T22:40:27.000Z
|
2021-11-18T11:08:46.000Z
|
setup.py
|
MoritzR/fints2ledger
|
ad02fae91c0c56507a85091553c1b9de8c2225e0
|
[
"MIT"
] | 16
|
2019-06-16T13:37:12.000Z
|
2021-12-29T14:22:19.000Z
|
setup.py
|
MoritzR/fints2ledger
|
ad02fae91c0c56507a85091553c1b9de8c2225e0
|
[
"MIT"
] | 6
|
2019-03-10T13:29:12.000Z
|
2021-08-02T14:55:24.000Z
|
from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
setup(name='fints2ledger',
version='0.8.0',
description='A tool for downloading transactions from FinTS banking APIs and sorting them into a ledger journal.',
long_description=readme(),
long_description_content_type='text/markdown',
url='https://github.com/MoritzR/fints2ledger',
author='Moritz Rumpf',
author_email='moritz.rumpf@gmail.com',
license='MIT',
python_requires='>=3.5.0',
entry_points={
'console_scripts': ['fints2ledger=fints2ledger.main:main'],
},
install_requires=[
'mt-940>=4.11,<5',
'fints>=3,<4',
'pyyaml>=4.2b1,<5'
],
setup_requires=['green'],
packages=['fints2ledger'],
zip_safe=False,
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7"
]
)
| 28.421053
| 120
| 0.586111
|
from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
setup(name='fints2ledger',
version='0.8.0',
description='A tool for downloading transactions from FinTS banking APIs and sorting them into a ledger journal.',
long_description=readme(),
long_description_content_type='text/markdown',
url='https://github.com/MoritzR/fints2ledger',
author='Moritz Rumpf',
author_email='moritz.rumpf@gmail.com',
license='MIT',
python_requires='>=3.5.0',
entry_points={
'console_scripts': ['fints2ledger=fints2ledger.main:main'],
},
install_requires=[
'mt-940>=4.11,<5',
'fints>=3,<4',
'pyyaml>=4.2b1,<5'
],
setup_requires=['green'],
packages=['fints2ledger'],
zip_safe=False,
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7"
]
)
| true
| true
|
1c4119840ee4f1afbd26028b257183464501fdf3
| 492
|
py
|
Python
|
chat/registration/forms.py
|
welmends/stocks-chat
|
535f56882edd90f85392a0daeabbae372aa62d78
|
[
"Apache-2.0"
] | null | null | null |
chat/registration/forms.py
|
welmends/stocks-chat
|
535f56882edd90f85392a0daeabbae372aa62d78
|
[
"Apache-2.0"
] | null | null | null |
chat/registration/forms.py
|
welmends/stocks-chat
|
535f56882edd90f85392a0daeabbae372aa62d78
|
[
"Apache-2.0"
] | null | null | null |
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class UserSignUpForm(UserCreationForm):
class Meta:
model = User
fields = ['username', 'password1']
def __init__(self, *args, **kwargs):
super(UserCreationForm, self).__init__(*args, **kwargs)
del self.fields['password2']
for fieldname in ['username', 'password1']:
self.fields[fieldname].help_text = None
| 32.8
| 63
| 0.680894
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class UserSignUpForm(UserCreationForm):
class Meta:
model = User
fields = ['username', 'password1']
def __init__(self, *args, **kwargs):
super(UserCreationForm, self).__init__(*args, **kwargs)
del self.fields['password2']
for fieldname in ['username', 'password1']:
self.fields[fieldname].help_text = None
| true
| true
|
1c4119ed3e3307861e89aa13ec64e00deb301064
| 879
|
py
|
Python
|
tests/test_preloaded.py
|
JayjeetAtGithub/coffea
|
a5583401173859878b52dea44b14ed6c613aea81
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_preloaded.py
|
JayjeetAtGithub/coffea
|
a5583401173859878b52dea44b14ed6c613aea81
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_preloaded.py
|
JayjeetAtGithub/coffea
|
a5583401173859878b52dea44b14ed6c613aea81
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
import os
import uproot
from coffea import processor
from coffea.processor.test_items import NanoEvents0Processor
from coffea.nanoaod import NanoEvents
def test_preloaded_nanoevents():
columns = ['nMuon','Muon_pt','Muon_eta','Muon_phi','Muon_mass','Muon_charge', 'nJet', 'Jet_eta']
p = NanoEvents0Processor(columns=columns)
tree = uproot.open(os.path.abspath('tests/samples/nano_dy.root'))['Events']
arrays = tree.arrays(columns, flatten=True, namedecode='ascii')
df = processor.PreloadedDataFrame(tree.numentries, arrays)
print(arrays)
events = NanoEvents.from_arrays(arrays, metadata={'dataset': 'ZJets'})
hists = p.process(events)
print(hists)
assert( hists['cutflow']['ZJets_pt'] == 18 )
assert( hists['cutflow']['ZJets_mass'] == 6 )
with pytest.raises(RuntimeError):
print(events.Muon.matched_jet)
| 32.555556
| 100
| 0.718999
|
import pytest
import os
import uproot
from coffea import processor
from coffea.processor.test_items import NanoEvents0Processor
from coffea.nanoaod import NanoEvents
def test_preloaded_nanoevents():
columns = ['nMuon','Muon_pt','Muon_eta','Muon_phi','Muon_mass','Muon_charge', 'nJet', 'Jet_eta']
p = NanoEvents0Processor(columns=columns)
tree = uproot.open(os.path.abspath('tests/samples/nano_dy.root'))['Events']
arrays = tree.arrays(columns, flatten=True, namedecode='ascii')
df = processor.PreloadedDataFrame(tree.numentries, arrays)
print(arrays)
events = NanoEvents.from_arrays(arrays, metadata={'dataset': 'ZJets'})
hists = p.process(events)
print(hists)
assert( hists['cutflow']['ZJets_pt'] == 18 )
assert( hists['cutflow']['ZJets_mass'] == 6 )
with pytest.raises(RuntimeError):
print(events.Muon.matched_jet)
| true
| true
|
1c4119f61cd68996395d4e7fd48f6dad8413857e
| 339
|
py
|
Python
|
order/admin.py
|
divyesh1099/badboystyle
|
f4fec0858b43e14f0e1f173261f363d4262c28ea
|
[
"MIT"
] | null | null | null |
order/admin.py
|
divyesh1099/badboystyle
|
f4fec0858b43e14f0e1f173261f363d4262c28ea
|
[
"MIT"
] | null | null | null |
order/admin.py
|
divyesh1099/badboystyle
|
f4fec0858b43e14f0e1f173261f363d4262c28ea
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from order.models import Order
# Register your models here.
class OrderAdmin(admin.ModelAdmin):
list_display = ['user', 'date', 'time', 'amount', 'dispatched', 'delivered', 'paid', 'active', 'cancelled']
search_fields = ['user']
admin.site.register(Order, OrderAdmin)
# admin.site.register(Total)
| 33.9
| 111
| 0.722714
|
from django.contrib import admin
from order.models import Order
class OrderAdmin(admin.ModelAdmin):
list_display = ['user', 'date', 'time', 'amount', 'dispatched', 'delivered', 'paid', 'active', 'cancelled']
search_fields = ['user']
admin.site.register(Order, OrderAdmin)
| true
| true
|
1c411a1bf0ee7de1a590bca1985d65eaa7c54a69
| 3,223
|
py
|
Python
|
softwareprocess/utility/aries.py
|
LordOfTheRains/cookieJar
|
57838b9f5103dacad5b3bdc643905e65d576af94
|
[
"MIT"
] | null | null | null |
softwareprocess/utility/aries.py
|
LordOfTheRains/cookieJar
|
57838b9f5103dacad5b3bdc643905e65d576af94
|
[
"MIT"
] | null | null | null |
softwareprocess/utility/aries.py
|
LordOfTheRains/cookieJar
|
57838b9f5103dacad5b3bdc643905e65d576af94
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
import math
from angle import Angle
class Aries:
REFERENCE_YEAR = 2001
def __init__(self):
pass
@staticmethod
def get_greenwich_hour_angle(year, month, day, hour, minute, second):
"""
= relative_prime_meridian + earth rotation
:param year:
:param month:
:param day:
:param hour:
:param minute:
:param second:
:return:
"""
reference_datetime_str = str(year) + ",01,01,00,00,00"
observation_datetime_str = str(year) + ',' + str(month) + ',' + str(day) + ','
observation_datetime_str += str(hour) + ',' + str(minute) + ',' + str(second)
observation_datetime = datetime.strptime(observation_datetime_str, '%Y,%m,%d,%H,%M,%S')
reference_datetime = datetime.strptime(reference_datetime_str, '%Y,%m,%d,%H,%M,%S')
        elapsed_sec_since_ref = (observation_datetime - reference_datetime).total_seconds()
        relative_pm = Aries.__get_relative_prime_meridian(year)
        earth_rotation = Aries.__get_earth_rotation_since_observation(elapsed_sec_since_ref)
print("relative_pm" + relative_pm.str)
print("earth_rotation" + earth_rotation.str)
return Angle.add(relative_pm, earth_rotation)
@staticmethod
def __get_relative_prime_meridian(year):
"""
- total progression = 100d42.6 + cumulative prog + leap progs
- cumulative progression: delta(year-2001) * -0d14.31667
- leap progression: (leap years elapsed) * 0d59.0
:param year: observation year
:return: angle of prime meridian
"""
reference_rotation = Angle.from_string("100d42.6")
# cumulative progression: delta(year-2001) * -0d14.31667
annual_gha_decrement = Angle.from_string("-0d14.32")
delta_year = year - Aries.REFERENCE_YEAR
cumulative_progression = Angle.multiply(annual_gha_decrement, delta_year)
# leap progression: (leap years elapsed) * 0d59.0
daily_rotation = Angle.from_string("0d59.0")
leap_years = math.floor((year - Aries.REFERENCE_YEAR)/4)
leap_progression = Angle.multiply(daily_rotation, leap_years)
# total progression = 100d42.6 + cumulative prog + leap progs
total_progression = Angle.add(reference_rotation, cumulative_progression)
total_progression = Angle.add(total_progression, leap_progression)
print("total progression" + total_progression.str)
return total_progression
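    # Worked example (illustrative values, assuming Angle carries minutes past
    # 60 over into degrees): for year = 2017, delta_year = 16, so the cumulative
    # progression is 16 * -0d14.32 = -3d49.12; leap_years = floor(16/4) = 4, so
    # the leap progression is 4 * 0d59.0 = 3d56.0; the total progression is then
    # 100d42.6 - 3d49.12 + 3d56.0 = 100d49.48.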
@staticmethod
def __get_earth_rotation_since_observation(elapsed_seconds):
"""
convert seconds into angles
total sec/86164.1*360d00.0
:param elapsed_seconds: second between ref time and observed time
:return: hour angle
"""
        full_angle = Angle.from_string("360d00.0")
        rotation = round(elapsed_seconds/86164.1000, 5)
        # fractional sidereal rotations scaled by a full circle: sec/86164.1 * 360d00.0
        earth_rotation = Angle.multiply(full_angle, rotation)
        print("full_angle_:" + full_angle.str)
        print("rotation_:" + str(rotation))
        print("get_earth_rotation_:" + earth_rotation.str)
        return earth_rotation
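    # Sanity check (illustrative): half a sidereal day, elapsed_seconds =
    # 43082.05, gives rotation = 0.5 and therefore an hour angle of
    # 0.5 * 360d00.0 = 180d00.0.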
| 39.304878
| 95
| 0.645982
|
from datetime import datetime, timedelta
import math
from angle import Angle
class Aries:
REFERENCE_YEAR = 2001
def __init__(self):
pass
@staticmethod
def get_greenwich_hour_angle(year, month, day, hour, minute, second):
reference_datetime_str = str(year) + ",01,01,00,00,00"
observation_datetime_str = str(year) + ',' + str(month) + ',' + str(day) + ','
observation_datetime_str += str(hour) + ',' + str(minute) + ',' + str(second)
observation_datetime = datetime.strptime(observation_datetime_str, '%Y,%m,%d,%H,%M,%S')
reference_datetime = datetime.strptime(reference_datetime_str, '%Y,%m,%d,%H,%M,%S')
        elapsed_sec_since_ref = (observation_datetime - reference_datetime).total_seconds()
        relative_pm = Aries.__get_relative_prime_meridian(year)
        earth_rotation = Aries.__get_earth_rotation_since_observation(elapsed_sec_since_ref)
print("relative_pm" + relative_pm.str)
print("earth_rotation" + earth_rotation.str)
return Angle.add(relative_pm, earth_rotation)
@staticmethod
def __get_relative_prime_meridian(year):
reference_rotation = Angle.from_string("100d42.6")
annual_gha_decrement = Angle.from_string("-0d14.32")
delta_year = year - Aries.REFERENCE_YEAR
cumulative_progression = Angle.multiply(annual_gha_decrement, delta_year)
daily_rotation = Angle.from_string("0d59.0")
leap_years = math.floor((year - Aries.REFERENCE_YEAR)/4)
leap_progression = Angle.multiply(daily_rotation, leap_years)
total_progression = Angle.add(reference_rotation, cumulative_progression)
total_progression = Angle.add(total_progression, leap_progression)
print("total progression" + total_progression.str)
return total_progression
@staticmethod
def __get_earth_rotation_since_observation(elapsed_seconds):
        full_angle = Angle.from_string("360d00.0")
        rotation = round(elapsed_seconds/86164.1000, 5)
        earth_rotation = Angle.multiply(full_angle, rotation)
        print("full_angle_:" + full_angle.str)
        print("rotation_:" + str(rotation))
        print("get_earth_rotation_:" + earth_rotation.str)
        return earth_rotation
| true
| true
|
1c411a3e97e8903efae71b19846f3d31fca7a9f9
| 1,211
|
py
|
Python
|
discordbot.py
|
yukina018/discordpy-startup
|
6e25221dcd7e24a468690c7e8d55991fec9a5b17
|
[
"MIT"
] | 2
|
2021-07-17T04:05:10.000Z
|
2021-07-17T04:05:29.000Z
|
discordbot.py
|
yukina018/discordpy-startup
|
6e25221dcd7e24a468690c7e8d55991fec9a5b17
|
[
"MIT"
] | null | null | null |
discordbot.py
|
yukina018/discordpy-startup
|
6e25221dcd7e24a468690c7e8d55991fec9a5b17
|
[
"MIT"
] | null | null | null |
from discord.ext import commands
import os
import traceback
bot = commands.Bot(command_prefix='/')
token = os.environ['DISCORD_BOT_TOKEN']
@bot.event
async def on_command_error(ctx, error):
orig_error = getattr(error, "original", error)
error_msg = ''.join(traceback.TracebackException.from_exception(orig_error).format())
await ctx.send(error_msg)
@bot.command()
async def チャーミィ(ctx):
await ctx.send('はじめまして!ボクCHARMの妖精チャーミィ☆')
@bot.command()
async def ラスバレ(ctx):
await ctx.send('https://assaultlily.bushimo.jp/' )
@bot.command()
async def ツイッター(ctx):
await ctx.send('https://twitter.com/assaultlily_lb')
@bot.command()
async def 画像(ctx):
await ctx.send('https://twitter.com/assaultlily_lb/status/1416774770331316224/photo/1')
@bot.command()
async def ヘルプ(ctx):
await ctx.send('コマンド一覧だよ!\n/ラスバレ\n/ツイッター\n/画像\n/資料')
@bot.command()
async def 資料(ctx):
await ctx.send('https://drive.google.com/file/d/1FFhDMTinBL7HwlZNPV2jGh0uAS0Iq1SJ/view?usp=sharing')
@bot.event
async def on_message(ctx):
    if ctx.channel.id != 866022441029206016:  # ignore messages from any other channel
return
await bot.process_commands(ctx)
bot.run(token)
| 24.22
| 105
| 0.698596
|
from discord.ext import commands
import os
import traceback
bot = commands.Bot(command_prefix='/')
token = os.environ['DISCORD_BOT_TOKEN']
@bot.event
async def on_command_error(ctx, error):
orig_error = getattr(error, "original", error)
error_msg = ''.join(traceback.TracebackException.from_exception(orig_error).format())
await ctx.send(error_msg)
@bot.command()
async def チャーミィ(ctx):
await ctx.send('はじめまして!ボクCHARMの妖精チャーミィ☆')
@bot.command()
async def ラスバレ(ctx):
await ctx.send('https://assaultlily.bushimo.jp/' )
@bot.command()
async def ツイッター(ctx):
await ctx.send('https://twitter.com/assaultlily_lb')
@bot.command()
async def 画像(ctx):
await ctx.send('https://twitter.com/assaultlily_lb/status/1416774770331316224/photo/1')
@bot.command()
async def ヘルプ(ctx):
await ctx.send('コマンド一覧だよ!\n/ラスバレ\n/ツイッター\n/画像\n/資料')
@bot.command()
async def 資料(ctx):
await ctx.send('https://drive.google.com/file/d/1FFhDMTinBL7HwlZNPV2jGh0uAS0Iq1SJ/view?usp=sharing')
@bot.event
async def on_message(ctx):
if ctx.channel.id != 866022441029206016:
return
await bot.process_commands(ctx)
bot.run(token)
| true
| true
|
1c411a5840d5920258e89c30d35db45bb4be35ae
| 414
|
py
|
Python
|
pants-plugins/experimental/fooify/target_types.py
|
sureshjoshi/pants-example-plugin
|
a266a01cb99e632ec21ec9c985a04054adec1185
|
[
"MIT"
] | null | null | null |
pants-plugins/experimental/fooify/target_types.py
|
sureshjoshi/pants-example-plugin
|
a266a01cb99e632ec21ec9c985a04054adec1185
|
[
"MIT"
] | null | null | null |
pants-plugins/experimental/fooify/target_types.py
|
sureshjoshi/pants-example-plugin
|
a266a01cb99e632ec21ec9c985a04054adec1185
|
[
"MIT"
] | null | null | null |
from pants.engine.target import COMMON_TARGET_FIELDS, Dependencies, Target
class FooifyDependenciesField(Dependencies):
pass
class FooifyTarget(Target):
alias = "fooify_distribution"
core_fields = (
*COMMON_TARGET_FIELDS,
FooifyDependenciesField,
)
help = (
"The `fooify` target will take in a wheel dependency and "
"add a .foo extension to the end."
)
| 23
| 74
| 0.681159
|
from pants.engine.target import COMMON_TARGET_FIELDS, Dependencies, Target
class FooifyDependenciesField(Dependencies):
pass
class FooifyTarget(Target):
alias = "fooify_distribution"
core_fields = (
*COMMON_TARGET_FIELDS,
FooifyDependenciesField,
)
help = (
"The `fooify` target will take in a wheel dependency and "
"add a .foo extension to the end."
)
| true
| true
|
1c411b89c3ec5b518508ecb478d6c4326ce7884e
| 24,995
|
py
|
Python
|
EWS/EWS_weekly.py
|
KoenvanLoon/EWS
|
3447921ec2140f29fd69d5b140b5eba2f244bccd
|
[
"MIT"
] | null | null | null |
EWS/EWS_weekly.py
|
KoenvanLoon/EWS
|
3447921ec2140f29fd69d5b140b5eba2f244bccd
|
[
"MIT"
] | null | null | null |
EWS/EWS_weekly.py
|
KoenvanLoon/EWS
|
3447921ec2140f29fd69d5b140b5eba2f244bccd
|
[
"MIT"
] | null | null | null |
"""
EWS - Early Warning Signals
EWS_weekly
@authors: KoenvanLoon & TijmenJanssen
"""
from pcraster import *
import numpy as np
import os
import time
import EWSPy as ews
import EWS_configuration as cfg
import NULL_models_timeseries_weekly as temp_NULL
import NULL_models_spatial_weekly as spat_NULL
import EWS_StateVariables as ews_sv
# State variables for EWS calculations
"""
Variables (state variables) can be either 'ews_sv.variables_weekly' or 'ews_sv.variables_hourly', for calculating
early-warning signals for the weekly or hourly model respectively. State variables present in EWS_StateVariables.py can
be added through the configuration.
Args:
-----
variables : The state variables for which calculations are done.
"""
variables = ews_sv.variables_weekly
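# For the hourly model, the equivalent selection (assuming that list is defined
# in EWS_StateVariables.py) would be:
# variables = ews_sv.variables_hourly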
# Spatial interval
"""
The spatial interval depends on whether a cutoff point is selected. If there is a cutoff point, no calculations are
done on spatial datasets after this point.
Args:
-----
spatial_ews_interval : 2D numpy array containing the time steps at which a spatial dataset was created.
"""
if not cfg.cutoff:
spatial_ews_interval = np.arange(cfg.interval_map_snapshots, cfg.number_of_timesteps_weekly +
cfg.interval_map_snapshots, cfg.interval_map_snapshots)
elif cfg.cutoff:
spatial_ews_interval = np.arange(cfg.interval_map_snapshots, cfg.cutoff_point + cfg.interval_map_snapshots,
cfg.interval_map_snapshots)
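# Illustrative example (configuration values assumed, not taken from EWS_configuration): with
# interval_map_snapshots = 100 and cutoff_point = 500, spatial_ews_interval becomes
# array([100, 200, 300, 400, 500]).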
# Time series to time windows
"""
Divides a time series (1D numpy array) into a stack of evenly sized time windows. If remaining data-
points do not fill the last time window, they are dropped from the stack of time windows.
Args:
-----
timeseries : A 1D numpy array containing data points of an early-warning signal.
window_size : The size (int) of the windows into which the time series is to be divided.
window_overlap : The number (int) of data points in the window equal to the last data points of the previous time
window.
Returns:
-----
view : A 2D numpy array whose rows are the evenly sized time windows (1D numpy arrays).
! - Note that the amount of data points in 'view' does not need to be equal to the amount of data points in
'timeseries' due to the possibility of dropping data points if they do not fill the last time window completely.
"""
def time_series2time_windows(timeseries, window_size=100, window_overlap=0):
actual_window_overlap = window_size - window_overlap
sh = (timeseries.size - window_size + 1, window_size)
st = timeseries.strides * 2
if window_overlap != 0:
view = np.lib.stride_tricks.as_strided(timeseries, strides=st, shape=sh)[0::actual_window_overlap]
elif window_overlap == 0:
view = np.lib.stride_tricks.as_strided(timeseries, strides=st, shape=sh)[0::window_size]
return view
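# Illustrative example of the windowing (input values hypothetical):
#
#   >>> time_series2time_windows(np.arange(10), window_size=4, window_overlap=2)
#   array([[0, 1, 2, 3],
#          [2, 3, 4, 5],
#          [4, 5, 6, 7],
#          [6, 7, 8, 9]])
#
# With window_overlap=0 the same input yields only [0, 1, 2, 3] and [4, 5, 6, 7];
# the last two data points are dropped because they cannot fill a full window.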
# Generate datasets (initial)
"""
Initializes dataset generation. Datasets are generated for method(s) selected in the configuration when
generate_dummy_datasets is set to True.
Args:
-----
variable : The state variable from the variable class presented in EWS_StateVariables.py
path : str, the filepath where the original dataset can be found.
nr_realizations : int, the number of datasets generated.
method1 : bool, selects whether this method is utilized.
method2 : bool, selects whether this method is utilized.
method3 : bool, selects whether this method is utilized.
"""
def generate_datasets_init(variable, path='./1/', nr_realizations=1, method1=False, method2=False, method3=False):
if variable.temporal:
state_variable_timeseries, files_present = temporal_data_file_loading(variable, path=path)
if files_present:
if state_variable_timeseries.ndim == 1:
# Detrending: 'None', 'Gaussian'
state_variable_timeseries = generate_datasets_main(variable, state_variable_timeseries,
temp_NULL.detrend_, nr_realizations=nr_realizations,
path=path)
# Generate dummy datasets
if method1:
generate_datasets_main(variable, state_variable_timeseries, temp_NULL.method1_,
nr_realizations=nr_realizations, path=path)
if method2:
generate_datasets_main(variable, state_variable_timeseries, temp_NULL.method2_,
nr_realizations=nr_realizations, path=path)
if method3:
generate_datasets_main(variable, state_variable_timeseries, temp_NULL.method3_,
nr_realizations=nr_realizations, path=path)
else:
print(f"Multiple dimensions are currently not supported for generated datasets, so no datasets are being "
f"generated for {variable.name}.")
if variable.spatial:
state_variable_snapshots, files_present = spatial_data_file_loading(variable, path=path)
if files_present:
state_variable_snapshots = np.asarray(state_variable_snapshots)
            # Detrending: 'None', 'Linear', 'Gaussian'
            state_variable_snapshots = generate_datasets_main(variable, state_variable_snapshots, spat_NULL.detrend_,
                                                              nr_realizations=nr_realizations, path=path)
            # Generate dummy datasets
if method1:
generate_datasets_main(variable, state_variable_snapshots, spat_NULL.method1_,
nr_realizations=nr_realizations, path=path)
if method2:
generate_datasets_main(variable, state_variable_snapshots, spat_NULL.method2_,
nr_realizations=nr_realizations, path=path)
if method3:
generate_datasets_main(variable, state_variable_snapshots, spat_NULL.method3_,
nr_realizations=nr_realizations, path=path)
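# Hypothetical usage sketch (arguments illustrative only): generate ten
# surrogate datasets with method 1 for the first configured state variable:
#
#   >>> generate_datasets_init(variables[0], path='./1/', nr_realizations=10,
#   ...                        method1=True)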
# Generate datasets (main)
"""
Generates dataset(s) for the given state variable using the given null-model method, and saves them to disk.
Args:
-----
variable : The state variable from the variable class presented in EWS_StateVariables.py
state_variable : The data for which datasets are to be generated. Can be either temporal or spatial data.
method : function, either detrend_, method1_, method2_ or method3_ from the spatial or temporal null models.
nr_realizations : int, the number of datasets generated.
path : str, the filepath where the original dataset can be found.
Returns:
-----
detrended_data : Optional return; only returned when method == detrend_ for time series.
"""
def generate_datasets_main(variable, state_variable, method, nr_realizations=1, path='./1/'):
print(f"Started generating dataset(s) for {variable.name} using {method.__name__}")
detrended_data = method(state_variable, realizations=nr_realizations, path=path, file_name=variable.name)
print(f"Finished generating dataset(s) for {variable.name} using {method.__name__} \n")
if method.__name__ == temp_NULL.detrend_.__name__:
return detrended_data
# Calculate EWS generated datasets (initial)
"""
Initializes EWS calculations for the generated datasets by passing them to ews_calculations_generated_datasets_main().
Args:
-----
variable : The state variable from the variable class presented in EWS_StateVariables.py
path : str, the filepath where the original dataset can be found.
nr_realizations : int, the number of datasets generated.
timer_on : bool, selects whether calculation time is shown in the console.
method1 , method2 , method3 : bool, selects whether this method is utilized.
"""
def ews_calculations_generated_datasets_init(variable, path='./1/', nr_realizations=1, timer_on=False, method1=False,
method2=False, method3=False):
generated_number_length = ews.generated_number_length(nr_realizations)
if cfg.save_detrended_data and variable.temporal:
ews_calculations_generated_datasets_main(variable, 'dtr', gen_nr_len=generated_number_length, path=path,
nr_realizations=1, timer_on=timer_on)
if method1:
ews_calculations_generated_datasets_main(variable, 'm1g', gen_nr_len=generated_number_length, path=path,
nr_realizations=nr_realizations, timer_on=timer_on)
if method2:
ews_calculations_generated_datasets_main(variable, 'm2g', gen_nr_len=generated_number_length, path=path,
nr_realizations=nr_realizations, timer_on=timer_on)
if method3:
ews_calculations_generated_datasets_main(variable, 'm3g', gen_nr_len=generated_number_length, path=path,
nr_realizations=nr_realizations, timer_on=timer_on)
# Calculate EWS generated datasets (main)
"""
Calculates EWS for each generated dataset realization by passing its directory to ews_calculations_init().
Args:
-----
variable : The state variable from the variable class presented in EWS_StateVariables.py
path : str, the filepath where the original dataset can be found.
nr_realizations : int, the number of datasets generated.
method1 , method2, method3 : bool, selects whether this method is utilized.
"""
def ews_calculations_generated_datasets_main(variable, method, gen_nr_len=4, path='./1/', nr_realizations=1, timer_on=False):
for realization in range(nr_realizations):
generated_number_string = method + str(realization).zfill(gen_nr_len) + '/'
dir_name = os.path.join(path + generated_number_string)
ews_calculations_init(variable, path=dir_name, timer_on=timer_on)
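# Example of the directory naming (values illustrative): method 'm1g', realization 3 and
# gen_nr_len=4 give generated_number_string 'm1g0003/', so EWS are then calculated in
# './1/m1g0003/'.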
# Loading temporal data file(s)
"""
Loads files containing temporal data.
Args:
-----
variable : The state variable from the variable class presented in EWS_StateVariables.py
path : str, the filepath where the original dataset can be found.
Returns:
-----
state_variable_timeseries : the timeseries containing the temporal data.
EWS_calculations : bool, whether the datafiles are found and if EWS calculations can be performed.
"""
def temporal_data_file_loading(variable, path='./1/'):
state_variable_timeseries = []
EWS_calculations = True
if variable.datatype == 'numpy':
file_name = ews.file_name_str(variable.name, cfg.number_of_timesteps_weekly)
if os.path.exists(path + file_name + ".numpy.txt"):
state_variable_timeseries = np.loadtxt(path + file_name + ".numpy.txt")
else:
print(f"{file_name}.numpy.txt not found in {path}")
EWS_calculations = False
else:
print(f"Datatype for {variable.name} currently not supported.")
EWS_calculations = False
return state_variable_timeseries, EWS_calculations
# Dividing timeseries into windows
"""
Divides a timeseries (optionally of multiple locations) (1D or 2D numpy array) into multiple windows.
Args:
-----
variable : The state variable from the variable class presented in EWS_StateVariables.py
state_variable_timeseries : The timeseries (1D or 2D numpy array) of the state variable.
Returns:
-----
stack_of_windows : 2D numpy array containing the timeseries subdivided into windows, flattened across locations.
nr_dim : int, the number of dimensions of the original timeseries.
stack_x , stack_y : x and y component of a stack of windows for multiple locations before flattening.
"""
def window_stacker(variable, state_variable_timeseries):
nr_dim = state_variable_timeseries.ndim
if nr_dim == 1:
if cfg.cutoff:
state_variable_timeseries = state_variable_timeseries[:cfg.cutoff_point]
stack_of_windows = time_series2time_windows(state_variable_timeseries, variable.window_size,
variable.window_overlap)
stack_x, stack_y = np.asarray(stack_of_windows).shape
else:
stack_of_windows = [0.0] * np.asarray(state_variable_timeseries).shape[1]
for k, timeseries in enumerate(state_variable_timeseries.T):
if cfg.cutoff:
stack_of_windows[k] = time_series2time_windows(timeseries[:cfg.cutoff_point],
variable.window_size, variable.window_overlap)
elif not cfg.cutoff:
stack_of_windows[k] = time_series2time_windows(timeseries, variable.window_size,
variable.window_overlap)
stack_x, stack_y, stack_z = np.asarray(stack_of_windows).shape
stack_of_windows = np.asarray(stack_of_windows).reshape(-1, stack_z)
return stack_of_windows, nr_dim, stack_x, stack_y
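# Shape example (values hypothetical): a (1000, 5) timeseries of five locations with
# window_size=100 and window_overlap=0 first gives a (5, 10, 100) stack, which is
# flattened to a stack_of_windows of shape (50, 100), with nr_dim=2, stack_x=5 and
# stack_y=10.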
# Loading spatial data file(s)
"""
Loads files containing spatial data.
Args:
-----
variable : The state variable from the variable class presented in EWS_StateVariables.py
path : str, the filepath where the original dataset can be found.
Returns:
-----
state_variable_snapshots : the snapshots containing the spatial data.
EWS_calculations : bool, whether the datafiles are found and if EWS calculations can be performed.
"""
def spatial_data_file_loading(variable, path='./1/'):
state_variable_snapshots = [0.0] * len(spatial_ews_interval)
EWS_calculations = True
if variable.datatype == 'numpy':
for k, timestep in enumerate(spatial_ews_interval):
file_name = ews.file_name_str(variable.name, timestep)
if os.path.exists(path + file_name + ".numpy.txt"):
                state_variable_snapshots[k] = np.loadtxt(path + file_name + ".numpy.txt")
else:
print(f"{file_name}.numpy.txt not found in {path}.")
EWS_calculations = False
    elif variable.datatype == 'map':
for k, timestep in enumerate(spatial_ews_interval):
file_name = ews.file_name_str(variable.name, timestep)
if os.path.exists(path + file_name):
state_variable_snapshots[k] = pcr2numpy(readmap(path + file_name), np.NaN)
else:
print(f"{file_name} not found in {path}.")
EWS_calculations = False
else:
print(f"Datatype for {variable.name} currently not supported.")
EWS_calculations = False
return state_variable_snapshots, EWS_calculations
# Calculating and saving EWS
"""
Calculates early-warning signals and saves the results.
Args:
-----
variable : The state variable from the variable class presented in EWS_StateVariables.py
data : The spatial or temporal data from the model.
method_name : str, element of the saved file name that encodes the dimension (s for spatial, t for
temporal) and the statistic/method (e.g. mn for mean).
method_function : function, selects the statistic/method used to calculate the (possible) EWS.
path : str, the filepath where the original dataset can be found.
nr_dim : int, the number of dimensions of the original timeseries.
stack_x , stack_y : x and y component of a stack of windows for multiple locations before flattening.
"""
def ews_calc_and_save(variable, data, method_name, method_function, path='./1/', nr_dim=1, stack_x=1, stack_y=1):
fpath = os.path.join(path + variable.name + method_name)
signal = method_function(data)
if nr_dim > 1:
signal = signal.reshape(stack_x, stack_y)
np.savetxt(fpath + '.numpy.txt', signal)
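# Example of the resulting file name (variable name hypothetical): a variable named
# 'sfM' with method_name '.t.mn' under path './1/' is saved as './1/sfM.t.mn.numpy.txt'.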
# Initializing calculating and saving EWS for temporal data
"""
Initializes calculating early-warning signals and saving the results for temporal data.
Args:
-----
variable : The state variable from the variable class presented in EWS_StateVariables.py
state_variable_timeseries : The temporal data from the model.
path : str, the filepath where the original dataset can be found.
"""
def temporal_ews_calculations(variable, state_variable_timeseries, path='./1/'):
stack_of_windows, nr_dim, stack_x, stack_y = window_stacker(variable, state_variable_timeseries)
ews_calc_and_save(variable, stack_of_windows, '.t.mn', ews.temporal_mean, path=path, nr_dim=nr_dim, stack_x=stack_x,
stack_y=stack_y)
ews_calc_and_save(variable, stack_of_windows, '.t.std', ews.temporal_std, path=path, nr_dim=nr_dim, stack_x=stack_x,
stack_y=stack_y)
ews_calc_and_save(variable, stack_of_windows, '.t.var', ews.temporal_var, path=path, nr_dim=nr_dim, stack_x=stack_x,
stack_y=stack_y)
ews_calc_and_save(variable, stack_of_windows, '.t.cv', ews.temporal_cv, path=path, nr_dim=nr_dim, stack_x=stack_x,
stack_y=stack_y)
ews_calc_and_save(variable, stack_of_windows, '.t.skw', ews.temporal_skw, path=path, nr_dim=nr_dim, stack_x=stack_x,
stack_y=stack_y)
ews_calc_and_save(variable, stack_of_windows, '.t.krt', ews.temporal_krt, path=path, nr_dim=nr_dim, stack_x=stack_x,
stack_y=stack_y)
ews_calc_and_save(variable, stack_of_windows, '.t.acr', ews.temporal_autocorrelation, path=path, nr_dim=nr_dim,
stack_x=stack_x, stack_y=stack_y)
ews_calc_and_save(variable, stack_of_windows, '.t.AR1', ews.temporal_AR1, path=path, nr_dim=nr_dim, stack_x=stack_x,
stack_y=stack_y)
ews_calc_and_save(variable, stack_of_windows, '.t.rr', ews.temporal_returnrate, path=path, nr_dim=nr_dim,
stack_x=stack_x, stack_y=stack_y)
# Temporal dfa TODO - returns 3-4 values, save only 1?
fpath = os.path.join(path + variable.name + '.t.dfa')
_, _, _, temporal_statistic = ews.temporal_dfa(stack_of_windows, window_size=variable.window_size)
# scales, fluct, coeff, propagator (== temporal statistic)
if nr_dim > 1:
temporal_statistic = temporal_statistic.reshape(stack_x, stack_y)
np.savetxt(fpath + '.numpy.txt', temporal_statistic)
# Temporal cond. het. TODO - returns 2 values, save only 1?
fpath = os.path.join(path + variable.name + '.t.coh')
save_p = True
if save_p and nr_dim == 1:
temporal_statistic = [[0.0], [0.0]]
statistic, p_val = ews.temporal_cond_het(stack_of_windows)
temporal_statistic[0] = statistic
temporal_statistic[1] = p_val
else:
temporal_statistic, _ = ews.temporal_cond_het(stack_of_windows) # _ is the p-value of the test, not saved
if nr_dim > 1:
temporal_statistic = temporal_statistic.reshape(stack_x, stack_y)
np.savetxt(fpath + '.numpy.txt', temporal_statistic)
# Initializing calculating and saving EWS for spatial data
"""
Initializes calculating early-warning signals and saving the results for spatial data.
Args:
-----
variable : The state variable from the variable class presented in EWS_StateVariables.py
state_variable_maps : The spatial data from the model.
path : str, the filepath where the original dataset can be found.
"""
def spatial_ews_calculations(variable, state_variable_maps, path='./1/'):
ews_calc_and_save(variable, state_variable_maps, '.s.mn', ews.spatial_mean, path=path)
ews_calc_and_save(variable, state_variable_maps, '.s.std', ews.spatial_std, path=path)
ews_calc_and_save(variable, state_variable_maps, '.s.var', ews.spatial_var, path=path)
ews_calc_and_save(variable, state_variable_maps, '.s.skw', ews.spatial_skw, path=path)
ews_calc_and_save(variable, state_variable_maps, '.s.krt', ews.spatial_krt, path=path)
ews_calc_and_save(variable, state_variable_maps, '.s.mI', ews.spatial_corr, path=path)
# Initializing calculating and saving EWS for both spatial and temporal data
"""
Initializes calculating early-warning signals and saving the results for both temporal and spatial data.
Args:
-----
variable : The state variable from the variable class presented in EWS_StateVariables.py
path : str, the filepath where the original dataset can be found.
timer_on : bool, selects whether calculation time is shown in the console.
"""
def ews_calculations_init(variable, path='./1/', timer_on=False):
if variable.temporal:
if cfg.mean_timeseries_data:
ews_calculations_main(variable, temporal_data_file_loading, temporal_ews_calculations, path=path,
timer_on=timer_on)
elif not cfg.mean_timeseries_data:
print(f"Mean timeseries data == False in the configuration, could not calculate EWS for "
f"{variable.name}.")
elif variable.spatial:
if cfg.map_data:
ews_calculations_main(variable, spatial_data_file_loading, spatial_ews_calculations, path=path,
timer_on=timer_on)
elif not cfg.map_data:
print(f"Map data == False in the configuration, could not calculate EWS for {variable.name}.")
# Initializing calculating and saving EWS for either temporal or spatial data
"""
Initializes calculating early-warning signals and saving the results for either temporal or spatial data.
Args:
-----
variable : The state variable from the variable class presented in EWS_StateVariables.py
loading_function : function, refers to temporal_data_file_loading() or spatial_data_file_loading().
calculation_function : function, refers to temporal_ews_calculations() or spatial_ews_calculations().
path : str, the filepath where the original dataset can be found.
timer_on : bool, selects whether calculation time is shown in the console.
"""
def ews_calculations_main(variable, loading_function, calculation_function, path='./1/', timer_on=False):
state_variable, files_present = loading_function(variable, path=path)
if files_present:
print(f"Started EWS calculations for {variable.name} in {path}")
if timer_on:
start_time = time.time()
calculation_function(variable, state_variable, path=path)
if timer_on:
end_time = time.time()
print(f"Elapsed time for EWS calculations for {variable.name} in {path} equals:", end_time - start_time,
'\n')
# EWS calculations & optional data generation and EWS calculations for results of the weekly model
"""
Starts calculations, optional data generation & calculations for results of the weekly model. Takes no arguments and
returns nothing, as settings from the configuration are used instead. Calculations are saved on disk.
"""
def EWS_weekly_calculations():
start_time = time.time()
for realization in range(1, cfg.nrOfSamples + 1):
for variable in variables:
ews_calculations_init(variable, path=f'./{realization}/', timer_on=True)
if cfg.generate_dummy_datasets:
generate_datasets_init(variable, path=f'./{realization}/', nr_realizations=cfg.nr_generated_datasets,
method1=cfg.method_1, method2=cfg.method_2, method3=cfg.method_3)
ews_calculations_generated_datasets_init(variable, path=f'./{realization}/',
nr_realizations=cfg.nr_generated_datasets,
timer_on=True, method1=cfg.method_1, method2=cfg.method_2,
method3=cfg.method_3)
end_time = time.time() - start_time
print(f"Total elapsed time equals: {end_time} seconds")
# EWS calculations & optional data generation and EWS calculations for results of the hourly model
"""
Starts calculations, optional data generation & calculations for results of the hourly model. Takes no arguments and
returns nothing, as settings from the configuration are used instead. Calculations are saved on disk.
"""
def EWS_hourly_calculations():
start_time = time.time()
for i in range(cfg.stepsTotal):
fpath = str(i).zfill(2)
for variable in variables:
ews_calculations_init(variable, path=f'./h{fpath}/', timer_on=True)
end_time = time.time() - start_time
print(f"Total elapsed time equals: {end_time} seconds")
EWS_weekly_calculations()
| 40.444984
| 126
| 0.680936
|
from pcraster import *
import numpy as np
import os
import time
import EWSPy as ews
import EWS_configuration as cfg
import NULL_models_timeseries_weekly as temp_NULL
import NULL_models_spatial_weekly as spat_NULL
import EWS_StateVariables as ews_sv
variables = ews_sv.variables_weekly
if not cfg.cutoff:
spatial_ews_interval = np.arange(cfg.interval_map_snapshots, cfg.number_of_timesteps_weekly +
cfg.interval_map_snapshots, cfg.interval_map_snapshots)
elif cfg.cutoff:
spatial_ews_interval = np.arange(cfg.interval_map_snapshots, cfg.cutoff_point + cfg.interval_map_snapshots,
cfg.interval_map_snapshots)
def time_series2time_windows(timeseries, window_size=100, window_overlap=0):
actual_window_overlap = window_size - window_overlap
sh = (timeseries.size - window_size + 1, window_size)
st = timeseries.strides * 2
if window_overlap != 0:
view = np.lib.stride_tricks.as_strided(timeseries, strides=st, shape=sh)[0::actual_window_overlap]
elif window_overlap == 0:
view = np.lib.stride_tricks.as_strided(timeseries, strides=st, shape=sh)[0::window_size]
return view
def generate_datasets_init(variable, path='./1/', nr_realizations=1, method1=False, method2=False, method3=False):
if variable.temporal:
state_variable_timeseries, files_present = temporal_data_file_loading(variable, path=path)
if files_present:
if state_variable_timeseries.ndim == 1:
state_variable_timeseries = generate_datasets_main(variable, state_variable_timeseries,
temp_NULL.detrend_, nr_realizations=nr_realizations,
path=path)
if method1:
generate_datasets_main(variable, state_variable_timeseries, temp_NULL.method1_,
nr_realizations=nr_realizations, path=path)
if method2:
generate_datasets_main(variable, state_variable_timeseries, temp_NULL.method2_,
nr_realizations=nr_realizations, path=path)
if method3:
generate_datasets_main(variable, state_variable_timeseries, temp_NULL.method3_,
nr_realizations=nr_realizations, path=path)
else:
print(f"Multiple dimensions are currently not supported for generated datasets, so no datasets are being "
f"generated for {variable.name}.")
if variable.spatial:
state_variable_snapshots, files_present = spatial_data_file_loading(variable, path=path)
if files_present:
state_variable_snapshots = np.asarray(state_variable_snapshots)
state_variable_snapshots = generate_datasets_main(variable, state_variable_snapshots, spat_NULL.detrend_,
nr_realizations=nr_realizations, path=path)
if method1:
generate_datasets_main(variable, state_variable_snapshots, spat_NULL.method1_,
nr_realizations=nr_realizations, path=path)
if method2:
generate_datasets_main(variable, state_variable_snapshots, spat_NULL.method2_,
nr_realizations=nr_realizations, path=path)
if method3:
generate_datasets_main(variable, state_variable_snapshots, spat_NULL.method3_,
nr_realizations=nr_realizations, path=path)
def generate_datasets_main(variable, state_variable, method, nr_realizations=1, path='./1/'):
print(f"Started generating dataset(s) for {variable.name} using {method.__name__}")
detrended_data = method(state_variable, realizations=nr_realizations, path=path, file_name=variable.name)
print(f"Finished generating dataset(s) for {variable.name} using {method.__name__} \n")
if method.__name__ == temp_NULL.detrend_.__name__:
return detrended_data
def ews_calculations_generated_datasets_init(variable, path='./1/', nr_realizations=1, timer_on=False, method1=False,
method2=False, method3=False):
generated_number_length = ews.generated_number_length(nr_realizations)
if cfg.save_detrended_data and variable.temporal:
ews_calculations_generated_datasets_main(variable, 'dtr', gen_nr_len=generated_number_length, path=path,
nr_realizations=1, timer_on=timer_on)
if method1:
ews_calculations_generated_datasets_main(variable, 'm1g', gen_nr_len=generated_number_length, path=path,
nr_realizations=nr_realizations, timer_on=timer_on)
if method2:
ews_calculations_generated_datasets_main(variable, 'm2g', gen_nr_len=generated_number_length, path=path,
nr_realizations=nr_realizations, timer_on=timer_on)
if method3:
ews_calculations_generated_datasets_main(variable, 'm3g', gen_nr_len=generated_number_length, path=path,
nr_realizations=nr_realizations, timer_on=timer_on)
def ews_calculations_generated_datasets_main(variable, method, gen_nr_len=4, path='./1/', nr_realizations=1, timer_on=False):
for realization in range(nr_realizations):
generated_number_string = method + str(realization).zfill(gen_nr_len) + '/'
dir_name = os.path.join(path + generated_number_string)
ews_calculations_init(variable, path=dir_name, timer_on=timer_on)
def temporal_data_file_loading(variable, path='./1/'):
state_variable_timeseries = []
EWS_calculations = True
if variable.datatype == 'numpy':
file_name = ews.file_name_str(variable.name, cfg.number_of_timesteps_weekly)
if os.path.exists(path + file_name + ".numpy.txt"):
state_variable_timeseries = np.loadtxt(path + file_name + ".numpy.txt")
else:
print(f"{file_name}.numpy.txt not found in {path}")
EWS_calculations = False
else:
print(f"Datatype for {variable.name} currently not supported.")
EWS_calculations = False
return state_variable_timeseries, EWS_calculations
def window_stacker(variable, state_variable_timeseries):
nr_dim = state_variable_timeseries.ndim
if nr_dim == 1:
if cfg.cutoff:
state_variable_timeseries = state_variable_timeseries[:cfg.cutoff_point]
stack_of_windows = time_series2time_windows(state_variable_timeseries, variable.window_size,
variable.window_overlap)
stack_x, stack_y = np.asarray(stack_of_windows).shape
else:
stack_of_windows = [0.0] * np.asarray(state_variable_timeseries).shape[1]
for k, timeseries in enumerate(state_variable_timeseries.T):
if cfg.cutoff:
stack_of_windows[k] = time_series2time_windows(timeseries[:cfg.cutoff_point],
variable.window_size, variable.window_overlap)
elif not cfg.cutoff:
stack_of_windows[k] = time_series2time_windows(timeseries, variable.window_size,
variable.window_overlap)
stack_x, stack_y, stack_z = np.asarray(stack_of_windows).shape
stack_of_windows = np.asarray(stack_of_windows).reshape(-1, stack_z)
return stack_of_windows, nr_dim, stack_x, stack_y
def spatial_data_file_loading(variable, path='./1/'):
state_variable_snapshots = [0.0] * len(spatial_ews_interval)
EWS_calculations = True
if variable.datatype == 'numpy':
for k, timestep in enumerate(spatial_ews_interval):
file_name = ews.file_name_str(variable.name, timestep)
if os.path.exists(path + file_name + ".numpy.txt"):
                state_variable_snapshots[k] = np.loadtxt(path + file_name + ".numpy.txt")
else:
print(f"{file_name}.numpy.txt not found in {path}.")
EWS_calculations = False
    elif variable.datatype == 'map':
for k, timestep in enumerate(spatial_ews_interval):
file_name = ews.file_name_str(variable.name, timestep)
if os.path.exists(path + file_name):
state_variable_snapshots[k] = pcr2numpy(readmap(path + file_name), np.NaN)
else:
print(f"{file_name} not found in {path}.")
EWS_calculations = False
else:
print(f"Datatype for {variable.name} currently not supported.")
EWS_calculations = False
return state_variable_snapshots, EWS_calculations
def ews_calc_and_save(variable, data, method_name, method_function, path='./1/', nr_dim=1, stack_x=1, stack_y=1):
fpath = os.path.join(path + variable.name + method_name)
signal = method_function(data)
if nr_dim > 1:
signal = signal.reshape(stack_x, stack_y)
np.savetxt(fpath + '.numpy.txt', signal)
def temporal_ews_calculations(variable, state_variable_timeseries, path='./1/'):
stack_of_windows, nr_dim, stack_x, stack_y = window_stacker(variable, state_variable_timeseries)
ews_calc_and_save(variable, stack_of_windows, '.t.mn', ews.temporal_mean, path=path, nr_dim=nr_dim, stack_x=stack_x,
stack_y=stack_y)
ews_calc_and_save(variable, stack_of_windows, '.t.std', ews.temporal_std, path=path, nr_dim=nr_dim, stack_x=stack_x,
stack_y=stack_y)
ews_calc_and_save(variable, stack_of_windows, '.t.var', ews.temporal_var, path=path, nr_dim=nr_dim, stack_x=stack_x,
stack_y=stack_y)
ews_calc_and_save(variable, stack_of_windows, '.t.cv', ews.temporal_cv, path=path, nr_dim=nr_dim, stack_x=stack_x,
stack_y=stack_y)
ews_calc_and_save(variable, stack_of_windows, '.t.skw', ews.temporal_skw, path=path, nr_dim=nr_dim, stack_x=stack_x,
stack_y=stack_y)
ews_calc_and_save(variable, stack_of_windows, '.t.krt', ews.temporal_krt, path=path, nr_dim=nr_dim, stack_x=stack_x,
stack_y=stack_y)
ews_calc_and_save(variable, stack_of_windows, '.t.acr', ews.temporal_autocorrelation, path=path, nr_dim=nr_dim,
stack_x=stack_x, stack_y=stack_y)
ews_calc_and_save(variable, stack_of_windows, '.t.AR1', ews.temporal_AR1, path=path, nr_dim=nr_dim, stack_x=stack_x,
stack_y=stack_y)
ews_calc_and_save(variable, stack_of_windows, '.t.rr', ews.temporal_returnrate, path=path, nr_dim=nr_dim,
stack_x=stack_x, stack_y=stack_y)
fpath = os.path.join(path + variable.name + '.t.dfa')
_, _, _, temporal_statistic = ews.temporal_dfa(stack_of_windows, window_size=variable.window_size)
if nr_dim > 1:
temporal_statistic = temporal_statistic.reshape(stack_x, stack_y)
np.savetxt(fpath + '.numpy.txt', temporal_statistic)
fpath = os.path.join(path + variable.name + '.t.coh')
save_p = True
if save_p and nr_dim == 1:
temporal_statistic = [[0.0], [0.0]]
statistic, p_val = ews.temporal_cond_het(stack_of_windows)
temporal_statistic[0] = statistic
temporal_statistic[1] = p_val
else:
temporal_statistic, _ = ews.temporal_cond_het(stack_of_windows)
if nr_dim > 1:
temporal_statistic = temporal_statistic.reshape(stack_x, stack_y)
np.savetxt(fpath + '.numpy.txt', temporal_statistic)
def spatial_ews_calculations(variable, state_variable_maps, path='./1/'):
ews_calc_and_save(variable, state_variable_maps, '.s.mn', ews.spatial_mean, path=path)
ews_calc_and_save(variable, state_variable_maps, '.s.std', ews.spatial_std, path=path)
ews_calc_and_save(variable, state_variable_maps, '.s.var', ews.spatial_var, path=path)
ews_calc_and_save(variable, state_variable_maps, '.s.skw', ews.spatial_skw, path=path)
ews_calc_and_save(variable, state_variable_maps, '.s.krt', ews.spatial_krt, path=path)
ews_calc_and_save(variable, state_variable_maps, '.s.mI', ews.spatial_corr, path=path)
def ews_calculations_init(variable, path='./1/', timer_on=False):
if variable.temporal:
if cfg.mean_timeseries_data:
ews_calculations_main(variable, temporal_data_file_loading, temporal_ews_calculations, path=path,
timer_on=timer_on)
elif not cfg.mean_timeseries_data:
print(f"Mean timeseries data == False in the configuration, could not calculate EWS for "
f"{variable.name}.")
elif variable.spatial:
if cfg.map_data:
ews_calculations_main(variable, spatial_data_file_loading, spatial_ews_calculations, path=path,
timer_on=timer_on)
elif not cfg.map_data:
print(f"Map data == False in the configuration, could not calculate EWS for {variable.name}.")
def ews_calculations_main(variable, loading_function, calculation_function, path='./1/', timer_on=False):
state_variable, files_present = loading_function(variable, path=path)
if files_present:
print(f"Started EWS calculations for {variable.name} in {path}")
if timer_on:
start_time = time.time()
calculation_function(variable, state_variable, path=path)
if timer_on:
end_time = time.time()
print(f"Elapsed time for EWS calculations for {variable.name} in {path} equals:", end_time - start_time,
'\n')
def EWS_weekly_calculations():
start_time = time.time()
for realization in range(1, cfg.nrOfSamples + 1):
for variable in variables:
ews_calculations_init(variable, path=f'./{realization}/', timer_on=True)
if cfg.generate_dummy_datasets:
generate_datasets_init(variable, path=f'./{realization}/', nr_realizations=cfg.nr_generated_datasets,
method1=cfg.method_1, method2=cfg.method_2, method3=cfg.method_3)
ews_calculations_generated_datasets_init(variable, path=f'./{realization}/',
nr_realizations=cfg.nr_generated_datasets,
timer_on=True, method1=cfg.method_1, method2=cfg.method_2,
method3=cfg.method_3)
end_time = time.time() - start_time
print(f"Total elapsed time equals: {end_time} seconds")
def EWS_hourly_calculations():
start_time = time.time()
for i in range(cfg.stepsTotal):
fpath = str(i).zfill(2)
for variable in variables:
ews_calculations_init(variable, path=f'./h{fpath}/', timer_on=True)
end_time = time.time() - start_time
print(f"Total elapsed time equals: {end_time} seconds")
EWS_weekly_calculations()
| true
| true
|
1c411b9db3f142e3d0cae1ec92eac77c0a059fdb
| 6,036
|
py
|
Python
|
thought_lounge/models.py
|
simplykhanh/thoughtloungev2
|
e26f3fd7cf1a179622a4b20ae77dd2aa1bd5daa6
|
[
"MIT"
] | null | null | null |
thought_lounge/models.py
|
simplykhanh/thoughtloungev2
|
e26f3fd7cf1a179622a4b20ae77dd2aa1bd5daa6
|
[
"MIT"
] | 1
|
2015-10-27T00:22:51.000Z
|
2018-11-20T19:02:47.000Z
|
thought_lounge/models.py
|
simplykhanh/thoughtloungev2
|
e26f3fd7cf1a179622a4b20ae77dd2aa1bd5daa6
|
[
"MIT"
] | 1
|
2015-10-27T00:11:22.000Z
|
2015-10-27T00:11:22.000Z
|
from thought_lounge import db
import uuid
from datetime import timezone
from passlib.apps import custom_app_context as pwd_context
class Key(db.Model):
id = db.Column(db.Integer, primary_key = True)
key = db.Column(db.String, unique = True, nullable = False)
def __init__(self):
self.key = str(uuid.uuid4())
def __repr__(self):
return '<Key {0}>'.format(self.key)
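# Key stores a random UUID attached one-to-one to a User; presumably it serves as
# an authentication / API token (the model itself does not enforce a specific use).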
class User(db.Model):
id = db.Column(db.Integer, primary_key = True)
email = db.Column(db.String, nullable = False, unique = True)
password = db.Column(db.String, nullable = False, server_default = '')
first_name = db.Column(db.String)
last_name = db.Column(db.String)
bio = db.Column(db.String)
# 'host' or 'lounger' or 'admin'
role = db.Column(db.String)
# in number of weeks
# 0 = never, 1 = every week, 2 = every two weeks, etc.
notifications = db.Column(db.Integer, nullable = False)
picture_id = db.Column(db.Integer, db.ForeignKey('picture.id'))
picture = db.relationship('Picture')
user_lounges = db.relationship('UserLounge', backref = 'user')
#verification_code = db.Column(db.Integer, unique = True, server_default = None)
host_applications = db.relationship('HostApplication', backref = 'user', lazy = 'dynamic', cascade = 'all, delete-orphan')
key_id = db.Column(db.Integer, db.ForeignKey('key.id'))
key = db.relationship('Key', backref = db.backref('user', uselist = False))
def hash_password(self, plaintext_password):
self.password = pwd_context.encrypt(plaintext_password)
def verify_password(self, plaintext_password):
return pwd_context.verify(plaintext_password, self.password)
def __init__(self, email, password, first_name = None, last_name = None, bio = None, role = 'lounger', notifications = 2):
self.email = email
self.hash_password(password)
self.first_name = first_name
self.last_name = last_name
self.bio = bio
self.role = role
self.notifications = notifications
self.key = Key()
def __repr__(self):
return '<User {0}: {1} {2}>'.format(self.email, self.first_name, self.last_name)
class UserLounge(db.Model):
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), primary_key = True)
lounge_id = db.Column(db.Integer, db.ForeignKey('lounge.id'), primary_key = True)
lounge = db.relationship('Lounge', backref = 'user_lounges')
is_host = db.Column(db.Boolean)
topic = db.Column(db.String)
summary = db.Column(db.String)
showed_up = db.Column(db.Boolean)
def __init__(self, is_host, topic = None, summary = None, showed_up = None):
self.is_host = is_host
self.summary = summary
self.topic = topic
self.showed_up = showed_up
def __repr__(self):
return '<UserLounge {0}: hosting {1}>'.format(self.topic, self.is_host)
class Lounge(db.Model):
id = db.Column(db.Integer, primary_key = True)
date_time = db.Column(db.DateTime, nullable = False)
location = db.Column(db.String)
campus = db.Column(db.String) #CHANGE
community = db.Column(db.String)
is_reserved = db.Column(db.Boolean, nullable = False)
topic = db.Column(db.String)
summary = db.Column(db.String)
pictures = db.relationship('Picture', backref = 'lounge', lazy = 'dynamic', cascade = 'all, delete-orphan')
@property
def host(self):
try:
return [lounge_user.user for lounge_user in self.user_lounges if lounge_user.is_host][0]
except IndexError:
return None
@property
def local_date_time(self):
return self.date_time.replace(tzinfo = timezone.utc).astimezone(tz = None)
@property
def formatted_local_date_time(self):
return self.local_date_time.strftime('%A, %d %B at %I:%M %p')
def __init__(self, date_time, is_reserved, location = None, community = None, topic = None, summary = None):
self.date_time = date_time
self.location = location
self.community = community
self.is_reserved = is_reserved
self.topic = topic
self.summary = summary
def __repr__(self):
return '<Lounge {0}>'.format(self.date_time)
class Picture(db.Model):
id = db.Column(db.Integer, primary_key = True)
extension = db.Column(db.String)
description = db.Column(db.String)
lounge_id = db.Column(db.Integer, db.ForeignKey('lounge.id'))
event_id = db.Column(db.Integer, db.ForeignKey('event.id'))
def __init__(self, extension, description = None):
self.extension = extension
self.description = description
def __repr__(self):
return '<Picture {0}.{1}>'.format(self.id, self.extension)
class Event(db.Model):
id = db.Column(db.Integer, primary_key = True)
title = db.Column(db.String, nullable = False)
description = db.Column(db.String)
date_time = db.Column(db.DateTime)
# Google Maps-able
location = db.Column(db.String)
pictures = db.relationship('Picture', backref = 'event', lazy = 'dynamic', cascade = 'all, delete-orphan')
# Markdown
article = db.Column(db.String)
def __init__(self, title, description = None, date_time = None, location = None, article = None):
self.title = title
self.description = description
self.date_time = date_time
self.location = location
self.article = article
def __repr__(self):
return '<Event {0}: {1}>'.format(self.title, self.date_time)
class HostApplication(db.Model):
id = db.Column(db.Integer, primary_key = True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
application = db.Column(db.String, nullable = False)
is_approved = db.Column(db.Boolean)
def __init__(self, application, is_approved = None):
self.application = application
self.is_approved = is_approved
def __repr__(self):
return '<HostApplication {0}>'.format(self.is_approved)
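# Minimal usage sketch (illustrative only; assumes an initialized Flask-SQLAlchemy
# app and database):
#   user = User(email="a@example.com", password="secret")
#   db.session.add(user)
#   db.session.commit()
#   user.verify_password("secret")   # -> True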
| 32.627027
| 126
| 0.661199
|
from thought_lounge import db
import uuid
from datetime import timezone
from passlib.apps import custom_app_context as pwd_context
class Key(db.Model):
id = db.Column(db.Integer, primary_key = True)
key = db.Column(db.String, unique = True, nullable = False)
def __init__(self):
self.key = str(uuid.uuid4())
def __repr__(self):
return '<Key {0}>'.format(self.key)
class User(db.Model):
id = db.Column(db.Integer, primary_key = True)
email = db.Column(db.String, nullable = False, unique = True)
password = db.Column(db.String, nullable = False, server_default = '')
first_name = db.Column(db.String)
last_name = db.Column(db.String)
bio = db.Column(db.String)
role = db.Column(db.String)
notifications = db.Column(db.Integer, nullable = False)
picture_id = db.Column(db.Integer, db.ForeignKey('picture.id'))
picture = db.relationship('Picture')
user_lounges = db.relationship('UserLounge', backref = 'user')
host_applications = db.relationship('HostApplication', backref = 'user', lazy = 'dynamic', cascade = 'all, delete-orphan')
key_id = db.Column(db.Integer, db.ForeignKey('key.id'))
key = db.relationship('Key', backref = db.backref('user', uselist = False))
def hash_password(self, plaintext_password):
self.password = pwd_context.encrypt(plaintext_password)
def verify_password(self, plaintext_password):
return pwd_context.verify(plaintext_password, self.password)
def __init__(self, email, password, first_name = None, last_name = None, bio = None, role = 'lounger', notifications = 2):
self.email = email
self.hash_password(password)
self.first_name = first_name
self.last_name = last_name
self.bio = bio
self.role = role
self.notifications = notifications
self.key = Key()
def __repr__(self):
return '<User {0}: {1} {2}>'.format(self.email, self.first_name, self.last_name)
class UserLounge(db.Model):
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), primary_key = True)
lounge_id = db.Column(db.Integer, db.ForeignKey('lounge.id'), primary_key = True)
lounge = db.relationship('Lounge', backref = 'user_lounges')
is_host = db.Column(db.Boolean)
topic = db.Column(db.String)
summary = db.Column(db.String)
showed_up = db.Column(db.Boolean)
def __init__(self, is_host, topic = None, summary = None, showed_up = None):
self.is_host = is_host
self.summary = summary
self.topic = topic
self.showed_up = showed_up
def __repr__(self):
return '<UserLounge {0}: hosting {1}>'.format(self.topic, self.is_host)
class Lounge(db.Model):
id = db.Column(db.Integer, primary_key = True)
date_time = db.Column(db.DateTime, nullable = False)
location = db.Column(db.String)
campus = db.Column(db.String)
community = db.Column(db.String)
is_reserved = db.Column(db.Boolean, nullable = False)
topic = db.Column(db.String)
summary = db.Column(db.String)
pictures = db.relationship('Picture', backref = 'lounge', lazy = 'dynamic', cascade = 'all, delete-orphan')
@property
def host(self):
try:
return [lounge_user.user for lounge_user in self.user_lounges if lounge_user.is_host][0]
except IndexError:
return None
@property
def local_date_time(self):
return self.date_time.replace(tzinfo = timezone.utc).astimezone(tz = None)
@property
def formatted_local_date_time(self):
return self.local_date_time.strftime('%A, %d %B at %I:%M %p')
def __init__(self, date_time, is_reserved, location = None, community = None, topic = None, summary = None):
self.date_time = date_time
self.location = location
self.community = community
self.is_reserved = is_reserved
self.topic = topic
self.summary = summary
def __repr__(self):
return '<Lounge {0}>'.format(self.date_time)
class Picture(db.Model):
id = db.Column(db.Integer, primary_key = True)
extension = db.Column(db.String)
description = db.Column(db.String)
lounge_id = db.Column(db.Integer, db.ForeignKey('lounge.id'))
event_id = db.Column(db.Integer, db.ForeignKey('event.id'))
def __init__(self, extension, description = None):
self.extension = extension
self.description = description
def __repr__(self):
return '<Picture {0}.{1}>'.format(self.id, self.extension)
class Event(db.Model):
id = db.Column(db.Integer, primary_key = True)
title = db.Column(db.String, nullable = False)
description = db.Column(db.String)
date_time = db.Column(db.DateTime)
location = db.Column(db.String)
pictures = db.relationship('Picture', backref = 'event', lazy = 'dynamic', cascade = 'all, delete-orphan')
article = db.Column(db.String)
def __init__(self, title, description = None, date_time = None, location = None, article = None):
self.title = title
self.description = description
self.date_time = date_time
self.location = location
self.article = article
def __repr__(self):
return '<Event {0}: {1}>'.format(self.title, self.date_time)
class HostApplication(db.Model):
id = db.Column(db.Integer, primary_key = True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
application = db.Column(db.String, nullable = False)
is_approved = db.Column(db.Boolean)
def __init__(self, application, is_approved = None):
self.application = application
self.is_approved = is_approved
def __repr__(self):
return '<HostApplication {0}>'.format(self.is_approved)
| true
| true
|
1c411be21f8294258120d22a8cf551730fa1c2c7
| 61,611
|
py
|
Python
|
pyhamtools/lookuplib.py
|
threeio/pyhamtools
|
0fa24e97416153f669987cc90e7c29ad16eec7b4
|
[
"MIT"
] | null | null | null |
pyhamtools/lookuplib.py
|
threeio/pyhamtools
|
0fa24e97416153f669987cc90e7c29ad16eec7b4
|
[
"MIT"
] | null | null | null |
pyhamtools/lookuplib.py
|
threeio/pyhamtools
|
0fa24e97416153f669987cc90e7c29ad16eec7b4
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import os
import logging
import logging.config
import re
import random, string
from datetime import datetime
import xml.etree.ElementTree as ET
import urllib
import json
import copy
import sys
import unicodedata
import requests
from requests.exceptions import ConnectionError, HTTPError, Timeout
from bs4 import BeautifulSoup
import pytz
from . import version
from .consts import LookupConventions as const
from .exceptions import APIKeyMissingError
UTC = pytz.UTC
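# Lua script run inside Redis to delete every key matching a pattern; keys are
# removed in chunks of 20000 to stay within Lua's unpack() argument limits.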
REDIS_LUA_DEL_SCRIPT = "local keys = redis.call('keys', ARGV[1]) \n for i=1,#keys,20000 do \n redis.call('del', unpack(keys, i, math.min(i+19999, #keys))) \n end \n return keys"
if sys.version_info < (2, 7,):
class NullHandler(logging.Handler):
def emit(self, record):
pass
if sys.version_info.major == 3:
unicode = str
class LookupLib(object):
"""
    This class is a wrapper for the following four Amateur Radio databases:
1. Clublog.org (daily updated XML File)
2. Clublog.org (HTTPS lookup)
3. Country-files.com (infrequently updated PLIST File)
4. QRZ.com (HTTP / XML Lookup)
    Its aim is to provide a homogeneous interface to different databases.
    Typically an instance of this class is injected as a dependency in the :py:class:`Callinfo` class, but it can also
    be used directly.
    Even though the interface is the same for all lookup sources, the returned data can differ.
    The documentation of the various methods provides more detail.
    By default, LookupLib requires an Internet connection to download the libraries or perform the
    lookup against the Clublog API or QRZ.com.
    The entire lookup data (where database files are downloaded) can also be copied into Redis, an extremely
    fast in-memory key/value store. A LookupLib object can then be instantiated to perform all lookups in Redis
    instead of processing and loading the data from the Internet / a file. This saves some time and allows several
    instances of :py:class:`LookupLib` to query the same data concurrently.
Args:
lookuptype (str) : "clublogxml" or "clublogapi" or "countryfile" or "redis" or "qrz"
apikey (str): Clublog API Key
username (str): QRZ.com username
pwd (str): QRZ.com password
apiv (str, optional): QRZ.com API Version
filename (str, optional): Filename for Clublog XML or Country-files.com cty.plist file. When a local file is
            used, no Internet connection nor API Key is necessary.
logger (logging.getLogger(__name__), optional): Python logger
redis_instance (redis.Redis(), optional): Instance of Redis
redis_prefix (str, optional): Prefix to identify the lookup data set in Redis
"""
def __init__(self, lookuptype = "countryfile", apikey=None, apiv="1.3.3", filename=None, logger=None, username=None, pwd=None, redis_instance=None, redis_prefix=None):
self._logger = None
if logger:
self._logger = logger
else:
self._logger = logging.getLogger(__name__)
if sys.version_info[:2] == (2, 6):
self._logger.addHandler(NullHandler())
else:
self._logger.addHandler(logging.NullHandler())
self._apikey = apikey
self._apiv = apiv
self._download = True
self._lib_filename = filename
self._redis = redis_instance
self._redis_prefix = redis_prefix
self._username = username
self._pwd = pwd
if self._lib_filename:
self._download = False
self._callsign_exceptions_index = {}
self._invalid_operations_index = {}
self._zone_exceptions_index = {}
self._entities = {}
self._callsign_exceptions = {}
self._invalid_operations = {}
self._zone_exceptions = {}
self._lookuptype = lookuptype
if self._lookuptype == "clublogxml":
self._load_clublogXML(apikey=self._apikey, cty_file=self._lib_filename)
elif self._lookuptype == "countryfile":
self._load_countryfile(cty_file=self._lib_filename)
elif self._lookuptype == "clublogapi":
pass
elif self._lookuptype == "redis":
import redis
elif self._lookuptype == "qrz":
self._apikey = self._get_qrz_session_key(self._username, self._pwd)
else:
raise AttributeError("Lookup type missing")
    def _get_qrz_session_key(self, username, pwd):
        """ Retrieve a session key from the QRZ.com XML API using username and password
        """
        qrz_api_version = "1.3.3"
url = "https://xmldata.qrz.com/xml/" + qrz_api_version + "/"
agent = "PyHamTools"+version.__version__
params = {"username" : username,
"password" : pwd,
"agent" : agent
}
if sys.version_info.major == 3:
encodeurl = url + "?" + urllib.parse.urlencode(params)
else:
encodeurl = url + "?" + urllib.urlencode(params)
response = requests.get(encodeurl, timeout=10)
doc = BeautifulSoup(response.text, "html.parser")
session_key = None
if doc.session.key:
session_key = doc.session.key.text
else:
if doc.session.error:
raise ValueError(doc.session.error.text)
else:
raise ValueError("Could not retrieve Session Key from QRZ.com")
return session_key
def copy_data_in_redis(self, redis_prefix, redis_instance):
"""
Copy the complete lookup data into redis. Old data will be overwritten.
Args:
redis_prefix (str): Prefix to distinguish the data in redis for the different looktypes
redis_instance (str): an Instance of Redis
Returns:
bool: returns True when the data has been copied successfully into Redis
Example:
            Copy the entire lookup data from the Country-files.com PLIST File into Redis. This example requires a running
            instance of Redis, as well as the python Redis connector (pip install redis).
>>> from pyhamtools import LookupLib
>>> import redis
>>> r = redis.Redis()
>>> my_lookuplib = LookupLib(lookuptype="countryfile")
>>> print my_lookuplib.copy_data_in_redis(redis_prefix="CF", redis_instance=r)
True
Now let's create an instance of LookupLib, using Redis to query the data
>>> from pyhamtools import LookupLib
>>> import redis
>>> r = redis.Redis()
>>> my_lookuplib = LookupLib(lookuptype="countryfile", redis_instance=r, redis_prefix="CF")
>>> my_lookuplib.lookup_callsign("3D2RI")
{
u'adif': 460,
u'continent': u'OC',
u'country': u'Rotuma Island',
u'cqz': 32,
u'ituz': 56,
u'latitude': -12.48,
u'longitude': 177.08
}
Note:
This method is available for the following lookup type
- clublogxml
- countryfile
"""
if redis_instance is not None:
self._redis = redis_instance
if self._redis is None:
raise AttributeError("redis_instance is missing")
if redis_prefix is None:
raise KeyError("redis_prefix is missing")
if self._lookuptype == "clublogxml" or self._lookuptype == "countryfile":
self._push_dict_to_redis(self._entities, redis_prefix, "_entity_")
self._push_dict_index_to_redis(self._callsign_exceptions_index, redis_prefix, "_call_ex_index_")
self._push_dict_to_redis(self._callsign_exceptions, redis_prefix, "_call_ex_")
self._push_dict_index_to_redis(self._prefixes_index, redis_prefix, "_prefix_index_")
self._push_dict_to_redis(self._prefixes, redis_prefix, "_prefix_")
self._push_dict_index_to_redis(self._invalid_operations_index, redis_prefix, "_inv_op_index_")
self._push_dict_to_redis(self._invalid_operations, redis_prefix, "_inv_op_")
self._push_dict_index_to_redis(self._zone_exceptions_index, redis_prefix, "_zone_ex_index_")
self._push_dict_to_redis(self._zone_exceptions, redis_prefix, "_zone_ex_")
return True
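    # Redis layout written by the two helpers below:
    #   <redis_prefix><name><record_id>    -> JSON-serialized data dict
    #   <redis_prefix><index_name><item>   -> set of record ids pointing into the data keys
    # Any existing keys under the same prefix are wiped first via REDIS_LUA_DEL_SCRIPT.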
def _push_dict_to_redis(self, push_dict, redis_prefix, name):
r = self._redis
pipe = r.pipeline()
pipe.eval(REDIS_LUA_DEL_SCRIPT, 0, redis_prefix + name)
for i in push_dict:
json_data = self._serialize_data(push_dict[i])
pipe.set(redis_prefix + name + str(i), json_data)
pipe.execute()
return True
def _push_dict_index_to_redis(self, index_dict, redis_prefix, name):
r = self._redis
pipe = r.pipeline()
pipe.eval(REDIS_LUA_DEL_SCRIPT, 0, redis_prefix + name)
for i in index_dict:
for el in index_dict[i]:
pipe.sadd(redis_prefix + name + str(i), el)
pipe.execute()
return True
def lookup_entity(self, entity=None):
"""Returns lookup data of an ADIF Entity
Args:
entity (int): ADIF identifier of country
Returns:
dict: Dictionary containing the country specific data
Raises:
KeyError: No matching entity found
Example:
            The following code queries the Clublog XML database for the ADIF entity Turkmenistan, which has
            the id 273.
            >>> from pyhamtools import LookupLib
            >>> my_lookuplib = LookupLib(lookuptype="clublogxml", apikey="myapikey")
>>> print my_lookuplib.lookup_entity(273)
{
'deleted': False,
'country': u'TURKMENISTAN',
'longitude': 58.4,
'cqz': 17,
'prefix': u'EZ',
'latitude': 38.0,
'continent': u'AS'
}
Note:
This method is available for the following lookup type
- clublogxml
- redis
- qrz.com
"""
if self._lookuptype == "clublogxml":
entity = int(entity)
if entity in self._entities:
return self._strip_metadata(self._entities[entity])
else:
raise KeyError
elif self._lookuptype == "redis":
if self._redis_prefix is None:
raise KeyError ("redis_prefix is missing")
json_data = self._redis.get(self._redis_prefix + "_entity_" + str(entity))
if json_data is not None:
my_dict = self._deserialize_data(json_data)
return self._strip_metadata(my_dict)
elif self._lookuptype == "qrz":
result = self._lookup_qrz_dxcc(entity, self._apikey)
return result
# no matching case
raise KeyError
def _strip_metadata(self, my_dict):
"""
        Create a copy of the dict and remove the date / whitelist metadata fields
"""
new_dict = copy.deepcopy(my_dict)
if const.START in new_dict:
del new_dict[const.START]
if const.END in new_dict:
del new_dict[const.END]
if const.WHITELIST in new_dict:
del new_dict[const.WHITELIST]
if const.WHITELIST_START in new_dict:
del new_dict[const.WHITELIST_START]
if const.WHITELIST_END in new_dict:
del new_dict[const.WHITELIST_END]
return new_dict
def lookup_callsign(self, callsign=None, timestamp=None):
"""
Returns lookup data if an exception exists for a callsign
Args:
callsign (string): Amateur radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
dict: Dictionary containing the country specific data of the callsign
Raises:
KeyError: No matching callsign found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
            The following code queries the online Clublog API for the callsign "VK9XO" on a specific date.
>>> from pyhamtools import LookupLib
>>> from datetime import datetime
>>> import pytz
>>> my_lookuplib = LookupLib(lookuptype="clublogapi", apikey="myapikey")
>>> timestamp = datetime(year=1962, month=7, day=7, tzinfo=pytz.UTC)
>>> print my_lookuplib.lookup_callsign("VK9XO", timestamp)
{
'country': u'CHRISTMAS ISLAND',
'longitude': 105.7,
'cqz': 29,
'adif': 35,
'latitude': -10.5,
'continent': u'OC'
}
Note:
This method is available for
- clublogxml
- clublogapi
- countryfile
- qrz.com
- redis
"""
callsign = callsign.strip().upper()
if timestamp is None:
timestamp = datetime.utcnow().replace(tzinfo=UTC)
if self._lookuptype == "clublogapi":
callsign_data = self._lookup_clublogAPI(callsign=callsign, timestamp=timestamp, apikey=self._apikey)
if callsign_data[const.ADIF]==1000:
raise KeyError
else:
return callsign_data
elif self._lookuptype == "clublogxml" or self._lookuptype == "countryfile":
return self._check_data_for_date(callsign, timestamp, self._callsign_exceptions, self._callsign_exceptions_index)
elif self._lookuptype == "redis":
data_dict, index = self._get_dicts_from_redis("_call_ex_", "_call_ex_index_", self._redis_prefix, callsign)
return self._check_data_for_date(callsign, timestamp, data_dict, index)
# no matching case
elif self._lookuptype == "qrz":
return self._lookup_qrz_callsign(callsign, self._apikey, self._apiv)
raise KeyError("unknown Callsign")
def _get_dicts_from_redis(self, name, index_name, redis_prefix, item):
"""
Retrieve the data of an item from redis and put it in an index and data dictionary to match the
common query interface.
"""
r = self._redis
data_dict = {}
data_index_dict = {}
if redis_prefix is None:
raise KeyError ("redis_prefix is missing")
if r.scard(redis_prefix + index_name + str(item)) > 0:
data_index_dict[str(item)] = r.smembers(redis_prefix + index_name + str(item))
for i in data_index_dict[item]:
json_data = r.get(redis_prefix + name + str(int(i)))
data_dict[i] = self._deserialize_data(json_data)
return (data_dict, data_index_dict)
raise KeyError ("No Data found in Redis for "+ item)
def _check_data_for_date(self, item, timestamp, data_dict, data_index_dict):
"""
Checks if the item is found in the index. An entry in the index points to the data
        in the data_dict. This is mainly used to retrieve callsigns and prefixes.
In case data is found for item, a dict containing the data is returned. Otherwise a KeyError is raised.
"""
if item in data_index_dict:
for item in data_index_dict[item]:
# startdate < timestamp
if const.START in data_dict[item] and not const.END in data_dict[item]:
if data_dict[item][const.START] < timestamp:
item_data = copy.deepcopy(data_dict[item])
del item_data[const.START]
return item_data
# enddate > timestamp
elif not const.START in data_dict[item] and const.END in data_dict[item]:
if data_dict[item][const.END] > timestamp:
item_data = copy.deepcopy(data_dict[item])
del item_data[const.END]
return item_data
# startdate > timestamp > enddate
elif const.START in data_dict[item] and const.END in data_dict[item]:
if data_dict[item][const.START] < timestamp \
and data_dict[item][const.END] > timestamp:
item_data = copy.deepcopy(data_dict[item])
del item_data[const.START]
del item_data[const.END]
return item_data
# no startdate or enddate available
elif not const.START in data_dict[item] and not const.END in data_dict[item]:
return data_dict[item]
raise KeyError
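    # Illustration of the window logic above (dates are made up): a record with
    # START=2010-01-01 and END=2012-01-01 matches a 2011 timestamp and is returned
    # with both bounds stripped; a record carrying only START matches any later
    # timestamp; records without START/END match unconditionally.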
def _check_inv_operation_for_date(self, item, timestamp, data_dict, data_index_dict):
"""
Checks if the callsign is marked as an invalid operation for a given timestamp.
In case the operation is invalid, True is returned. Otherwise a KeyError is raised.
"""
if item in data_index_dict:
for item in data_index_dict[item]:
# startdate < timestamp
if const.START in data_dict[item] and not const.END in data_dict[item]:
if data_dict[item][const.START] < timestamp:
return True
# enddate > timestamp
elif not const.START in data_dict[item] and const.END in data_dict[item]:
if data_dict[item][const.END] > timestamp:
return True
# startdate > timestamp > enddate
elif const.START in data_dict[item] and const.END in data_dict[item]:
if data_dict[item][const.START] < timestamp \
and data_dict[item][const.END] > timestamp:
return True
# no startdate or enddate available
elif not const.START in data_dict[item] and not const.END in data_dict[item]:
return True
raise KeyError
def lookup_prefix(self, prefix, timestamp=None):
"""
Returns lookup data of a Prefix
Args:
prefix (string): Prefix of a Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
dict: Dictionary containing the country specific data of the Prefix
Raises:
KeyError: No matching Prefix found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code shows how to obtain the information for the prefix "DH" from the countryfile.com
database (default database).
>>> from pyhamtools import LookupLib
>>> myLookupLib = LookupLib()
>>> print myLookupLib.lookup_prefix("DH")
{
'adif': 230,
'country': u'Fed. Rep. of Germany',
'longitude': 10.0,
'cqz': 14,
'ituz': 28,
'latitude': 51.0,
'continent': u'EU'
}
Note:
This method is available for
- clublogxml
- countryfile
- redis
"""
prefix = prefix.strip().upper()
if timestamp is None:
timestamp = datetime.utcnow().replace(tzinfo=UTC)
if self._lookuptype == "clublogxml" or self._lookuptype == "countryfile":
return self._check_data_for_date(prefix, timestamp, self._prefixes, self._prefixes_index)
elif self._lookuptype == "redis":
data_dict, index = self._get_dicts_from_redis("_prefix_", "_prefix_index_", self._redis_prefix, prefix)
return self._check_data_for_date(prefix, timestamp, data_dict, index)
# no matching case
raise KeyError
def is_invalid_operation(self, callsign, timestamp=None):
"""
Returns True if an operations is known as invalid
Args:
callsign (string): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
bool: True if a record exists for this callsign (at the given time)
Raises:
KeyError: No matching callsign found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code checks the Clublog XML database if the operation is valid for two dates.
>>> from pyhamtools import LookupLib
>>> from datetime import datetime
>>> import pytz
>>> my_lookuplib = LookupLib(lookuptype="clublogxml", apikey="myapikey")
>>> print my_lookuplib.is_invalid_operation("5W1CFN")
True
>>> try:
>>> timestamp = datetime(year=2012, month=1, day=31).replace(tzinfo=pytz.UTC)
>>> my_lookuplib.is_invalid_operation("5W1CFN", timestamp)
>>> except KeyError:
>>> print "Seems to be invalid operation before 31.1.2012"
Seems to be an invalid operation before 31.1.2012
Note:
This method is available for
- clublogxml
- redis
"""
callsign = callsign.strip().upper()
if timestamp is None:
timestamp = datetime.utcnow().replace(tzinfo=UTC)
if self._lookuptype == "clublogxml":
return self._check_inv_operation_for_date(callsign, timestamp, self._invalid_operations, self._invalid_operations_index)
elif self._lookuptype == "redis":
data_dict, index = self._get_dicts_from_redis("_inv_op_", "_inv_op_index_", self._redis_prefix, callsign)
return self._check_inv_operation_for_date(callsign, timestamp, data_dict, index)
#no matching case
raise KeyError
def _check_zone_exception_for_date(self, item, timestamp, data_dict, data_index_dict):
"""
        Checks the index and data to see if a cq-zone exception exists for the callsign.
When a zone exception is found, the zone is returned. If no exception is found
a KeyError is raised
"""
if item in data_index_dict:
for item in data_index_dict[item]:
# startdate < timestamp
if const.START in data_dict[item] and not const.END in data_dict[item]:
if data_dict[item][const.START] < timestamp:
return data_dict[item][const.CQZ]
# enddate > timestamp
elif not const.START in data_dict[item] and const.END in data_dict[item]:
if data_dict[item][const.END] > timestamp:
return data_dict[item][const.CQZ]
# startdate > timestamp > enddate
elif const.START in data_dict[item] and const.END in data_dict[item]:
if data_dict[item][const.START] < timestamp \
and data_dict[item][const.END] > timestamp:
return data_dict[item][const.CQZ]
# no startdate or enddate available
elif not const.START in data_dict[item] and not const.END in data_dict[item]:
return data_dict[item][const.CQZ]
raise KeyError
def lookup_zone_exception(self, callsign, timestamp=None):
"""
Returns a CQ Zone if an exception exists for the given callsign
Args:
callsign (string): Amateur radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
            int: Value of the CQ Zone exception that exists for this callsign (at the given time)
Raises:
KeyError: No matching callsign found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code checks the Clublog XML database if a CQ Zone exception exists for the callsign DP0GVN.
>>> from pyhamtools import LookupLib
>>> my_lookuplib = LookupLib(lookuptype="clublogxml", apikey="myapikey")
>>> print my_lookuplib.lookup_zone_exception("DP0GVN")
38
The prefix "DP" It is assigned to Germany, but the station is located in Antarctica, and therefore
in CQ Zone 38
Note:
This method is available for
- clublogxml
- redis
"""
callsign = callsign.strip().upper()
if timestamp is None:
timestamp = datetime.utcnow().replace(tzinfo=UTC)
if self._lookuptype == "clublogxml":
return self._check_zone_exception_for_date(callsign, timestamp, self._zone_exceptions, self._zone_exceptions_index)
elif self._lookuptype == "redis":
data_dict, index = self._get_dicts_from_redis("_zone_ex_", "_zone_ex_index_", self._redis_prefix, callsign)
return self._check_zone_exception_for_date(callsign, timestamp, data_dict, index)
#no matching case
raise KeyError
def _lookup_clublogAPI(self, callsign=None, timestamp=None, url="https://secure.clublog.org/dxcc", apikey=None):
""" Set up the Lookup object for Clublog Online API
"""
params = {"year" : timestamp.strftime("%Y"),
"month" : timestamp.strftime("%m"),
"day" : timestamp.strftime("%d"),
"hour" : timestamp.strftime("%H"),
"minute" : timestamp.strftime("%M"),
"api" : apikey,
"full" : "1",
"call" : callsign
}
if timestamp is None:
timestamp = datetime.utcnow().replace(tzinfo=UTC)
if sys.version_info.major == 3:
encodeurl = url + "?" + urllib.parse.urlencode(params)
else:
encodeurl = url + "?" + urllib.urlencode(params)
response = requests.get(encodeurl, timeout=5)
if not self._check_html_response(response):
raise LookupError
jsonLookup = response.json()
lookup = {}
for item in jsonLookup:
if item == "Name": lookup[const.COUNTRY] = jsonLookup["Name"]
elif item == "DXCC": lookup[const.ADIF] = int(jsonLookup["DXCC"])
elif item == "Lon": lookup[const.LONGITUDE] = float(jsonLookup["Lon"])*(-1)
elif item == "Lat": lookup[const.LATITUDE] = float(jsonLookup["Lat"])
elif item == "CQZ": lookup[const.CQZ] = int(jsonLookup["CQZ"])
elif item == "Continent": lookup[const.CONTINENT] = jsonLookup["Continent"]
if lookup[const.ADIF] == 0:
raise KeyError
else:
return lookup
def _request_callsign_info_from_qrz(self, callsign, apikey, apiv="1.3.3"):
qrz_api_version = apiv
url = "https://xmldata.qrz.com/xml/" + qrz_api_version + "/"
params = {
"s": apikey,
"callsign" : callsign,
}
if sys.version_info.major == 3:
encodeurl = url + "?" + urllib.parse.urlencode(params)
else:
encodeurl = url + "?" + urllib.urlencode(params)
response = requests.get(encodeurl, timeout=5)
return response
def _request_dxcc_info_from_qrz(self, dxcc_or_callsign, apikey, apiv="1.3.3"):
qrz_api_version = apiv
url = "https://xmldata.qrz.com/xml/" + qrz_api_version + "/"
params = {
"s": apikey,
"dxcc" : str(dxcc_or_callsign),
}
if sys.version_info.major == 3:
encodeurl = url + "?" + urllib.parse.urlencode(params)
else:
encodeurl = url + "?" + urllib.urlencode(params)
response = requests.get(encodeurl, timeout=5)
return response
def _lookup_qrz_dxcc(self, dxcc_or_callsign, apikey, apiv="1.3.3"):
""" Performs the dxcc lookup against the QRZ.com XML API:
"""
response = self._request_dxcc_info_from_qrz(dxcc_or_callsign, apikey, apiv=apiv)
root = BeautifulSoup(response.text, "html.parser")
lookup = {}
        if root.error:
if re.search('No DXCC Information for', root.error.text, re.I): #No data available for callsign
raise KeyError(root.error.text)
elif re.search('Session Timeout', root.error.text, re.I): # Get new session key
self._apikey = apikey = self._get_qrz_session_key(self._username, self._pwd)
response = self._request_dxcc_info_from_qrz(dxcc_or_callsign, apikey)
root = BeautifulSoup(response.text, "html.parser")
else:
raise AttributeError("Session Key Missing") #most likely session key missing or invalid
if root.dxcc is None:
raise ValueError
if root.dxcc.dxcc:
lookup[const.ADIF] = int(root.dxcc.dxcc.text)
if root.dxcc.cc:
lookup['cc'] = root.dxcc.cc.text
        if root.dxcc.ccc:
            lookup['ccc'] = root.dxcc.ccc.text
if root.find('name'):
lookup[const.COUNTRY] = root.find('name').get_text()
if root.dxcc.continent:
lookup[const.CONTINENT] = root.dxcc.continent.text
if root.dxcc.ituzone:
lookup[const.ITUZ] = int(root.dxcc.ituzone.text)
if root.dxcc.cqzone:
lookup[const.CQZ] = int(root.dxcc.cqzone.text)
if root.dxcc.timezone:
lookup['timezone'] = float(root.dxcc.timezone.text)
if root.dxcc.lat:
lookup[const.LATITUDE] = float(root.dxcc.lat.text)
if root.dxcc.lon:
lookup[const.LONGITUDE] = float(root.dxcc.lon.text)
return lookup
def _lookup_qrz_callsign(self, callsign=None, apikey=None, apiv="1.3.3"):
""" Performs the callsign lookup against the QRZ.com XML API:
"""
if apikey is None:
raise AttributeError("Session Key Missing")
callsign = callsign.upper()
response = self._request_callsign_info_from_qrz(callsign, apikey, apiv)
root = BeautifulSoup(response.text, "html.parser")
lookup = {}
if root.error:
if re.search('Not found', root.error.text, re.I): #No data available for callsign
raise KeyError(root.error.text)
#try to get a new session key and try to request again
elif re.search('Session Timeout', root.error.text, re.I) or re.search('Invalid session key', root.error.text, re.I):
apikey = self._get_qrz_session_key(self._username, self._pwd)
response = self._request_callsign_info_from_qrz(callsign, apikey, apiv)
root = BeautifulSoup(response.text, "html.parser")
#if this fails again, raise error
if root.error:
if re.search('Not found', root.error.text, re.I): #No data available for callsign
raise KeyError(root.error.text)
else:
raise AttributeError(root.error.text) #most likely session key invalid
else:
#update API Key ob Lookup object
self._apikey = apikey
else:
raise AttributeError(root.error.text) #most likely session key missing
if root.callsign is None:
raise ValueError
if root.callsign.call:
lookup[const.CALLSIGN] = root.callsign.call.text
if root.callsign.xref:
lookup[const.XREF] = root.callsign.xref.text
if root.callsign.aliases:
lookup[const.ALIASES] = root.callsign.aliases.text.split(',')
if root.callsign.dxcc:
lookup[const.ADIF] = int(root.callsign.dxcc.text)
if root.callsign.fname:
lookup[const.FNAME] = root.callsign.fname.text
if root.callsign.find("name"):
lookup[const.NAME] = root.callsign.find('name').get_text()
if root.callsign.addr1:
lookup[const.ADDR1] = root.callsign.addr1.text
if root.callsign.addr2:
lookup[const.ADDR2] = root.callsign.addr2.text
if root.callsign.state:
lookup[const.STATE] = root.callsign.state.text
if root.callsign.zip:
lookup[const.ZIPCODE] = root.callsign.zip.text
if root.callsign.country:
lookup[const.COUNTRY] = root.callsign.country.text
if root.callsign.ccode:
lookup[const.CCODE] = int(root.callsign.ccode.text)
if root.callsign.lat:
lookup[const.LATITUDE] = float(root.callsign.lat.text)
if root.callsign.lon:
lookup[const.LONGITUDE] = float(root.callsign.lon.text)
if root.callsign.grid:
lookup[const.LOCATOR] = root.callsign.grid.text
if root.callsign.county:
lookup[const.COUNTY] = root.callsign.county.text
if root.callsign.fips:
lookup[const.FIPS] = int(root.callsign.fips.text) # check type
if root.callsign.land:
lookup[const.LAND] = root.callsign.land.text
if root.callsign.efdate:
try:
lookup[const.EFDATE] = datetime.strptime(root.callsign.efdate.text, '%Y-%m-%d').replace(tzinfo=UTC)
except ValueError:
self._logger.debug("[QRZ.com] efdate: Invalid DateTime; " + callsign + " " + root.callsign.efdate.text)
if root.callsign.expdate:
try:
lookup[const.EXPDATE] = datetime.strptime(root.callsign.expdate.text, '%Y-%m-%d').replace(tzinfo=UTC)
except ValueError:
self._logger.debug("[QRZ.com] expdate: Invalid DateTime; " + callsign + " " + root.callsign.expdate.text)
if root.callsign.p_call:
lookup[const.P_CALL] = root.callsign.p_call.text
if root.callsign.find('class'):
lookup[const.LICENSE_CLASS] = root.callsign.find('class').get_text()
if root.callsign.codes:
lookup[const.CODES] = root.callsign.codes.text
if root.callsign.qslmgr:
lookup[const.QSLMGR] = root.callsign.qslmgr.text
if root.callsign.email:
lookup[const.EMAIL] = root.callsign.email.text
if root.callsign.url:
lookup[const.URL] = root.callsign.url.text
if root.callsign.u_views:
lookup[const.U_VIEWS] = int(root.callsign.u_views.text)
if root.callsign.bio:
lookup[const.BIO] = root.callsign.bio.text
if root.callsign.biodate:
try:
lookup[const.BIODATE] = datetime.strptime(root.callsign.biodate.text, '%Y-%m-%d %H:%M:%S').replace(tzinfo=UTC)
except ValueError:
self._logger.warning("[QRZ.com] biodate: Invalid DateTime; " + callsign)
if root.callsign.image:
lookup[const.IMAGE] = root.callsign.image.text
if root.callsign.imageinfo:
lookup[const.IMAGE_INFO] = root.callsign.imageinfo.text
if root.callsign.serial:
            lookup[const.SERIAL] = int(root.callsign.serial.text)  # int() also covers py2 long-range values
if root.callsign.moddate:
try:
lookup[const.MODDATE] = datetime.strptime(root.callsign.moddate.text, '%Y-%m-%d %H:%M:%S').replace(tzinfo=UTC)
except ValueError:
self._logger.warning("[QRZ.com] moddate: Invalid DateTime; " + callsign)
if root.callsign.MSA:
lookup[const.MSA] = int(root.callsign.MSA.text)
if root.callsign.AreaCode:
lookup[const.AREACODE] = int(root.callsign.AreaCode.text)
if root.callsign.TimeZone:
lookup[const.TIMEZONE] = int(root.callsign.TimeZone.text)
if root.callsign.GMTOffset:
lookup[const.GMTOFFSET] = float(root.callsign.GMTOffset.text)
if root.callsign.DST:
if root.callsign.DST.text == "Y":
lookup[const.DST] = True
else:
lookup[const.DST] = False
if root.callsign.eqsl:
if root.callsign.eqsl.text == "1":
lookup[const.EQSL] = True
else:
lookup[const.EQSL] = False
if root.callsign.mqsl:
if root.callsign.mqsl.text == "1":
lookup[const.MQSL] = True
else:
lookup[const.MQSL] = False
if root.callsign.cqzone:
lookup[const.CQZ] = int(root.callsign.cqzone.text)
if root.callsign.ituzone:
lookup[const.ITUZ] = int(root.callsign.ituzone.text)
if root.callsign.born:
lookup[const.BORN] = int(root.callsign.born.text)
if root.callsign.user:
lookup[const.USER_MGR] = root.callsign.user.text
if root.callsign.lotw:
if root.callsign.lotw.text == "1":
lookup[const.LOTW] = True
else:
lookup[const.LOTW] = False
if root.callsign.iota:
lookup[const.IOTA] = root.callsign.iota.text
if root.callsign.geoloc:
lookup[const.GEOLOC] = root.callsign.geoloc.text
return lookup
def _load_clublogXML(self,
url="https://secure.clublog.org/cty.php",
apikey=None,
cty_file=None):
""" Load and process the ClublogXML file either as a download or from file
"""
if self._download:
cty_file = self._download_file(
url = url,
apikey = apikey)
else:
cty_file = self._lib_filename
header = self._extract_clublog_header(cty_file)
cty_file = self._remove_clublog_xml_header(cty_file)
cty_dict = self._parse_clublog_xml(cty_file)
self._entities = cty_dict["entities"]
self._callsign_exceptions = cty_dict["call_exceptions"]
self._prefixes = cty_dict["prefixes"]
self._invalid_operations = cty_dict["invalid_operations"]
self._zone_exceptions = cty_dict["zone_exceptions"]
self._callsign_exceptions_index = cty_dict["call_exceptions_index"]
self._prefixes_index = cty_dict["prefixes_index"]
self._invalid_operations_index = cty_dict["invalid_operations_index"]
self._zone_exceptions_index = cty_dict["zone_exceptions_index"]
if self._download:
self._cleanup_download_artifact(cty_file)
return True
def _load_countryfile(self,
url="https://www.country-files.com/cty/cty.plist",
country_mapping_filename="countryfilemapping.json",
cty_file=None):
""" Load and process the ClublogXML file either as a download or from file
"""
cwdFile = os.path.abspath(os.path.join(os.getcwd(), country_mapping_filename))
pkgFile = os.path.abspath(os.path.join(os.path.dirname(__file__), country_mapping_filename))
# from cwd
if os.path.exists(cwdFile):
            # the country mapping file contains the ADIF identifiers of the countries,
            # since the country-files do not provide this information (only the DXCC id)
country_mapping_filename = cwdFile
# from package
elif os.path.exists(pkgFile):
country_mapping_filename = pkgFile
else:
country_mapping_filename = None
if self._download:
cty_file = self._download_file(url=url)
else:
cty_file = os.path.abspath(cty_file)
cty_dict = self._parse_country_file(cty_file, country_mapping_filename)
self._callsign_exceptions = cty_dict["exceptions"]
self._prefixes = cty_dict["prefixes"]
self._callsign_exceptions_index = cty_dict["exceptions_index"]
self._prefixes_index = cty_dict["prefixes_index"]
if self._download:
self._cleanup_download_artifact(cty_file)
return True
def _download_file(self, url, apikey=None):
""" Download lookup files either from Clublog or Country-files.com
"""
import gzip
import tempfile
cty = {}
cty_date = ""
cty_file_path = None
filename = None
# download file
if apikey: # clublog
response = requests.get(url+"?api="+apikey, timeout=10)
else: # country-files.com
response = requests.get(url, timeout=10)
if not self._check_html_response(response):
raise LookupError
#Clublog Webserver Header
if "Content-Disposition" in response.headers:
f = re.search('filename=".+"', response.headers["Content-Disposition"])
if f:
f = f.group(0)
filename = re.search('".+"', f).group(0).replace('"', '')
#Country-files.org webserver header
else:
f = re.search('/.{4}plist$', url)
if f:
f = f.group(0)
filename = f[1:]
if not filename:
filename = "cty_" + self._generate_random_word(5)
download_file_path = os.path.join(tempfile.gettempdir(), filename)
with open(download_file_path, "wb") as download_file:
download_file.write(response.content)
self._logger.debug(str(download_file_path) + " successfully downloaded")
# unzip file, if gz
if os.path.splitext(download_file_path)[1][1:] == "gz":
download_file = gzip.open(download_file_path, "r")
try:
cty_file_path = os.path.join(os.path.splitext(download_file_path)[0])
with open(cty_file_path, "wb") as cty_file:
cty_file.write(download_file.read())
self._logger.debug(str(cty_file_path) + " successfully extracted")
finally:
download_file.close()
else:
cty_file_path = download_file_path
return cty_file_path
def _cleanup_download_artifact(self, filename):
"""
Delete the downloaded files which are not necessary anymore
Args:
filename (string): absolute path to the download artifact
"""
try:
os.remove(filename)
        except OSError:
            self._logger.warning("unable to delete the download artifact: %s", filename)
def _extract_clublog_header(self, cty_xml_filename):
"""
Extract the header of the Clublog XML File
"""
cty_header = {}
try:
with open(cty_xml_filename, "r") as cty:
raw_header = cty.readline()
cty_date = re.search("date='.+'", raw_header)
if cty_date:
cty_date = cty_date.group(0).replace("date=", "").replace("'", "")
cty_date = datetime.strptime(cty_date[:19], '%Y-%m-%dT%H:%M:%S')
                    cty_date = cty_date.replace(tzinfo=UTC)
cty_header["Date"] = cty_date
cty_ns = re.search("xmlns='.+[']", raw_header)
if cty_ns:
cty_ns = cty_ns.group(0).replace("xmlns=", "").replace("'", "")
cty_header['NameSpace'] = cty_ns
if len(cty_header) == 2:
self._logger.debug("Header successfully retrieved from CTY File")
elif len(cty_header) < 2:
self._logger.warning("Header could only be partically retrieved from CTY File")
self._logger.warning("Content of Header: ")
for key in cty_header:
self._logger.warning(str(key)+": "+str(cty_header[key]))
return cty_header
except Exception as e:
self._logger.error("Clublog CTY File could not be opened / modified")
self._logger.error("Error Message: " + str(e))
return
def _remove_clublog_xml_header(self, cty_xml_filename):
"""
remove the header of the Clublog XML File to make it
properly parseable for the python ElementTree XML parser
"""
import tempfile
try:
with open(cty_xml_filename, "r") as f:
content = f.readlines()
cty_dir = tempfile.gettempdir()
cty_name = os.path.split(cty_xml_filename)[1]
cty_xml_filename_no_header = os.path.join(cty_dir, "NoHeader_"+cty_name)
with open(cty_xml_filename_no_header, "w") as f:
f.writelines("<clublog>\n\r")
f.writelines(content[1:])
self._logger.debug("Header successfully modified for XML Parsing")
return cty_xml_filename_no_header
except Exception as e:
self._logger.error("Clublog CTY could not be opened / modified")
self._logger.error("Error Message: " + str(e))
return
def _parse_clublog_xml(self, cty_xml_filename):
"""
parse the content of a clublog XML file and return the
parsed values in dictionaries
"""
entities = {}
call_exceptions = {}
prefixes = {}
invalid_operations = {}
zone_exceptions = {}
call_exceptions_index = {}
prefixes_index = {}
invalid_operations_index = {}
zone_exceptions_index = {}
cty_tree = ET.parse(cty_xml_filename)
root = cty_tree.getroot()
#retrieve ADIF Country Entities
cty_entities = cty_tree.find("entities")
self._logger.debug("total entities: " + str(len(cty_entities)))
if len(cty_entities) > 1:
for cty_entity in cty_entities:
try:
entity = {}
for item in cty_entity:
if item.tag == "name":
entity[const.COUNTRY] = unicode(item.text)
self._logger.debug(unicode(item.text))
elif item.tag == "prefix":
entity[const.PREFIX] = unicode(item.text)
elif item.tag == "deleted":
if item.text == "TRUE":
entity[const.DELETED] = True
else:
entity[const.DELETED] = False
elif item.tag == "cqz":
entity[const.CQZ] = int(item.text)
elif item.tag == "cont":
entity[const.CONTINENT] = unicode(item.text)
elif item.tag == "long":
entity[const.LONGITUDE] = float(item.text)
elif item.tag == "lat":
entity[const.LATITUDE] = float(item.text)
elif item.tag == "start":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
entity[const.START] = dt.replace(tzinfo=UTC)
elif item.tag == "end":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
entity[const.END] = dt.replace(tzinfo=UTC)
elif item.tag == "whitelist":
if item.text == "TRUE":
entity[const.WHITELIST] = True
else:
entity[const.WHITELIST] = False
elif item.tag == "whitelist_start":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
entity[const.WHITELIST_START] = dt.replace(tzinfo=UTC)
elif item.tag == "whitelist_end":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
entity[const.WHITELIST_END] = dt.replace(tzinfo=UTC)
except AttributeError:
self._logger.error("Error while processing: ")
entities[int(cty_entity[0].text)] = entity
self._logger.debug(str(len(entities))+" Entities added")
else:
raise Exception("No Country Entities detected in XML File")
cty_exceptions = cty_tree.find("exceptions")
if len(cty_exceptions) > 1:
for cty_exception in cty_exceptions:
call_exception = {}
for item in cty_exception:
if item.tag == "call":
call = str(item.text)
if call in call_exceptions_index.keys():
call_exceptions_index[call].append(int(cty_exception.attrib["record"]))
else:
call_exceptions_index[call] = [int(cty_exception.attrib["record"])]
elif item.tag == "entity":
call_exception[const.COUNTRY] = unicode(item.text)
elif item.tag == "adif":
call_exception[const.ADIF] = int(item.text)
elif item.tag == "cqz":
call_exception[const.CQZ] = int(item.text)
elif item.tag == "cont":
call_exception[const.CONTINENT] = unicode(item.text)
elif item.tag == "long":
call_exception[const.LONGITUDE] = float(item.text)
elif item.tag == "lat":
call_exception[const.LATITUDE] = float(item.text)
elif item.tag == "start":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
call_exception[const.START] = dt.replace(tzinfo=UTC)
elif item.tag == "end":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
call_exception[const.END] = dt.replace(tzinfo=UTC)
call_exceptions[int(cty_exception.attrib["record"])] = call_exception
self._logger.debug(str(len(call_exceptions))+" Exceptions added")
self._logger.debug(str(len(call_exceptions_index))+" unique Calls in Index ")
else:
raise Exception("No Exceptions detected in XML File")
cty_prefixes = cty_tree.find("prefixes")
if len(cty_prefixes) > 1:
for cty_prefix in cty_prefixes:
prefix = {}
for item in cty_prefix:
if item.tag == "call":
#create index for this prefix
call = str(item.text)
if call in prefixes_index.keys():
prefixes_index[call].append(int(cty_prefix.attrib["record"]))
else:
prefixes_index[call] = [int(cty_prefix.attrib["record"])]
if item.tag == "entity":
prefix[const.COUNTRY] = unicode(item.text)
elif item.tag == "adif":
prefix[const.ADIF] = int(item.text)
elif item.tag == "cqz":
prefix[const.CQZ] = int(item.text)
elif item.tag == "cont":
prefix[const.CONTINENT] = unicode(item.text)
elif item.tag == "long":
prefix[const.LONGITUDE] = float(item.text)
elif item.tag == "lat":
prefix[const.LATITUDE] = float(item.text)
elif item.tag == "start":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
prefix[const.START] = dt.replace(tzinfo=UTC)
elif item.tag == "end":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
prefix[const.END] = dt.replace(tzinfo=UTC)
prefixes[int(cty_prefix.attrib["record"])] = prefix
self._logger.debug(str(len(prefixes))+" Prefixes added")
self._logger.debug(str(len(prefixes_index))+" unique Prefixes in Index")
else:
raise Exception("No Prefixes detected in XML File")
cty_inv_operations = cty_tree.find("invalid_operations")
if len(cty_inv_operations) > 1:
for cty_inv_operation in cty_inv_operations:
invalid_operation = {}
for item in cty_inv_operation:
call = None
if item.tag == "call":
call = str(item.text)
if call in invalid_operations_index.keys():
invalid_operations_index[call].append(int(cty_inv_operation.attrib["record"]))
else:
invalid_operations_index[call] = [int(cty_inv_operation.attrib["record"])]
elif item.tag == "start":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
invalid_operation[const.START] = dt.replace(tzinfo=UTC)
elif item.tag == "end":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
invalid_operation[const.END] = dt.replace(tzinfo=UTC)
invalid_operations[int(cty_inv_operation.attrib["record"])] = invalid_operation
self._logger.debug(str(len(invalid_operations))+" Invalid Operations added")
self._logger.debug(str(len(invalid_operations_index))+" unique Calls in Index")
else:
raise Exception("No records for invalid operations detected in XML File")
cty_zone_exceptions = cty_tree.find("zone_exceptions")
if len(cty_zone_exceptions) > 1:
for cty_zone_exception in cty_zone_exceptions:
zoneException = {}
for item in cty_zone_exception:
call = None
if item.tag == "call":
call = str(item.text)
if call in zone_exceptions_index.keys():
zone_exceptions_index[call].append(int(cty_zone_exception.attrib["record"]))
else:
zone_exceptions_index[call] = [int(cty_zone_exception.attrib["record"])]
elif item.tag == "zone":
zoneException[const.CQZ] = int(item.text)
elif item.tag == "start":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
zoneException[const.START] = dt.replace(tzinfo=UTC)
elif item.tag == "end":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
zoneException[const.END] = dt.replace(tzinfo=UTC)
zone_exceptions[int(cty_zone_exception.attrib["record"])] = zoneException
self._logger.debug(str(len(zone_exceptions))+" Zone Exceptions added")
self._logger.debug(str(len(zone_exceptions_index))+" unique Calls in Index")
else:
raise Exception("No records for zone exceptions detected in XML File")
result = {
"entities" : entities,
"call_exceptions" : call_exceptions,
"prefixes" : prefixes,
"invalid_operations" : invalid_operations,
"zone_exceptions" : zone_exceptions,
"prefixes_index" : prefixes_index,
"call_exceptions_index" : call_exceptions_index,
"invalid_operations_index" : invalid_operations_index,
"zone_exceptions_index" : zone_exceptions_index,
}
return result
def _parse_country_file(self, cty_file, country_mapping_filename=None):
"""
Parse the content of a PLIST file from country-files.com return the
parsed values in dictionaries.
Country-files.com provides Prefixes and Exceptions
"""
import plistlib
cty_list = None
entities = {}
exceptions = {}
prefixes = {}
exceptions_index = {}
prefixes_index = {}
exceptions_counter = 0
prefixes_counter = 0
        mapping = None
        if country_mapping_filename:
            with open(country_mapping_filename, "r") as f:
                mapping = json.loads(f.read())
cty_list = plistlib.readPlist(cty_file)
for item in cty_list:
entry = {}
call = str(item)
entry[const.COUNTRY] = unicode(cty_list[item]["Country"])
if mapping:
entry[const.ADIF] = int(mapping[cty_list[item]["Country"]])
entry[const.CQZ] = int(cty_list[item]["CQZone"])
entry[const.ITUZ] = int(cty_list[item]["ITUZone"])
entry[const.CONTINENT] = unicode(cty_list[item]["Continent"])
entry[const.LATITUDE] = float(cty_list[item]["Latitude"])
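        # longitude sign is flipped to match the convention used elsewhere in the library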
entry[const.LONGITUDE] = float(cty_list[item]["Longitude"])*(-1)
if cty_list[item]["ExactCallsign"]:
if call in exceptions_index.keys():
exceptions_index[call].append(exceptions_counter)
else:
exceptions_index[call] = [exceptions_counter]
exceptions[exceptions_counter] = entry
exceptions_counter += 1
else:
if call in prefixes_index.keys():
prefixes_index[call].append(prefixes_counter)
else:
prefixes_index[call] = [prefixes_counter]
prefixes[prefixes_counter] = entry
prefixes_counter += 1
self._logger.debug(str(len(prefixes))+" Prefixes added")
self._logger.debug(str(len(prefixes_index))+" Prefixes in Index")
self._logger.debug(str(len(exceptions))+" Exceptions added")
self._logger.debug(str(len(exceptions_index))+" Exceptions in Index")
result = {
"prefixes" : prefixes,
"exceptions" : exceptions,
"prefixes_index" : prefixes_index,
"exceptions_index" : exceptions_index,
}
return result
def _generate_random_word(self, length):
"""
Generates a random word
"""
return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))
def _check_html_response(self, response):
"""
Checks if the API Key is valid and if the request returned a 200 status (ok)
"""
error1 = "Access to this form requires a valid API key. For more info see: http://www.clublog.org/need_api.php"
error2 = "Invalid or missing API Key"
if response.status_code == requests.codes.ok:
return True
else:
err_str = "HTTP Status Code: " + str(response.status_code) + " HTTP Response: " + str(response.text)
self._logger.error(err_str)
if response.status_code == 403:
raise APIKeyMissingError
else:
raise LookupError(err_str)
def _serialize_data(self, my_dict):
"""
Serialize a Dictionary into JSON
"""
new_dict = {}
for item in my_dict:
if isinstance(my_dict[item], datetime):
new_dict[item] = my_dict[item].strftime('%Y-%m-%d%H:%M:%S')
else:
new_dict[item] = str(my_dict[item])
return json.dumps(new_dict)
def _deserialize_data(self, json_data):
"""
Deserialize a JSON into a dictionary
"""
        my_dict = json.loads(json_data.decode('utf8'))
for item in my_dict:
if item == const.ADIF:
my_dict[item] = int(my_dict[item])
elif item == const.DELETED:
my_dict[item] = self._str_to_bool(my_dict[item])
elif item == const.CQZ:
my_dict[item] = int(my_dict[item])
elif item == const.ITUZ:
my_dict[item] = int(my_dict[item])
elif item == const.LATITUDE:
my_dict[item] = float(my_dict[item])
elif item == const.LONGITUDE:
my_dict[item] = float(my_dict[item])
elif item == const.START:
my_dict[item] = datetime.strptime(my_dict[item], '%Y-%m-%d%H:%M:%S').replace(tzinfo=UTC)
elif item == const.END:
my_dict[item] = datetime.strptime(my_dict[item], '%Y-%m-%d%H:%M:%S').replace(tzinfo=UTC)
elif item == const.WHITELIST_START:
my_dict[item] = datetime.strptime(my_dict[item], '%Y-%m-%d%H:%M:%S').replace(tzinfo=UTC)
elif item == const.WHITELIST_END:
my_dict[item] = datetime.strptime(my_dict[item], '%Y-%m-%d%H:%M:%S').replace(tzinfo=UTC)
elif item == const.WHITELIST:
my_dict[item] = self._str_to_bool(my_dict[item])
else:
my_dict[item] = unicode(my_dict[item])
return my_dict
def _str_to_bool(self, input):
if input.lower() == "true":
return True
elif input.lower() == "false":
return False
else:
raise KeyError
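# Illustrative usage sketch (not part of the original file; the file name and
# callsign below are placeholders):
#   lib = LookupLib(lookuptype="countryfile", filename="cty.plist")
#   info = lib.lookup_callsign("DL1XYZ")  # raises KeyError when unknown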
| 39.646718
| 177
| 0.57467
|
from __future__ import unicode_literals
import os
import logging
import logging.config
import re
import random, string
from datetime import datetime
import xml.etree.ElementTree as ET
import urllib
import json
import copy
import sys
import unicodedata
import requests
from requests.exceptions import ConnectionError, HTTPError, Timeout
from bs4 import BeautifulSoup
import pytz
from . import version
from .consts import LookupConventions as const
from .exceptions import APIKeyMissingError
UTC = pytz.UTC
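# Lua helper executed via EVAL: deletes every Redis key matching ARGV[1],
# working in chunks of 20000 keys to stay within Lua's unpack() limits.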
REDIS_LUA_DEL_SCRIPT = "local keys = redis.call('keys', ARGV[1]) \n for i=1,#keys,20000 do \n redis.call('del', unpack(keys, i, math.min(i+19999, #keys))) \n end \n return keys"
if sys.version_info < (2, 7,):
class NullHandler(logging.Handler):
def emit(self, record):
pass
if sys.version_info.major == 3:
unicode = str
class LookupLib(object):
def __init__(self, lookuptype = "countryfile", apikey=None, apiv="1.3.3", filename=None, logger=None, username=None, pwd=None, redis_instance=None, redis_prefix=None):
self._logger = None
if logger:
self._logger = logger
else:
self._logger = logging.getLogger(__name__)
if sys.version_info[:2] == (2, 6):
self._logger.addHandler(NullHandler())
else:
self._logger.addHandler(logging.NullHandler())
self._apikey = apikey
self._apiv = apiv
self._download = True
self._lib_filename = filename
self._redis = redis_instance
self._redis_prefix = redis_prefix
self._username = username
self._pwd = pwd
if self._lib_filename:
self._download = False
self._callsign_exceptions_index = {}
self._invalid_operations_index = {}
self._zone_exceptions_index = {}
self._entities = {}
self._callsign_exceptions = {}
self._invalid_operations = {}
self._zone_exceptions = {}
self._lookuptype = lookuptype
if self._lookuptype == "clublogxml":
self._load_clublogXML(apikey=self._apikey, cty_file=self._lib_filename)
elif self._lookuptype == "countryfile":
self._load_countryfile(cty_file=self._lib_filename)
elif self._lookuptype == "clublogapi":
pass
elif self._lookuptype == "redis":
import redis
elif self._lookuptype == "qrz":
self._apikey = self._get_qrz_session_key(self._username, self._pwd)
else:
raise AttributeError("Lookup type missing")
def _get_qrz_session_key(self, username, pwd):
qrz_api_version = "1.3.3"
url = "https://xmldata.qrz.com/xml/" + qrz_api_version + "/"
agent = "PyHamTools"+version.__version__
params = {"username" : username,
"password" : pwd,
"agent" : agent
}
if sys.version_info.major == 3:
encodeurl = url + "?" + urllib.parse.urlencode(params)
else:
encodeurl = url + "?" + urllib.urlencode(params)
response = requests.get(encodeurl, timeout=10)
doc = BeautifulSoup(response.text, "html.parser")
session_key = None
if doc.session.key:
session_key = doc.session.key.text
else:
if doc.session.error:
raise ValueError(doc.session.error.text)
else:
raise ValueError("Could not retrieve Session Key from QRZ.com")
return session_key
def copy_data_in_redis(self, redis_prefix, redis_instance):
if redis_instance is not None:
self._redis = redis_instance
if self._redis is None:
raise AttributeError("redis_instance is missing")
if redis_prefix is None:
raise KeyError("redis_prefix is missing")
if self._lookuptype == "clublogxml" or self._lookuptype == "countryfile":
self._push_dict_to_redis(self._entities, redis_prefix, "_entity_")
self._push_dict_index_to_redis(self._callsign_exceptions_index, redis_prefix, "_call_ex_index_")
self._push_dict_to_redis(self._callsign_exceptions, redis_prefix, "_call_ex_")
self._push_dict_index_to_redis(self._prefixes_index, redis_prefix, "_prefix_index_")
self._push_dict_to_redis(self._prefixes, redis_prefix, "_prefix_")
self._push_dict_index_to_redis(self._invalid_operations_index, redis_prefix, "_inv_op_index_")
self._push_dict_to_redis(self._invalid_operations, redis_prefix, "_inv_op_")
self._push_dict_index_to_redis(self._zone_exceptions_index, redis_prefix, "_zone_ex_index_")
self._push_dict_to_redis(self._zone_exceptions, redis_prefix, "_zone_ex_")
return True
def _push_dict_to_redis(self, push_dict, redis_prefix, name):
r = self._redis
pipe = r.pipeline()
pipe.eval(REDIS_LUA_DEL_SCRIPT, 0, redis_prefix + name)
for i in push_dict:
json_data = self._serialize_data(push_dict[i])
pipe.set(redis_prefix + name + str(i), json_data)
pipe.execute()
return True
def _push_dict_index_to_redis(self, index_dict, redis_prefix, name):
r = self._redis
pipe = r.pipeline()
pipe.eval(REDIS_LUA_DEL_SCRIPT, 0, redis_prefix + name)
for i in index_dict:
for el in index_dict[i]:
pipe.sadd(redis_prefix + name + str(i), el)
pipe.execute()
return True
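    # Illustrative key layout produced by the two push helpers above (prefix,
    # call and record ids are made-up values):
    #   myprefix_prefix_index_DL -> SMEMBERS {"42", "107"}   (record ids for "DL")
    #   myprefix_prefix_42       -> '{"country": "...", "adif": "230", ...}'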
def lookup_entity(self, entity=None):
if self._lookuptype == "clublogxml":
entity = int(entity)
if entity in self._entities:
return self._strip_metadata(self._entities[entity])
else:
raise KeyError
elif self._lookuptype == "redis":
if self._redis_prefix is None:
raise KeyError ("redis_prefix is missing")
json_data = self._redis.get(self._redis_prefix + "_entity_" + str(entity))
if json_data is not None:
my_dict = self._deserialize_data(json_data)
return self._strip_metadata(my_dict)
elif self._lookuptype == "qrz":
result = self._lookup_qrz_dxcc(entity, self._apikey)
return result
raise KeyError
def _strip_metadata(self, my_dict):
new_dict = copy.deepcopy(my_dict)
if const.START in new_dict:
del new_dict[const.START]
if const.END in new_dict:
del new_dict[const.END]
if const.WHITELIST in new_dict:
del new_dict[const.WHITELIST]
if const.WHITELIST_START in new_dict:
del new_dict[const.WHITELIST_START]
if const.WHITELIST_END in new_dict:
del new_dict[const.WHITELIST_END]
return new_dict
def lookup_callsign(self, callsign=None, timestamp=None):
callsign = callsign.strip().upper()
if timestamp is None:
timestamp = datetime.utcnow().replace(tzinfo=UTC)
if self._lookuptype == "clublogapi":
callsign_data = self._lookup_clublogAPI(callsign=callsign, timestamp=timestamp, apikey=self._apikey)
if callsign_data[const.ADIF]==1000:
raise KeyError
else:
return callsign_data
elif self._lookuptype == "clublogxml" or self._lookuptype == "countryfile":
return self._check_data_for_date(callsign, timestamp, self._callsign_exceptions, self._callsign_exceptions_index)
elif self._lookuptype == "redis":
data_dict, index = self._get_dicts_from_redis("_call_ex_", "_call_ex_index_", self._redis_prefix, callsign)
return self._check_data_for_date(callsign, timestamp, data_dict, index)
elif self._lookuptype == "qrz":
return self._lookup_qrz_callsign(callsign, self._apikey, self._apiv)
raise KeyError("unknown Callsign")
def _get_dicts_from_redis(self, name, index_name, redis_prefix, item):
r = self._redis
data_dict = {}
data_index_dict = {}
if redis_prefix is None:
raise KeyError ("redis_prefix is missing")
if r.scard(redis_prefix + index_name + str(item)) > 0:
data_index_dict[str(item)] = r.smembers(redis_prefix + index_name + str(item))
for i in data_index_dict[item]:
json_data = r.get(redis_prefix + name + str(int(i)))
data_dict[i] = self._deserialize_data(json_data)
return (data_dict, data_index_dict)
raise KeyError ("No Data found in Redis for "+ item)
def _check_data_for_date(self, item, timestamp, data_dict, data_index_dict):
if item in data_index_dict:
for item in data_index_dict[item]:
if const.START in data_dict[item] and not const.END in data_dict[item]:
if data_dict[item][const.START] < timestamp:
item_data = copy.deepcopy(data_dict[item])
del item_data[const.START]
return item_data
elif not const.START in data_dict[item] and const.END in data_dict[item]:
if data_dict[item][const.END] > timestamp:
item_data = copy.deepcopy(data_dict[item])
del item_data[const.END]
return item_data
elif const.START in data_dict[item] and const.END in data_dict[item]:
if data_dict[item][const.START] < timestamp \
and data_dict[item][const.END] > timestamp:
item_data = copy.deepcopy(data_dict[item])
del item_data[const.START]
del item_data[const.END]
return item_data
elif not const.START in data_dict[item] and not const.END in data_dict[item]:
return data_dict[item]
raise KeyError
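    # The window check above disambiguates records sharing one call/prefix: a
    # record matches when the timestamp lies after its START (if present) and
    # before its END (if present); records with neither bound always match.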
def _check_inv_operation_for_date(self, item, timestamp, data_dict, data_index_dict):
if item in data_index_dict:
for item in data_index_dict[item]:
if const.START in data_dict[item] and not const.END in data_dict[item]:
if data_dict[item][const.START] < timestamp:
return True
elif not const.START in data_dict[item] and const.END in data_dict[item]:
if data_dict[item][const.END] > timestamp:
return True
elif const.START in data_dict[item] and const.END in data_dict[item]:
if data_dict[item][const.START] < timestamp \
and data_dict[item][const.END] > timestamp:
return True
elif not const.START in data_dict[item] and not const.END in data_dict[item]:
return True
raise KeyError
def lookup_prefix(self, prefix, timestamp=None):
prefix = prefix.strip().upper()
if timestamp is None:
timestamp = datetime.utcnow().replace(tzinfo=UTC)
if self._lookuptype == "clublogxml" or self._lookuptype == "countryfile":
return self._check_data_for_date(prefix, timestamp, self._prefixes, self._prefixes_index)
elif self._lookuptype == "redis":
data_dict, index = self._get_dicts_from_redis("_prefix_", "_prefix_index_", self._redis_prefix, prefix)
return self._check_data_for_date(prefix, timestamp, data_dict, index)
raise KeyError
def is_invalid_operation(self, callsign, timestamp=None):
callsign = callsign.strip().upper()
if timestamp is None:
timestamp = datetime.utcnow().replace(tzinfo=UTC)
if self._lookuptype == "clublogxml":
return self._check_inv_operation_for_date(callsign, timestamp, self._invalid_operations, self._invalid_operations_index)
elif self._lookuptype == "redis":
data_dict, index = self._get_dicts_from_redis("_inv_op_", "_inv_op_index_", self._redis_prefix, callsign)
return self._check_inv_operation_for_date(callsign, timestamp, data_dict, index)
raise KeyError
def _check_zone_exception_for_date(self, item, timestamp, data_dict, data_index_dict):
if item in data_index_dict:
for item in data_index_dict[item]:
if const.START in data_dict[item] and not const.END in data_dict[item]:
if data_dict[item][const.START] < timestamp:
return data_dict[item][const.CQZ]
elif not const.START in data_dict[item] and const.END in data_dict[item]:
if data_dict[item][const.END] > timestamp:
return data_dict[item][const.CQZ]
elif const.START in data_dict[item] and const.END in data_dict[item]:
if data_dict[item][const.START] < timestamp \
and data_dict[item][const.END] > timestamp:
return data_dict[item][const.CQZ]
elif not const.START in data_dict[item] and not const.END in data_dict[item]:
return data_dict[item][const.CQZ]
raise KeyError
def lookup_zone_exception(self, callsign, timestamp=None):
callsign = callsign.strip().upper()
if timestamp is None:
timestamp = datetime.utcnow().replace(tzinfo=UTC)
if self._lookuptype == "clublogxml":
return self._check_zone_exception_for_date(callsign, timestamp, self._zone_exceptions, self._zone_exceptions_index)
elif self._lookuptype == "redis":
data_dict, index = self._get_dicts_from_redis("_zone_ex_", "_zone_ex_index_", self._redis_prefix, callsign)
return self._check_zone_exception_for_date(callsign, timestamp, data_dict, index)
raise KeyError
def _lookup_clublogAPI(self, callsign=None, timestamp=None, url="https://secure.clublog.org/dxcc", apikey=None):
params = {"year" : timestamp.strftime("%Y"),
"month" : timestamp.strftime("%m"),
"day" : timestamp.strftime("%d"),
"hour" : timestamp.strftime("%H"),
"minute" : timestamp.strftime("%M"),
"api" : apikey,
"full" : "1",
"call" : callsign
}
if timestamp is None:
timestamp = datetime.utcnow().replace(tzinfo=UTC)
if sys.version_info.major == 3:
encodeurl = url + "?" + urllib.parse.urlencode(params)
else:
encodeurl = url + "?" + urllib.urlencode(params)
response = requests.get(encodeurl, timeout=5)
if not self._check_html_response(response):
raise LookupError
jsonLookup = response.json()
lookup = {}
for item in jsonLookup:
if item == "Name": lookup[const.COUNTRY] = jsonLookup["Name"]
elif item == "DXCC": lookup[const.ADIF] = int(jsonLookup["DXCC"])
elif item == "Lon": lookup[const.LONGITUDE] = float(jsonLookup["Lon"])*(-1)
elif item == "Lat": lookup[const.LATITUDE] = float(jsonLookup["Lat"])
elif item == "CQZ": lookup[const.CQZ] = int(jsonLookup["CQZ"])
elif item == "Continent": lookup[const.CONTINENT] = jsonLookup["Continent"]
if lookup[const.ADIF] == 0:
raise KeyError
else:
return lookup
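    # Illustrative mapping performed above (response values are made-up):
    #   {"Name": "Germany", "DXCC": 230, "CQZ": 14, "Continent": "EU", ...}
    #   becomes {const.COUNTRY: "Germany", const.ADIF: 230, const.CQZ: 14, ...}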
def _request_callsign_info_from_qrz(self, callsign, apikey, apiv="1.3.3"):
qrz_api_version = apiv
url = "https://xmldata.qrz.com/xml/" + qrz_api_version + "/"
params = {
"s": apikey,
"callsign" : callsign,
}
if sys.version_info.major == 3:
encodeurl = url + "?" + urllib.parse.urlencode(params)
else:
encodeurl = url + "?" + urllib.urlencode(params)
response = requests.get(encodeurl, timeout=5)
return response
def _request_dxcc_info_from_qrz(self, dxcc_or_callsign, apikey, apiv="1.3.3"):
qrz_api_version = apiv
url = "https://xmldata.qrz.com/xml/" + qrz_api_version + "/"
params = {
"s": apikey,
"dxcc" : str(dxcc_or_callsign),
}
if sys.version_info.major == 3:
encodeurl = url + "?" + urllib.parse.urlencode(params)
else:
encodeurl = url + "?" + urllib.urlencode(params)
response = requests.get(encodeurl, timeout=5)
return response
def _lookup_qrz_dxcc(self, dxcc_or_callsign, apikey, apiv="1.3.3"):
response = self._request_dxcc_info_from_qrz(dxcc_or_callsign, apikey, apiv=apiv)
root = BeautifulSoup(response.text, "html.parser")
lookup = {}
if root.error:
if re.search('No DXCC Information for', root.error.text, re.I):
raise KeyError(root.error.text)
elif re.search('Session Timeout', root.error.text, re.I):
self._apikey = apikey = self._get_qrz_session_key(self._username, self._pwd)
response = self._request_dxcc_info_from_qrz(dxcc_or_callsign, apikey)
root = BeautifulSoup(response.text, "html.parser")
else:
raise AttributeError("Session Key Missing")
if root.dxcc is None:
raise ValueError
if root.dxcc.dxcc:
lookup[const.ADIF] = int(root.dxcc.dxcc.text)
if root.dxcc.cc:
lookup['cc'] = root.dxcc.cc.text
        if root.dxcc.ccc:
            lookup['ccc'] = root.dxcc.ccc.text
if root.find('name'):
lookup[const.COUNTRY] = root.find('name').get_text()
if root.dxcc.continent:
lookup[const.CONTINENT] = root.dxcc.continent.text
if root.dxcc.ituzone:
lookup[const.ITUZ] = int(root.dxcc.ituzone.text)
if root.dxcc.cqzone:
lookup[const.CQZ] = int(root.dxcc.cqzone.text)
if root.dxcc.timezone:
lookup['timezone'] = float(root.dxcc.timezone.text)
if root.dxcc.lat:
lookup[const.LATITUDE] = float(root.dxcc.lat.text)
if root.dxcc.lon:
lookup[const.LONGITUDE] = float(root.dxcc.lon.text)
return lookup
def _lookup_qrz_callsign(self, callsign=None, apikey=None, apiv="1.3.3"):
if apikey is None:
raise AttributeError("Session Key Missing")
callsign = callsign.upper()
response = self._request_callsign_info_from_qrz(callsign, apikey, apiv)
root = BeautifulSoup(response.text, "html.parser")
lookup = {}
if root.error:
if re.search('Not found', root.error.text, re.I):
raise KeyError(root.error.text)
elif re.search('Session Timeout', root.error.text, re.I) or re.search('Invalid session key', root.error.text, re.I):
apikey = self._get_qrz_session_key(self._username, self._pwd)
response = self._request_callsign_info_from_qrz(callsign, apikey, apiv)
root = BeautifulSoup(response.text, "html.parser")
if root.error:
if re.search('Not found', root.error.text, re.I):
raise KeyError(root.error.text)
else:
raise AttributeError(root.error.text)
else:
self._apikey = apikey
else:
raise AttributeError(root.error.text)
if root.callsign is None:
raise ValueError
if root.callsign.call:
lookup[const.CALLSIGN] = root.callsign.call.text
if root.callsign.xref:
lookup[const.XREF] = root.callsign.xref.text
if root.callsign.aliases:
lookup[const.ALIASES] = root.callsign.aliases.text.split(',')
if root.callsign.dxcc:
lookup[const.ADIF] = int(root.callsign.dxcc.text)
if root.callsign.fname:
lookup[const.FNAME] = root.callsign.fname.text
if root.callsign.find("name"):
lookup[const.NAME] = root.callsign.find('name').get_text()
if root.callsign.addr1:
lookup[const.ADDR1] = root.callsign.addr1.text
if root.callsign.addr2:
lookup[const.ADDR2] = root.callsign.addr2.text
if root.callsign.state:
lookup[const.STATE] = root.callsign.state.text
if root.callsign.zip:
lookup[const.ZIPCODE] = root.callsign.zip.text
if root.callsign.country:
lookup[const.COUNTRY] = root.callsign.country.text
if root.callsign.ccode:
lookup[const.CCODE] = int(root.callsign.ccode.text)
if root.callsign.lat:
lookup[const.LATITUDE] = float(root.callsign.lat.text)
if root.callsign.lon:
lookup[const.LONGITUDE] = float(root.callsign.lon.text)
if root.callsign.grid:
lookup[const.LOCATOR] = root.callsign.grid.text
if root.callsign.county:
lookup[const.COUNTY] = root.callsign.county.text
if root.callsign.fips:
lookup[const.FIPS] = int(root.callsign.fips.text)
if root.callsign.land:
lookup[const.LAND] = root.callsign.land.text
if root.callsign.efdate:
try:
lookup[const.EFDATE] = datetime.strptime(root.callsign.efdate.text, '%Y-%m-%d').replace(tzinfo=UTC)
except ValueError:
self._logger.debug("[QRZ.com] efdate: Invalid DateTime; " + callsign + " " + root.callsign.efdate.text)
if root.callsign.expdate:
try:
lookup[const.EXPDATE] = datetime.strptime(root.callsign.expdate.text, '%Y-%m-%d').replace(tzinfo=UTC)
except ValueError:
self._logger.debug("[QRZ.com] expdate: Invalid DateTime; " + callsign + " " + root.callsign.expdate.text)
if root.callsign.p_call:
lookup[const.P_CALL] = root.callsign.p_call.text
if root.callsign.find('class'):
lookup[const.LICENSE_CLASS] = root.callsign.find('class').get_text()
if root.callsign.codes:
lookup[const.CODES] = root.callsign.codes.text
if root.callsign.qslmgr:
lookup[const.QSLMGR] = root.callsign.qslmgr.text
if root.callsign.email:
lookup[const.EMAIL] = root.callsign.email.text
if root.callsign.url:
lookup[const.URL] = root.callsign.url.text
if root.callsign.u_views:
lookup[const.U_VIEWS] = int(root.callsign.u_views.text)
if root.callsign.bio:
lookup[const.BIO] = root.callsign.bio.text
if root.callsign.biodate:
try:
lookup[const.BIODATE] = datetime.strptime(root.callsign.biodate.text, '%Y-%m-%d %H:%M:%S').replace(tzinfo=UTC)
except ValueError:
self._logger.warning("[QRZ.com] biodate: Invalid DateTime; " + callsign)
if root.callsign.image:
lookup[const.IMAGE] = root.callsign.image.text
if root.callsign.imageinfo:
lookup[const.IMAGE_INFO] = root.callsign.imageinfo.text
if root.callsign.serial:
            lookup[const.SERIAL] = int(root.callsign.serial.text)
if root.callsign.moddate:
try:
lookup[const.MODDATE] = datetime.strptime(root.callsign.moddate.text, '%Y-%m-%d %H:%M:%S').replace(tzinfo=UTC)
except ValueError:
self._logger.warning("[QRZ.com] moddate: Invalid DateTime; " + callsign)
if root.callsign.MSA:
lookup[const.MSA] = int(root.callsign.MSA.text)
if root.callsign.AreaCode:
lookup[const.AREACODE] = int(root.callsign.AreaCode.text)
if root.callsign.TimeZone:
lookup[const.TIMEZONE] = int(root.callsign.TimeZone.text)
if root.callsign.GMTOffset:
lookup[const.GMTOFFSET] = float(root.callsign.GMTOffset.text)
if root.callsign.DST:
if root.callsign.DST.text == "Y":
lookup[const.DST] = True
else:
lookup[const.DST] = False
if root.callsign.eqsl:
if root.callsign.eqsl.text == "1":
lookup[const.EQSL] = True
else:
lookup[const.EQSL] = False
if root.callsign.mqsl:
if root.callsign.mqsl.text == "1":
lookup[const.MQSL] = True
else:
lookup[const.MQSL] = False
if root.callsign.cqzone:
lookup[const.CQZ] = int(root.callsign.cqzone.text)
if root.callsign.ituzone:
lookup[const.ITUZ] = int(root.callsign.ituzone.text)
if root.callsign.born:
lookup[const.BORN] = int(root.callsign.born.text)
if root.callsign.user:
lookup[const.USER_MGR] = root.callsign.user.text
if root.callsign.lotw:
if root.callsign.lotw.text == "1":
lookup[const.LOTW] = True
else:
lookup[const.LOTW] = False
if root.callsign.iota:
lookup[const.IOTA] = root.callsign.iota.text
if root.callsign.geoloc:
lookup[const.GEOLOC] = root.callsign.geoloc.text
return lookup
def _load_clublogXML(self,
url="https://secure.clublog.org/cty.php",
apikey=None,
cty_file=None):
if self._download:
cty_file = self._download_file(
url = url,
apikey = apikey)
else:
cty_file = self._lib_filename
header = self._extract_clublog_header(cty_file)
cty_file = self._remove_clublog_xml_header(cty_file)
cty_dict = self._parse_clublog_xml(cty_file)
self._entities = cty_dict["entities"]
self._callsign_exceptions = cty_dict["call_exceptions"]
self._prefixes = cty_dict["prefixes"]
self._invalid_operations = cty_dict["invalid_operations"]
self._zone_exceptions = cty_dict["zone_exceptions"]
self._callsign_exceptions_index = cty_dict["call_exceptions_index"]
self._prefixes_index = cty_dict["prefixes_index"]
self._invalid_operations_index = cty_dict["invalid_operations_index"]
self._zone_exceptions_index = cty_dict["zone_exceptions_index"]
if self._download:
self._cleanup_download_artifact(cty_file)
return True
def _load_countryfile(self,
url="https://www.country-files.com/cty/cty.plist",
country_mapping_filename="countryfilemapping.json",
cty_file=None):
cwdFile = os.path.abspath(os.path.join(os.getcwd(), country_mapping_filename))
pkgFile = os.path.abspath(os.path.join(os.path.dirname(__file__), country_mapping_filename))
if os.path.exists(cwdFile):
country_mapping_filename = cwdFile
elif os.path.exists(pkgFile):
country_mapping_filename = pkgFile
else:
country_mapping_filename = None
if self._download:
cty_file = self._download_file(url=url)
else:
cty_file = os.path.abspath(cty_file)
cty_dict = self._parse_country_file(cty_file, country_mapping_filename)
self._callsign_exceptions = cty_dict["exceptions"]
self._prefixes = cty_dict["prefixes"]
self._callsign_exceptions_index = cty_dict["exceptions_index"]
self._prefixes_index = cty_dict["prefixes_index"]
if self._download:
self._cleanup_download_artifact(cty_file)
return True
def _download_file(self, url, apikey=None):
import gzip
import tempfile
cty = {}
cty_date = ""
cty_file_path = None
filename = None
if apikey:
response = requests.get(url+"?api="+apikey, timeout=10)
else:
response = requests.get(url, timeout=10)
if not self._check_html_response(response):
raise LookupError
if "Content-Disposition" in response.headers:
f = re.search('filename=".+"', response.headers["Content-Disposition"])
if f:
f = f.group(0)
filename = re.search('".+"', f).group(0).replace('"', '')
        # country-files.com sends no Content-Disposition header; derive the filename from the URL
else:
f = re.search('/.{4}plist$', url)
if f:
f = f.group(0)
filename = f[1:]
if not filename:
filename = "cty_" + self._generate_random_word(5)
download_file_path = os.path.join(tempfile.gettempdir(), filename)
with open(download_file_path, "wb") as download_file:
download_file.write(response.content)
self._logger.debug(str(download_file_path) + " successfully downloaded")
# unzip file, if gz
if os.path.splitext(download_file_path)[1][1:] == "gz":
download_file = gzip.open(download_file_path, "r")
try:
cty_file_path = os.path.join(os.path.splitext(download_file_path)[0])
with open(cty_file_path, "wb") as cty_file:
cty_file.write(download_file.read())
self._logger.debug(str(cty_file_path) + " successfully extracted")
finally:
download_file.close()
else:
cty_file_path = download_file_path
return cty_file_path
def _cleanup_download_artifact(self, filename):
try:
os.remove(filename)
        except OSError:
            self._logger.warning("unable to delete the download artifact: %s", filename)
def _extract_clublog_header(self, cty_xml_filename):
cty_header = {}
try:
with open(cty_xml_filename, "r") as cty:
raw_header = cty.readline()
cty_date = re.search("date='.+'", raw_header)
if cty_date:
cty_date = cty_date.group(0).replace("date=", "").replace("'", "")
cty_date = datetime.strptime(cty_date[:19], '%Y-%m-%dT%H:%M:%S')
                    cty_date = cty_date.replace(tzinfo=UTC)
cty_header["Date"] = cty_date
cty_ns = re.search("xmlns='.+[']", raw_header)
if cty_ns:
cty_ns = cty_ns.group(0).replace("xmlns=", "").replace("'", "")
cty_header['NameSpace'] = cty_ns
if len(cty_header) == 2:
self._logger.debug("Header successfully retrieved from CTY File")
elif len(cty_header) < 2:
self._logger.warning("Header could only be partically retrieved from CTY File")
self._logger.warning("Content of Header: ")
for key in cty_header:
self._logger.warning(str(key)+": "+str(cty_header[key]))
return cty_header
except Exception as e:
self._logger.error("Clublog CTY File could not be opened / modified")
self._logger.error("Error Message: " + str(e))
return
def _remove_clublog_xml_header(self, cty_xml_filename):
import tempfile
try:
with open(cty_xml_filename, "r") as f:
content = f.readlines()
cty_dir = tempfile.gettempdir()
cty_name = os.path.split(cty_xml_filename)[1]
cty_xml_filename_no_header = os.path.join(cty_dir, "NoHeader_"+cty_name)
with open(cty_xml_filename_no_header, "w") as f:
f.writelines("<clublog>\n\r")
f.writelines(content[1:])
self._logger.debug("Header successfully modified for XML Parsing")
return cty_xml_filename_no_header
except Exception as e:
self._logger.error("Clublog CTY could not be opened / modified")
self._logger.error("Error Message: " + str(e))
return
def _parse_clublog_xml(self, cty_xml_filename):
entities = {}
call_exceptions = {}
prefixes = {}
invalid_operations = {}
zone_exceptions = {}
call_exceptions_index = {}
prefixes_index = {}
invalid_operations_index = {}
zone_exceptions_index = {}
cty_tree = ET.parse(cty_xml_filename)
root = cty_tree.getroot()
#retrieve ADIF Country Entities
cty_entities = cty_tree.find("entities")
self._logger.debug("total entities: " + str(len(cty_entities)))
if len(cty_entities) > 1:
for cty_entity in cty_entities:
try:
entity = {}
for item in cty_entity:
if item.tag == "name":
entity[const.COUNTRY] = unicode(item.text)
self._logger.debug(unicode(item.text))
elif item.tag == "prefix":
entity[const.PREFIX] = unicode(item.text)
elif item.tag == "deleted":
if item.text == "TRUE":
entity[const.DELETED] = True
else:
entity[const.DELETED] = False
elif item.tag == "cqz":
entity[const.CQZ] = int(item.text)
elif item.tag == "cont":
entity[const.CONTINENT] = unicode(item.text)
elif item.tag == "long":
entity[const.LONGITUDE] = float(item.text)
elif item.tag == "lat":
entity[const.LATITUDE] = float(item.text)
elif item.tag == "start":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
entity[const.START] = dt.replace(tzinfo=UTC)
elif item.tag == "end":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
entity[const.END] = dt.replace(tzinfo=UTC)
elif item.tag == "whitelist":
if item.text == "TRUE":
entity[const.WHITELIST] = True
else:
entity[const.WHITELIST] = False
elif item.tag == "whitelist_start":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
entity[const.WHITELIST_START] = dt.replace(tzinfo=UTC)
elif item.tag == "whitelist_end":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
entity[const.WHITELIST_END] = dt.replace(tzinfo=UTC)
except AttributeError:
self._logger.error("Error while processing: ")
entities[int(cty_entity[0].text)] = entity
self._logger.debug(str(len(entities))+" Entities added")
else:
raise Exception("No Country Entities detected in XML File")
cty_exceptions = cty_tree.find("exceptions")
if len(cty_exceptions) > 1:
for cty_exception in cty_exceptions:
call_exception = {}
for item in cty_exception:
if item.tag == "call":
call = str(item.text)
if call in call_exceptions_index.keys():
call_exceptions_index[call].append(int(cty_exception.attrib["record"]))
else:
call_exceptions_index[call] = [int(cty_exception.attrib["record"])]
elif item.tag == "entity":
call_exception[const.COUNTRY] = unicode(item.text)
elif item.tag == "adif":
call_exception[const.ADIF] = int(item.text)
elif item.tag == "cqz":
call_exception[const.CQZ] = int(item.text)
elif item.tag == "cont":
call_exception[const.CONTINENT] = unicode(item.text)
elif item.tag == "long":
call_exception[const.LONGITUDE] = float(item.text)
elif item.tag == "lat":
call_exception[const.LATITUDE] = float(item.text)
elif item.tag == "start":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
call_exception[const.START] = dt.replace(tzinfo=UTC)
elif item.tag == "end":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
call_exception[const.END] = dt.replace(tzinfo=UTC)
call_exceptions[int(cty_exception.attrib["record"])] = call_exception
self._logger.debug(str(len(call_exceptions))+" Exceptions added")
self._logger.debug(str(len(call_exceptions_index))+" unique Calls in Index ")
else:
raise Exception("No Exceptions detected in XML File")
cty_prefixes = cty_tree.find("prefixes")
if len(cty_prefixes) > 1:
for cty_prefix in cty_prefixes:
prefix = {}
for item in cty_prefix:
pref = None
if item.tag == "call":
#create index for this prefix
call = str(item.text)
if call in prefixes_index.keys():
prefixes_index[call].append(int(cty_prefix.attrib["record"]))
else:
prefixes_index[call] = [int(cty_prefix.attrib["record"])]
if item.tag == "entity":
prefix[const.COUNTRY] = unicode(item.text)
elif item.tag == "adif":
prefix[const.ADIF] = int(item.text)
elif item.tag == "cqz":
prefix[const.CQZ] = int(item.text)
elif item.tag == "cont":
prefix[const.CONTINENT] = unicode(item.text)
elif item.tag == "long":
prefix[const.LONGITUDE] = float(item.text)
elif item.tag == "lat":
prefix[const.LATITUDE] = float(item.text)
elif item.tag == "start":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
prefix[const.START] = dt.replace(tzinfo=UTC)
elif item.tag == "end":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
prefix[const.END] = dt.replace(tzinfo=UTC)
prefixes[int(cty_prefix.attrib["record"])] = prefix
self._logger.debug(str(len(prefixes))+" Prefixes added")
self._logger.debug(str(len(prefixes_index))+" unique Prefixes in Index")
else:
raise Exception("No Prefixes detected in XML File")
cty_inv_operations = cty_tree.find("invalid_operations")
if len(cty_inv_operations) > 1:
for cty_inv_operation in cty_inv_operations:
invalid_operation = {}
for item in cty_inv_operation:
call = None
if item.tag == "call":
call = str(item.text)
if call in invalid_operations_index.keys():
invalid_operations_index[call].append(int(cty_inv_operation.attrib["record"]))
else:
invalid_operations_index[call] = [int(cty_inv_operation.attrib["record"])]
elif item.tag == "start":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
invalid_operation[const.START] = dt.replace(tzinfo=UTC)
elif item.tag == "end":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
invalid_operation[const.END] = dt.replace(tzinfo=UTC)
invalid_operations[int(cty_inv_operation.attrib["record"])] = invalid_operation
self._logger.debug(str(len(invalid_operations))+" Invalid Operations added")
self._logger.debug(str(len(invalid_operations_index))+" unique Calls in Index")
else:
raise Exception("No records for invalid operations detected in XML File")
cty_zone_exceptions = cty_tree.find("zone_exceptions")
if len(cty_zone_exceptions) > 1:
for cty_zone_exception in cty_zone_exceptions:
zoneException = {}
for item in cty_zone_exception:
call = None
if item.tag == "call":
call = str(item.text)
if call in zone_exceptions_index.keys():
zone_exceptions_index[call].append(int(cty_zone_exception.attrib["record"]))
else:
zone_exceptions_index[call] = [int(cty_zone_exception.attrib["record"])]
elif item.tag == "zone":
zoneException[const.CQZ] = int(item.text)
elif item.tag == "start":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
zoneException[const.START] = dt.replace(tzinfo=UTC)
elif item.tag == "end":
dt = datetime.strptime(item.text[:19], '%Y-%m-%dT%H:%M:%S')
zoneException[const.END] = dt.replace(tzinfo=UTC)
zone_exceptions[int(cty_zone_exception.attrib["record"])] = zoneException
self._logger.debug(str(len(zone_exceptions))+" Zone Exceptions added")
self._logger.debug(str(len(zone_exceptions_index))+" unique Calls in Index")
else:
raise Exception("No records for zone exceptions detected in XML File")
result = {
"entities" : entities,
"call_exceptions" : call_exceptions,
"prefixes" : prefixes,
"invalid_operations" : invalid_operations,
"zone_exceptions" : zone_exceptions,
"prefixes_index" : prefixes_index,
"call_exceptions_index" : call_exceptions_index,
"invalid_operations_index" : invalid_operations_index,
"zone_exceptions_index" : zone_exceptions_index,
}
return result
def _parse_country_file(self, cty_file, country_mapping_filename=None):
import plistlib
cty_list = None
entities = {}
exceptions = {}
prefixes = {}
exceptions_index = {}
prefixes_index = {}
exceptions_counter = 0
prefixes_counter = 0
mapping = None
with open(country_mapping_filename, "r") as f:
            mapping = json.loads(f.read())
cty_list = plistlib.readPlist(cty_file)
for item in cty_list:
entry = {}
call = str(item)
entry[const.COUNTRY] = unicode(cty_list[item]["Country"])
if mapping:
entry[const.ADIF] = int(mapping[cty_list[item]["Country"]])
entry[const.CQZ] = int(cty_list[item]["CQZone"])
entry[const.ITUZ] = int(cty_list[item]["ITUZone"])
entry[const.CONTINENT] = unicode(cty_list[item]["Continent"])
entry[const.LATITUDE] = float(cty_list[item]["Latitude"])
entry[const.LONGITUDE] = float(cty_list[item]["Longitude"])*(-1)
if cty_list[item]["ExactCallsign"]:
if call in exceptions_index.keys():
exceptions_index[call].append(exceptions_counter)
else:
exceptions_index[call] = [exceptions_counter]
exceptions[exceptions_counter] = entry
exceptions_counter += 1
else:
if call in prefixes_index.keys():
prefixes_index[call].append(prefixes_counter)
else:
prefixes_index[call] = [prefixes_counter]
prefixes[prefixes_counter] = entry
prefixes_counter += 1
self._logger.debug(str(len(prefixes))+" Prefixes added")
self._logger.debug(str(len(prefixes_index))+" Prefixes in Index")
self._logger.debug(str(len(exceptions))+" Exceptions added")
self._logger.debug(str(len(exceptions_index))+" Exceptions in Index")
result = {
"prefixes" : prefixes,
"exceptions" : exceptions,
"prefixes_index" : prefixes_index,
"exceptions_index" : exceptions_index,
}
return result
def _generate_random_word(self, length):
return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))
def _check_html_response(self, response):
error1 = "Access to this form requires a valid API key. For more info see: http://www.clublog.org/need_api.php"
error2 = "Invalid or missing API Key"
if response.status_code == requests.codes.ok:
return True
else:
err_str = "HTTP Status Code: " + str(response.status_code) + " HTTP Response: " + str(response.text)
self._logger.error(err_str)
if response.status_code == 403:
raise APIKeyMissingError
else:
raise LookupError(err_str)
def _serialize_data(self, my_dict):
new_dict = {}
for item in my_dict:
if isinstance(my_dict[item], datetime):
new_dict[item] = my_dict[item].strftime('%Y-%m-%d%H:%M:%S')
else:
new_dict[item] = str(my_dict[item])
return json.dumps(new_dict)
def _deserialize_data(self, json_data):
        my_dict = json.loads(json_data.decode('utf8'))
for item in my_dict:
if item == const.ADIF:
my_dict[item] = int(my_dict[item])
elif item == const.DELETED:
my_dict[item] = self._str_to_bool(my_dict[item])
elif item == const.CQZ:
my_dict[item] = int(my_dict[item])
elif item == const.ITUZ:
my_dict[item] = int(my_dict[item])
elif item == const.LATITUDE:
my_dict[item] = float(my_dict[item])
elif item == const.LONGITUDE:
my_dict[item] = float(my_dict[item])
elif item == const.START:
my_dict[item] = datetime.strptime(my_dict[item], '%Y-%m-%d%H:%M:%S').replace(tzinfo=UTC)
elif item == const.END:
my_dict[item] = datetime.strptime(my_dict[item], '%Y-%m-%d%H:%M:%S').replace(tzinfo=UTC)
elif item == const.WHITELIST_START:
my_dict[item] = datetime.strptime(my_dict[item], '%Y-%m-%d%H:%M:%S').replace(tzinfo=UTC)
elif item == const.WHITELIST_END:
my_dict[item] = datetime.strptime(my_dict[item], '%Y-%m-%d%H:%M:%S').replace(tzinfo=UTC)
elif item == const.WHITELIST:
my_dict[item] = self._str_to_bool(my_dict[item])
else:
my_dict[item] = unicode(my_dict[item])
return my_dict
def _str_to_bool(self, input):
if input.lower() == "true":
return True
elif input.lower() == "false":
return False
else:
raise KeyError
| true
| true
|
1c411bfc91d017fdf73488a0d02e0e2a2a808b40
| 11,782
|
py
|
Python
|
letterboxd/api.py
|
stevenrayesky/letterboxd
|
d231eda473ff81ce2f5f94f1e4f71835c413c81a
|
[
"MIT"
] | 25
|
2018-07-24T22:30:45.000Z
|
2021-06-03T21:03:25.000Z
|
letterboxd/api.py
|
stevenrayesky/letterboxd
|
d231eda473ff81ce2f5f94f1e4f71835c413c81a
|
[
"MIT"
] | 313
|
2018-06-22T04:52:59.000Z
|
2021-07-16T15:25:43.000Z
|
letterboxd/api.py
|
stevenrayesky/letterboxd
|
d231eda473ff81ce2f5f94f1e4f71835c413c81a
|
[
"MIT"
] | 6
|
2018-06-22T02:45:49.000Z
|
2021-04-27T04:46:52.000Z
|
import hashlib
import hmac
import json
import logging
import os
import time
import uuid
import requests
logging.getLogger(__name__)
CHARLES_PROXY = "http://localhost:8888/"
CHARLES_CERTIFICATE = os.environ.get("CHARLES_CERTIFICATE", None)
CHARLES = os.environ.get("CHARLES", None)
class API:
"""
Communication methods for the Letterboxd API
"""
def __init__(self, api_base, api_key, api_secret):
"""
This method will start the shared requests session for the Letterboxd
API. If the API key and secret are not passed, the initializer will
attempt to get them from the environment variables.
:param api_base: str - the base URL of the API endpoints,
including version number
:param api_key: str - API key provided by Letterboxd
:param api_secret: str - API shared secret provided by Letterboxd
"""
self.api_base = api_base
self.api_key = api_key
self.api_secret = api_secret
self.user = None
if self.api_key == "":
# If the API key wasn't passed in
class APIKeyMissingError(Exception):
pass
raise APIKeyMissingError(
"All methods require an API key. See "
"https://letterboxd.com/api-coming-soon/ "
"for more information"
)
if self.api_secret == "":
# If the API shared secret wasn't passed in
class APISecretMissingError(Exception):
pass
raise APISecretMissingError(
"All methods require an API secret. See "
"https://letterboxd.com/api-coming-soon/ "
"for more information"
)
# Start the shared requests session
self.session = requests.Session()
self.session.params = {}
def api_call(self, path, params={}, form=None, headers={}, method="get"):
"""
The workhorse method of calls to the Letterboxd API
:param path: str - URL endpoint path for the desired service
:param params: dict - request parameters
:param form: str - form information, likely from the auth.py call
:param headers: dict - request parameters
:param method: str - HTML methods, [get, post, put, patch, delete]
:return: requests.Response object
"""
# If we have an oAuth token
if self.user:
headers["Authorization"] = f"Bearer {self.user.token}"
url = f"{self.api_base}/{path}"
logging.debug(
f"\n"
f"url: {url}\n"
f"params: {params}\n"
f"form: {form}\n"
f"headers: {headers}\n"
f"method: {method}\n"
f"-------------------------"
)
if form:
# `form` seems to only be used in an oAuth call?
# should some of this code be in there instead?
logging.debug("API.api_call() if form")
headers["Content-Type"] = "application/x-www-form-urlencoded"
# Prepare the request
prepared_dict = self.__prepare_request(
url, body=form, headers=headers, method=method
)
prepared_request = prepared_dict["prepared_request"]
signature = prepared_dict["signature"]
# Add the signature to the headers
prepared_request.headers["Authorization"] = f"Signature {signature}"
elif method.lower() in ["post", "put", "patch", "delete"]:
logging.debug(
"API.api_call() elif method.lower() in "
'["post", "put", "patch", "delete"]:'
)
params = self.__remove_empty_from_dict(params)
# JSON-encode the body
body = json.dumps(params)
headers["Content-Type"] = "application/json"
# prepare the request
prepared_dict = self.__prepare_request(
url, body=body, headers=headers, method=method
)
prepared_request = prepared_dict["prepared_request"]
signature = prepared_dict["signature"]
# Attach the signature
prepared_request.prepare_url(prepared_request.url, {"signature": signature})
else:
logging.debug("API.api_call() else:")
# It's a GET
# Prepare the request
prepared_dict = self.__prepare_request(
url, params=params, headers=headers, method=method
)
prepared_request = prepared_dict["prepared_request"]
signature = prepared_dict["signature"]
logging.debug(prepared_request.url)
# Add the signature to the end of the params in the url
prepared_request.prepare_url(prepared_request.url, {"signature": signature})
logging.debug(
f"API.api_call() prepared_request\n"
f"method: {prepared_request.method}\n"
f"url: {prepared_request.url}\n"
f"headers: {prepared_request.headers}\n"
f"body: {prepared_request.body}"
)
try:
# If we've set the environment variable, run with Charles proxy
if CHARLES == "True":
# First, make sure we have the correct settings
if (
logging.getLogger().isEnabledFor(logging.DEBUG)
and CHARLES_CERTIFICATE
):
logging.debug("Send prepared_request through Charles")
proxies = {"http": CHARLES_PROXY, "https": CHARLES_PROXY}
self.session.verify = CHARLES_CERTIFICATE
# send the request through the proxy
response = self.session.send(prepared_request, proxies=proxies)
else:
# send the request normally
logging.debug("Send prepared_request")
response = self.session.send(prepared_request)
        except requests.exceptions.ConnectionError as error:
logging.error(error)
raise
# Return the response
logging.debug(f"api_call() response.status_code: {response.status_code}")
if response.ok:
return response
else:
response.raise_for_status()
return response
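    # Illustrative call (base URL, path and params are placeholder values):
    #   api = API("https://api.letterboxd.com/api/v0", api_key, api_secret)
    #   response = api.api_call("films", params={"perPage": 20})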
# -------------------------
# Private methods
def __prepare_request(
self, url, params={}, body=[], headers={}, method="get", form=False
):
"""
Prepare the request and sign it
:param url: string
:param params: dict
:param form: bool
:param headers: dict
:param method: string - get, post, put, patch, delete
:return: dict - {'prepared_request', 'signature'}
"""
# Add the request params required for uniquely identifying the request
params = self.__add_unique_params(params)
# Prepare the request and add it to the current requests session
request = requests.Request(
method.upper(), url, params=params, data=body, headers=headers
)
prepared_request = self.session.prepare_request(request)
logging.debug(f"prepared url: {prepared_request.url}")
# Hash the request signature
signature = self.__sign(
method=prepared_request.method,
url=prepared_request.url,
body=prepared_request.body,
)
return {"prepared_request": prepared_request, "signature": signature}
def __remove_empty_from_dict(self, dirty_dict):
"""
        Takes a dictionary and recursively removes all None and "" values
:param dirty_dict: dict
:return: dict
"""
logging.debug(f"params: {dirty_dict}")
cleaned_dict = {}
for key, value in dirty_dict.items():
logging.debug(f"key: {key}, value: {value}")
            if (value is None) or (value == ""):
logging.debug("Toss the value!")
elif isinstance(value, dict):
this_dict = self.__remove_empty_from_dict(value)
cleaned_dict[key] = this_dict
elif isinstance(value, tuple) or isinstance(value, list):
cleaned_dict[key] = self.__remove_empty_from_list(value)
else:
cleaned_dict[key] = value
logging.debug("-------------------------")
logging.debug(f"result: {cleaned_dict}")
return cleaned_dict
def __remove_empty_from_list(self, dirty_list):
"""
Takes a tuple or list and recursively removes all None and "" values
:param dirty_list: tuple or list
:return: list
"""
cleaned_list = []
for __item in dirty_list:
logging.debug(__item)
            if __item == "" or __item is None:
logging.debug(f"item {__item} is None")
pass
elif isinstance(__item, dict):
logging.debug(f"item {__item} is dict")
cleaned_list.append(self.__remove_empty_from_dict(__item))
elif isinstance(__item, tuple) or isinstance(__item, list):
logging.debug(f"item {__item} is tuple or list")
cleaned_list.append(self.__remove_empty_from_list(__item))
else:
logging.debug(f"item {__item} is else")
cleaned_list.append(__item)
return cleaned_list
def __add_unique_params(self, params):
"""
Adds the metabody params required for signing the request
:param params: dict
:return: dict
"""
params["apikey"] = self.api_key
# nonce: UUID string, must be unique for each API request
params["nonce"] = uuid.uuid4()
# timestamp: number of seconds since epoch, Jan 1, 1970 (UTC)
params["timestamp"] = int(time.time())
return params
def __sign(self, method, url, body=""):
"""
Create a salted string as bytes, of the form [METHOD]\x00[URL]\x00[BODY],
where [METHOD] is GET, POST, etc., [URL] is the fully-qualified request
URL including the apikey, nonce, timestamp and any other method parameters,
and [BODY] is a JSON-encoded string (for POST, PATCH and DELETE requests)
or empty (for GET requests). Next, create a [SIGNATURE] from the salted
string by applying a lower-case HMAC/SHA-256 transformation, using your
API Secret, and append it to your API request URL as the final query
parameter: …&signature=[SIGNATURE]
Notes: you must specify a Content-Type: application/json request header
if [BODY] is JSON-encoded. The apikey parameter is your supplied API Key.
The nonce parameter should be a UUID string and must be unique for each
API request. The timestamp parameter is the number of seconds since
        Jan 1, 1970 (UTC), also known as "UNIX Epoch time".
:param method: str - get, post, put, patch, delete
:param url: str
:param body: str - JSON-encoded
:return: str
"""
# Create the salted bytestring
if body is None:
body = ""
signing_bytestring = b"\x00".join(
[str.encode(method), str.encode(url), str.encode(body)]
)
logging.debug(f"signing_bytestring: {signing_bytestring}")
# applying an HMAC/SHA-256 transformation, using our API Secret
signature = hmac.new(
str.encode(self.api_secret), signing_bytestring, digestmod=hashlib.sha256
)
# get the string representation of the hash
signature_string = signature.hexdigest()
return signature_string
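# A minimal standalone sketch of the signing scheme documented in __sign()
# (secret, method and URL below are made-up placeholder values):
#   import hashlib, hmac
#   salted = b"\x00".join([b"GET", b"https://host/path?apikey=K&nonce=N&timestamp=T", b""])
#   signature = hmac.new(b"SHARED_SECRET", salted, hashlib.sha256).hexdigest()
#   # appended to the request URL as ...&signature=<signature>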
| 38.503268
| 88
| 0.583687
|
import hashlib
import hmac
import json
import logging
import os
import time
import uuid
import requests
logging.getLogger(__name__)
CHARLES_PROXY = "http://localhost:8888/"
CHARLES_CERTIFICATE = os.environ.get("CHARLES_CERTIFICATE", None)
CHARLES = os.environ.get("CHARLES", None)
class API:
def __init__(self, api_base, api_key, api_secret):
self.api_base = api_base
self.api_key = api_key
self.api_secret = api_secret
self.user = None
if self.api_key == "":
class APIKeyMissingError(Exception):
pass
raise APIKeyMissingError(
"All methods require an API key. See "
"https://letterboxd.com/api-coming-soon/ "
"for more information"
)
if self.api_secret == "":
# If the API shared secret wasn't passed in
class APISecretMissingError(Exception):
pass
raise APISecretMissingError(
"All methods require an API secret. See "
"https://letterboxd.com/api-coming-soon/ "
"for more information"
)
self.session = requests.Session()
self.session.params = {}
def api_call(self, path, params={}, form=None, headers={}, method="get"):
if self.user:
headers["Authorization"] = f"Bearer {self.user.token}"
url = f"{self.api_base}/{path}"
logging.debug(
f"\n"
f"url: {url}\n"
f"params: {params}\n"
f"form: {form}\n"
f"headers: {headers}\n"
f"method: {method}\n"
f"-------------------------"
)
if form:
logging.debug("API.api_call() if form")
headers["Content-Type"] = "application/x-www-form-urlencoded"
prepared_dict = self.__prepare_request(
url, body=form, headers=headers, method=method
)
prepared_request = prepared_dict["prepared_request"]
signature = prepared_dict["signature"]
prepared_request.headers["Authorization"] = f"Signature {signature}"
elif method.lower() in ["post", "put", "patch", "delete"]:
logging.debug(
"API.api_call() elif method.lower() in "
'["post", "put", "patch", "delete"]:'
)
params = self.__remove_empty_from_dict(params)
body = json.dumps(params)
headers["Content-Type"] = "application/json"
prepared_dict = self.__prepare_request(
url, body=body, headers=headers, method=method
)
prepared_request = prepared_dict["prepared_request"]
signature = prepared_dict["signature"]
prepared_request.prepare_url(prepared_request.url, {"signature": signature})
else:
logging.debug("API.api_call() else:")
# Prepare the request
prepared_dict = self.__prepare_request(
url, params=params, headers=headers, method=method
)
prepared_request = prepared_dict["prepared_request"]
signature = prepared_dict["signature"]
logging.debug(prepared_request.url)
# Add the signature to the end of the params in the url
prepared_request.prepare_url(prepared_request.url, {"signature": signature})
logging.debug(
f"API.api_call() prepared_request\n"
f"method: {prepared_request.method}\n"
f"url: {prepared_request.url}\n"
f"headers: {prepared_request.headers}\n"
f"body: {prepared_request.body}"
)
try:
# If we've set the environment variable, run with Charles proxy
if CHARLES == "True":
if (
logging.getLogger().isEnabledFor(logging.DEBUG)
and CHARLES_CERTIFICATE
):
logging.debug("Send prepared_request through Charles")
proxies = {"http": CHARLES_PROXY, "https": CHARLES_PROXY}
self.session.verify = CHARLES_CERTIFICATE
response = self.session.send(prepared_request, proxies=proxies)
else:
logging.debug("Send prepared_request")
response = self.session.send(prepared_request)
        except requests.exceptions.ConnectionError as error:
logging.error(error)
raise
logging.debug(f"api_call() response.status_code: {response.status_code}")
if response.ok:
return response
else:
response.raise_for_status()
return response
def __prepare_request(
self, url, params={}, body=[], headers={}, method="get", form=False
):
params = self.__add_unique_params(params)
request = requests.Request(
method.upper(), url, params=params, data=body, headers=headers
)
prepared_request = self.session.prepare_request(request)
logging.debug(f"prepared url: {prepared_request.url}")
signature = self.__sign(
method=prepared_request.method,
url=prepared_request.url,
body=prepared_request.body,
)
return {"prepared_request": prepared_request, "signature": signature}
def __remove_empty_from_dict(self, dirty_dict):
logging.debug(f"params: {dirty_dict}")
cleaned_dict = {}
for key, value in dirty_dict.items():
logging.debug(f"key: {key}, value: {value}")
            if (value is None) or (value == ""):
logging.debug("Toss the value!")
elif isinstance(value, dict):
this_dict = self.__remove_empty_from_dict(value)
cleaned_dict[key] = this_dict
elif isinstance(value, tuple) or isinstance(value, list):
cleaned_dict[key] = self.__remove_empty_from_list(value)
else:
cleaned_dict[key] = value
logging.debug("-------------------------")
logging.debug(f"result: {cleaned_dict}")
return cleaned_dict
def __remove_empty_from_list(self, dirty_list):
cleaned_list = []
for __item in dirty_list:
logging.debug(__item)
            if __item == "" or __item is None:
logging.debug(f"item {__item} is None")
pass
elif isinstance(__item, dict):
logging.debug(f"item {__item} is dict")
cleaned_list.append(self.__remove_empty_from_dict(__item))
elif isinstance(__item, tuple) or isinstance(__item, list):
logging.debug(f"item {__item} is tuple or list")
cleaned_list.append(self.__remove_empty_from_list(__item))
else:
logging.debug(f"item {__item} is else")
cleaned_list.append(__item)
return cleaned_list
def __add_unique_params(self, params):
params["apikey"] = self.api_key
params["nonce"] = uuid.uuid4()
params["timestamp"] = int(time.time())
return params
def __sign(self, method, url, body=""):
if body is None:
body = ""
signing_bytestring = b"\x00".join(
[str.encode(method), str.encode(url), str.encode(body)]
)
logging.debug(f"signing_bytestring: {signing_bytestring}")
signature = hmac.new(
str.encode(self.api_secret), signing_bytestring, digestmod=hashlib.sha256
)
signature_string = signature.hexdigest()
return signature_string
| true
| true
|
1c411dfc20d51e3bd775b4fd00e174a837526fc1
| 4,716
|
py
|
Python
|
pa300_meas.py
|
wataash/SUSS
|
bfdf96ff01fb33a2adc8104f30a27dea435ea9f3
|
[
"MIT"
] | 3
|
2016-01-08T02:38:59.000Z
|
2017-06-11T05:43:09.000Z
|
pa300_meas.py
|
wataash/instr
|
bfdf96ff01fb33a2adc8104f30a27dea435ea9f3
|
[
"MIT"
] | null | null | null |
pa300_meas.py
|
wataash/instr
|
bfdf96ff01fb33a2adc8104f30a27dea435ea9f3
|
[
"MIT"
] | null | null | null |
from itertools import product
import time
import lib.algorithms as al
import lib.constants as c
from lib.database import Database, update_fit_R3
from instr.agilent4156c import Agilent4156C
from instr.suss_pa300 import SussPA300
from vi_meas import meas_vi_double
# Configurations ---------------------------------------------------------------
sample = 'dummy_sample'
inst = 'suss_test'
# inst = 'suss_BD_test'
# inst = 'suss'
debug_mode = False # Set True during development without instruments.
agi_comp = 0.010 # Compliance (A)
agi_Vs = [0.3, -0.3] # Sweep voltages for suss_test
# agi_Vs = [1.5, -1.5] # Sweep voltages for suss_BD_test
# agi_Vs = [1.0, -1.0] # Sweep voltages for suss
# Setup ------------------------------------------------------------------------
if debug_mode:
sample = 'dummy_sample'
if not debug_mode:
import visa
db_rds = Database(**c.mysql_config)
db_read = Database(user='readonly', database='master_db')
sql = ('SELECT mask, dX, dY, Xmin, Xmax, Ymin, Ymax FROM v02_sample '
'WHERE sample=%s')
mask, dX, dY, X_min, X_max, Y_min, Y_max = db_rds.q_row_abs(sql, (sample,))
# XYs = spiral_XYs(X_min, X_max, Y_min, Y_max)
XYs = list(product(range(X_min, X_max+1), range(Y_min, Y_max+1)))
XYs = sorted(XYs, key=lambda x: (x[1], x[0]))
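# XYs now lists every (X, Y) grid point in row-major order (sorted by Y, then X).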
sql = ('SELECT mesa_id, xm_probe, ym_probe FROM v03_sample_mesa '
'WHERE sample=%s')
dat_xypr_default = db_rds.q_all(sql, (sample,))
dic_mesaid_xypr_default = {mesa_id: xy for mesa_id, *xy in dat_xypr_default}
sql = ('SELECT mesa_id, xm_probe, ym_probe FROM suss_xm_ym '
'WHERE sample=%s')
dat_xypr_spec = db_rds.q_all(sql, (sample,))
dic_mesaid_xypr_spec = {mesa_id: xy for mesa_id, *xy in dat_xypr_spec}
sql = 'SELECT mesa_id, mesa FROM mesa WHERE mask=%s'
dat_mesa = db_rds.q_all_abs(sql, (mask,))
dic_mesaid_mesa = {mesa_id: mesa for mesa_id, mesa in dat_mesa}
if debug_mode:
agi = Agilent4156C(False)
suss = SussPA300()
else:
rm = visa.ResourceManager()
print(rm.list_resources())
agi_rsrc = rm.open_resource('GPIB0::18::INSTR')
suss_rsrc = rm.open_resource('GPIB0::7::INSTR')
agi = Agilent4156C(False, rsrc=agi_rsrc)
suss = SussPA300(rsrc=suss_rsrc)
# Measure ----------------------------------------------------------------------
first_measurement = True
resp = input('Approaching to z_separate! Type "yes" to continue.\n')
if resp != 'yes':
exit()
suss.approach_separate()
# Measure I-Vs. Be sure separate!
mesa_ids = sorted(dic_mesaid_xypr_default)
for mesa_id in mesa_ids:
print('{} (mesa_id {})'.format(dic_mesaid_mesa[mesa_id], mesa_id))
if mesa_id in [1, 2, 5, 6, 7, 8]:
print('Skip mesa.')
continue
for (X, Y) in XYs:
if first_measurement and sample == 'dummy_sample' and \
not (mesa_id == 3 and (X, Y) == (3, 4)):
print('({},{})'.format(X, Y), end=' ')
continue
if inst != 'suss_test':
sql = ('SELECT suss_R2 FROM v04_device '
'WHERE sample=%s AND mesa_id=%s AND X=%s AND Y=%s')
R2 = db_read.q_single_abs(sql, (sample, mesa_id, X, Y,))
if al.num_9th(R2) < 1.5:
print('NG({},{})'.format(X, Y), end=' ')
continue
print('X{}Y{}'.format(X, Y))
if mesa_id in dic_mesaid_xypr_spec:
xs = dic_mesaid_xypr_spec[mesa_id][0] + (X - X_min) * dX
ys = dic_mesaid_xypr_spec[mesa_id][1] + (Y - Y_min) * dY
else:
xs = dic_mesaid_xypr_default[mesa_id][0] + (X - X_min) * dX
ys = dic_mesaid_xypr_default[mesa_id][1] + (Y - Y_min) * dY
suss.safe_move_contact('H', -xs, -ys)
if first_measurement:
input('Contact the prober.')
first_measurement = False
for V in agi_Vs:
mesa = dic_mesaid_mesa[mesa_id]
print('Measure {}...'.format(V))
# vis, aborted = meas_vi_double(agi, db, sample, mesa, X, Y,
# 'SUSS PA300 + Agilent 4156C',
# V, v_points=101, i_limit=10e-3)
# TODO hard code
vis, aborted = meas_vi_double(agi, db_rds, sample, mesa, X, Y,
inst,
V, v_points=101, i_limit=agi_comp)
# time.sleep(3) # wait replication TODO test
# update_fit_R3(db_read, db, sample, mesa, X, Y) # TODO test
db_rds.cnx.commit()
print('Committed.')
if debug_mode:
time.sleep(1) # To avoid duplicates of "t0" in database
if aborted:
break
suss.separate_separate()
input('Done.')
pass # Breakpoint
| 37.728
| 80
| 0.57782
|
from itertools import product
import time
import lib.algorithms as al
import lib.constants as c
from lib.database import Database, update_fit_R3
from instr.agilent4156c import Agilent4156C
from instr.suss_pa300 import SussPA300
from vi_meas import meas_vi_double
sample = 'dummy_sample'
inst = 'suss_test'
debug_mode = False
agi_comp = 0.010
agi_Vs = [0.3, -0.3]
if debug_mode:
    sample = 'dummy_sample'
if not debug_mode:
import visa
db_rds = Database(**c.mysql_config)
db_read = Database(user='readonly', database='master_db')
sql = ('SELECT mask, dX, dY, Xmin, Xmax, Ymin, Ymax FROM v02_sample '
'WHERE sample=%s')
mask, dX, dY, X_min, X_max, Y_min, Y_max = db_rds.q_row_abs(sql, (sample,))
XYs = list(product(range(X_min, X_max+1), range(Y_min, Y_max+1)))
XYs = sorted(XYs, key=lambda x: (x[1], x[0]))
sql = ('SELECT mesa_id, xm_probe, ym_probe FROM v03_sample_mesa '
'WHERE sample=%s')
dat_xypr_default = db_rds.q_all(sql, (sample,))
dic_mesaid_xypr_default = {mesa_id: xy for mesa_id, *xy in dat_xypr_default}
sql = ('SELECT mesa_id, xm_probe, ym_probe FROM suss_xm_ym '
'WHERE sample=%s')
dat_xypr_spec = db_rds.q_all(sql, (sample,))
dic_mesaid_xypr_spec = {mesa_id: xy for mesa_id, *xy in dat_xypr_spec}
sql = 'SELECT mesa_id, mesa FROM mesa WHERE mask=%s'
dat_mesa = db_rds.q_all_abs(sql, (mask,))
dic_mesaid_mesa = {mesa_id: mesa for mesa_id, mesa in dat_mesa}
if debug_mode:
agi = Agilent4156C(False)
suss = SussPA300()
else:
rm = visa.ResourceManager()
print(rm.list_resources())
agi_rsrc = rm.open_resource('GPIB0::18::INSTR')
suss_rsrc = rm.open_resource('GPIB0::7::INSTR')
agi = Agilent4156C(False, rsrc=agi_rsrc)
suss = SussPA300(rsrc=suss_rsrc)
first_measurement = True
resp = input('Approaching z_separate! Type "yes" to continue.\n')
if resp != 'yes':
exit()
suss.approach_separate()
mesa_ids = sorted(dic_mesaid_xypr_default)
for mesa_id in mesa_ids:
print('{} (mesa_id {})'.format(dic_mesaid_mesa[mesa_id], mesa_id))
if mesa_id in [1, 2, 5, 6, 7, 8]:
print('Skip mesa.')
continue
for (X, Y) in XYs:
if first_measurement and sample == 'dummy_sample' and \
not (mesa_id == 3 and (X, Y) == (3, 4)):
print('({},{})'.format(X, Y), end=' ')
continue
if inst != 'suss_test':
sql = ('SELECT suss_R2 FROM v04_device '
'WHERE sample=%s AND mesa_id=%s AND X=%s AND Y=%s')
R2 = db_read.q_single_abs(sql, (sample, mesa_id, X, Y,))
if al.num_9th(R2) < 1.5:
print('NG({},{})'.format(X, Y), end=' ')
continue
print('X{}Y{}'.format(X, Y))
if mesa_id in dic_mesaid_xypr_spec:
xs = dic_mesaid_xypr_spec[mesa_id][0] + (X - X_min) * dX
ys = dic_mesaid_xypr_spec[mesa_id][1] + (Y - Y_min) * dY
else:
xs = dic_mesaid_xypr_default[mesa_id][0] + (X - X_min) * dX
ys = dic_mesaid_xypr_default[mesa_id][1] + (Y - Y_min) * dY
suss.safe_move_contact('H', -xs, -ys)
if first_measurement:
input('Contact the prober.')
first_measurement = False
for V in agi_Vs:
mesa = dic_mesaid_mesa[mesa_id]
print('Measure {}...'.format(V))
vis, aborted = meas_vi_double(agi, db_rds, sample, mesa, X, Y,
inst,
V, v_points=101, i_limit=agi_comp)
            db_rds.cnx.commit()
print('Committed.')
if debug_mode:
time.sleep(1)
if aborted:
break
suss.separate_separate()
input('Done.')
pass
| true
| true
|
1c411e2187967a1829ea87f4d2e5bf6c61ad28a4
| 1,675
|
py
|
Python
|
youtube_search.py
|
smartao/lazyvert
|
e536908febf48885987df40fe497b899ed486927
|
[
"MIT"
] | null | null | null |
youtube_search.py
|
smartao/lazyvert
|
e536908febf48885987df40fe497b899ed486927
|
[
"MIT"
] | null | null | null |
youtube_search.py
|
smartao/lazyvert
|
e536908febf48885987df40fe497b899ed486927
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import urllib.request
import urllib.parse
import re
import os.path
arq_gmail = "gmail_mensagens.txt"
def youtube_search():
    print('\n### Starting youtube_search.py ###\n')
    urls = []  # List to store the video URLs
if os.path.exists(arq_gmail):
with open(arq_gmail) as arquivo:
for registro in arquivo:
                # print('\n{}'.format(registro.rstrip()))  # test print
                # Prepare the query for the search
query_string = urllib.parse.urlencode(
{"search_query": registro.rstrip()})
                # Run the search on YouTube and fetch the result
html_content = urllib.request.urlopen(
"http://www.youtube.com/results?" + query_string)
                # Grab the 11-character YouTube video identifier
search_results = re.findall(
r'href=\"\/watch\?v=(.{11})', html_content.read().decode())
                # Print the result to the screen
# print("http://www.youtube.com/watch?v=" + search_results[0])
                # Append the search results to the list
urls.append("http://www.youtube.com/watch?v=" +
search_results[0])
        # Write the results to a text file
with open('youtube_urls.txt', 'w') as saida:
for url in urls:
saida.write('{}\n'.format(url))
        print('Videos processed successfully')
else:
        print('Warning! Could not find the file {}!!!'.format(arq_gmail))
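# Editor's illustration (not from the original repo): a self-contained check
# of the 11-character video-ID regex used above, on a made-up HTML fragment.
def _demo_video_id_regex():
    html = 'href="/watch?v=dQw4w9WgXcQ" ... href="/watch?v=9bZkp7q19f0"'
    ids = re.findall(r'href=\"\/watch\?v=(.{11})', html)
    assert ids == ['dQw4w9WgXcQ', '9bZkp7q19f0']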
if __name__ == '__main__':
youtube_search()
| 33.5
| 79
| 0.573731
|
import urllib.request
import urllib.parse
import re
import os.path
arq_gmail = "gmail_mensagens.txt"
def youtube_search():
    print('\n### Starting youtube_search.py ###\n')
urls = []
if os.path.exists(arq_gmail):
with open(arq_gmail) as arquivo:
for registro in arquivo:
query_string = urllib.parse.urlencode(
{"search_query": registro.rstrip()})
html_content = urllib.request.urlopen(
"http://www.youtube.com/results?" + query_string)
search_results = re.findall(
r'href=\"\/watch\?v=(.{11})', html_content.read().decode())
urls.append("http://www.youtube.com/watch?v=" +
search_results[0])
with open('youtube_urls.txt', 'w') as saida:
for url in urls:
saida.write('{}\n'.format(url))
        print('Videos processed successfully')
else:
        print('Warning! Could not find the file {}!!!'.format(arq_gmail))
if __name__ == '__main__':
youtube_search()
| true
| true
|
1c411e7e8c97e4d8472dd0905b19a56a6fd5cc5b
| 1,868
|
py
|
Python
|
tests/executors/test_celery_executor.py
|
diggzhang/airflow-dingit
|
41482b83130d5815b772840681fb36eb9bfa69b9
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2020-05-20T05:42:19.000Z
|
2020-05-20T05:42:19.000Z
|
tests/executors/test_celery_executor.py
|
diggzhang/airflow-dingit
|
41482b83130d5815b772840681fb36eb9bfa69b9
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 13
|
2018-11-30T18:18:32.000Z
|
2021-02-19T17:04:12.000Z
|
tests/executors/test_celery_executor.py
|
diggzhang/airflow-dingit
|
41482b83130d5815b772840681fb36eb9bfa69b9
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 3
|
2018-01-23T15:42:58.000Z
|
2020-05-19T13:51:07.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
from airflow.executors.celery_executor import app
from airflow.executors.celery_executor import CeleryExecutor
from airflow.utils.state import State
from celery.contrib.testing.worker import start_worker
# leave this; it is used by the test worker
import celery.contrib.testing.tasks
class CeleryExecutorTest(unittest.TestCase):
def test_celery_integration(self):
executor = CeleryExecutor()
executor.start()
with start_worker(app=app, logfile=sys.stdout, loglevel='debug'):
success_command = 'echo 1'
fail_command = 'exit 1'
executor.execute_async(key='success', command=success_command)
# errors are propagated for some reason
try:
executor.execute_async(key='fail', command=fail_command)
except:
pass
executor.running['success'] = True
executor.running['fail'] = True
executor.end(synchronous=True)
            self.assertEqual(executor.event_buffer['success'], State.SUCCESS)
            self.assertEqual(executor.event_buffer['fail'], State.FAILED)
self.assertNotIn('success', executor.tasks)
self.assertNotIn('fail', executor.tasks)
if __name__ == '__main__':
unittest.main()
| 33.357143
| 74
| 0.697002
|
import unittest
import sys
from airflow.executors.celery_executor import app
from airflow.executors.celery_executor import CeleryExecutor
from airflow.utils.state import State
from celery.contrib.testing.worker import start_worker
import celery.contrib.testing.tasks
class CeleryExecutorTest(unittest.TestCase):
def test_celery_integration(self):
executor = CeleryExecutor()
executor.start()
with start_worker(app=app, logfile=sys.stdout, loglevel='debug'):
success_command = 'echo 1'
fail_command = 'exit 1'
executor.execute_async(key='success', command=success_command)
try:
executor.execute_async(key='fail', command=fail_command)
except:
pass
executor.running['success'] = True
executor.running['fail'] = True
executor.end(synchronous=True)
            self.assertEqual(executor.event_buffer['success'], State.SUCCESS)
            self.assertEqual(executor.event_buffer['fail'], State.FAILED)
self.assertNotIn('success', executor.tasks)
self.assertNotIn('fail', executor.tasks)
if __name__ == '__main__':
unittest.main()
| true
| true
|
1c411e985e32ab7b41daea34f3f11bc48964f38d
| 1,193
|
py
|
Python
|
weedcoco/tests/test_utils.py
|
Sydney-Informatics-Hub/Weed-ID-Interchange
|
d27891c2148de54d03727f688f8b6c6c414ec09b
|
[
"MIT"
] | 14
|
2021-11-29T12:16:59.000Z
|
2022-03-20T00:26:10.000Z
|
weedcoco/tests/test_utils.py
|
Sydney-Informatics-Hub/Weed-ID-Interchange
|
d27891c2148de54d03727f688f8b6c6c414ec09b
|
[
"MIT"
] | 282
|
2020-07-10T00:52:21.000Z
|
2021-03-01T06:58:05.000Z
|
weedcoco/tests/test_utils.py
|
Sydney-Informatics-Hub/Weed-ID-Interchange
|
d27891c2148de54d03727f688f8b6c6c414ec09b
|
[
"MIT"
] | 2
|
2020-10-07T06:12:39.000Z
|
2021-02-02T05:21:32.000Z
|
import pytest
from weedcoco.utils import lookup_growth_stage_name
@pytest.mark.parametrize(
"idx,scheme,expected",
[
[10, "fine", "First leaf through coleoptile"],
[13, "fine", "3 leaves unfolded"],
[19, "fine", "9 or more leaves unfolded"],
[20, "fine", "No tillers"],
[10, "grain_ranges", "Seedling"],
[13, "grain_ranges", "Seedling"],
[19, "grain_ranges", "Seedling"],
[20, "grain_ranges", "Tillering"],
[10, "bbch_ranges", "Leaf development"],
[13, "bbch_ranges", "Leaf development"],
[19, "bbch_ranges", "Leaf development"],
[20, "bbch_ranges", "Formation of side shoots, tillering"],
],
)
def test_lookup_growth_stage_name(idx, scheme, expected):
assert expected == lookup_growth_stage_name(idx=idx, scheme=scheme)
@pytest.mark.parametrize(
"idx,scheme",
[
[10, "grains"],
[100, "grain_ranges"],
["10", "grain_ranges"],
["10", "bbch_ranges"],
["10", "fine"],
],
)
def test_lookup_growth_stage_name_invalid(idx, scheme):
with pytest.raises(Exception):
assert lookup_growth_stage_name(idx=idx, scheme=scheme)
| 30.589744
| 71
| 0.604359
|
import pytest
from weedcoco.utils import lookup_growth_stage_name
@pytest.mark.parametrize(
"idx,scheme,expected",
[
[10, "fine", "First leaf through coleoptile"],
[13, "fine", "3 leaves unfolded"],
[19, "fine", "9 or more leaves unfolded"],
[20, "fine", "No tillers"],
[10, "grain_ranges", "Seedling"],
[13, "grain_ranges", "Seedling"],
[19, "grain_ranges", "Seedling"],
[20, "grain_ranges", "Tillering"],
[10, "bbch_ranges", "Leaf development"],
[13, "bbch_ranges", "Leaf development"],
[19, "bbch_ranges", "Leaf development"],
[20, "bbch_ranges", "Formation of side shoots, tillering"],
],
)
def test_lookup_growth_stage_name(idx, scheme, expected):
assert expected == lookup_growth_stage_name(idx=idx, scheme=scheme)
@pytest.mark.parametrize(
"idx,scheme",
[
[10, "grains"],
[100, "grain_ranges"],
["10", "grain_ranges"],
["10", "bbch_ranges"],
["10", "fine"],
],
)
def test_lookup_growth_stage_name_invalid(idx, scheme):
with pytest.raises(Exception):
assert lookup_growth_stage_name(idx=idx, scheme=scheme)
| true
| true
|
1c41201f781afab2487923eaedfe69628b7c4df2
| 1,770
|
py
|
Python
|
test/util/rpcauth-test.py
|
c-sports/CSPN
|
44522f7ed5f9844adae2cdbd3d3b9688395f9f10
|
[
"MIT"
] | 3
|
2020-06-23T17:15:04.000Z
|
2021-05-03T03:37:42.000Z
|
test/util/rpcauth-test.py
|
c-sports/CSPN
|
44522f7ed5f9844adae2cdbd3d3b9688395f9f10
|
[
"MIT"
] | 2
|
2020-06-17T13:02:11.000Z
|
2021-01-13T13:14:15.000Z
|
test/util/rpcauth-test.py
|
c-sports/CSPN
|
44522f7ed5f9844adae2cdbd3d3b9688395f9f10
|
[
"MIT"
] | 3
|
2020-06-18T17:01:10.000Z
|
2022-02-02T00:07:56.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Copyright (c) 2018-2021 The CSPN Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test share/rpcauth/rpcauth.py
"""
import base64
import configparser
import hmac
import importlib
import os
import sys
import unittest
class TestRPCAuth(unittest.TestCase):
def setUp(self):
config = configparser.ConfigParser()
config_path = os.path.abspath(
os.path.join(os.sep, os.path.abspath(os.path.dirname(__file__)),
"../config.ini"))
with open(config_path, encoding="utf8") as config_file:
config.read_file(config_file)
sys.path.insert(0, os.path.dirname(config['environment']['RPCAUTH']))
self.rpcauth = importlib.import_module('rpcauth')
def test_generate_salt(self):
for i in range(16, 32 + 1):
self.assertEqual(len(self.rpcauth.generate_salt(i)), i * 2)
def test_generate_password(self):
password = self.rpcauth.generate_password()
expected_password = base64.urlsafe_b64encode(
base64.urlsafe_b64decode(password)).decode('utf-8')
self.assertEqual(expected_password, password)
def test_check_password_hmac(self):
salt = self.rpcauth.generate_salt(16)
password = self.rpcauth.generate_password()
password_hmac = self.rpcauth.password_to_hmac(salt, password)
m = hmac.new(bytearray(salt, 'utf-8'),
bytearray(password, 'utf-8'), 'SHA256')
expected_password_hmac = m.hexdigest()
self.assertEqual(expected_password_hmac, password_hmac)
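# Editor's sketch (hypothetical helper, not part of the original suite):
# verifying a candidate password against a stored "salt$hmac" pair uses the
# same HMAC recomputation exercised in test_check_password_hmac above.
def _verify_rpcauth(salt, stored_hmac, candidate_password):
    recomputed = hmac.new(bytearray(salt, 'utf-8'),
                          bytearray(candidate_password, 'utf-8'),
                          'SHA256').hexdigest()
    # compare_digest gives a timing-safe comparison
    return hmac.compare_digest(recomputed, stored_hmac)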
if __name__ == '__main__':
unittest.main()
| 35.4
| 77
| 0.688701
|
import base64
import configparser
import hmac
import importlib
import os
import sys
import unittest
class TestRPCAuth(unittest.TestCase):
def setUp(self):
config = configparser.ConfigParser()
config_path = os.path.abspath(
os.path.join(os.sep, os.path.abspath(os.path.dirname(__file__)),
"../config.ini"))
with open(config_path, encoding="utf8") as config_file:
config.read_file(config_file)
sys.path.insert(0, os.path.dirname(config['environment']['RPCAUTH']))
self.rpcauth = importlib.import_module('rpcauth')
def test_generate_salt(self):
for i in range(16, 32 + 1):
self.assertEqual(len(self.rpcauth.generate_salt(i)), i * 2)
def test_generate_password(self):
password = self.rpcauth.generate_password()
expected_password = base64.urlsafe_b64encode(
base64.urlsafe_b64decode(password)).decode('utf-8')
self.assertEqual(expected_password, password)
def test_check_password_hmac(self):
salt = self.rpcauth.generate_salt(16)
password = self.rpcauth.generate_password()
password_hmac = self.rpcauth.password_to_hmac(salt, password)
m = hmac.new(bytearray(salt, 'utf-8'),
bytearray(password, 'utf-8'), 'SHA256')
expected_password_hmac = m.hexdigest()
self.assertEqual(expected_password_hmac, password_hmac)
if __name__ == '__main__':
unittest.main()
| true
| true
|
1c41201f86413e0c3d77e5d1e88b2aad459f51ae
| 1,212
|
py
|
Python
|
corecode/models.py
|
abid-anjum/PythonDjangoSchoolSystem
|
279f5d7b167883fda4d575d6d5621b63b991c232
|
[
"MIT"
] | 1
|
2021-04-04T15:25:26.000Z
|
2021-04-04T15:25:26.000Z
|
corecode/models.py
|
abid-anjum/PythonDjangoSchoolSystem
|
279f5d7b167883fda4d575d6d5621b63b991c232
|
[
"MIT"
] | null | null | null |
corecode/models.py
|
abid-anjum/PythonDjangoSchoolSystem
|
279f5d7b167883fda4d575d6d5621b63b991c232
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.utils import timezone
from django.urls import reverse
# Create your models here.
class SiteConfig(models.Model):
""" Site Configurations """
key = models.SlugField()
value = models.CharField(max_length=200)
def __str__(self):
return self.key
class AcademicSession(models.Model):
""" Academic Session """
name = models.CharField(max_length=200, unique=True)
current = models.BooleanField(default=True)
class Meta:
ordering = ['-name']
def __str__(self):
return self.name
class AcademicTerm(models.Model):
""" Academic Term """
name = models.CharField(max_length=20, unique=True)
current = models.BooleanField(default=True)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class Subject(models.Model):
""" Subject """
name = models.CharField(max_length=200, unique=True)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class StudentClass(models.Model):
name = models.CharField(max_length=200, unique=True)
class Meta:
verbose_name = "Class"
verbose_name_plural = "Classes"
ordering = ['name']
def __str__(self):
return self.name
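# Editor's sketch (hypothetical helpers, not part of the original app): the
# `current` flags above are typically resolved like this:
def get_current_session():
    """Return the AcademicSession flagged as current, or None."""
    return AcademicSession.objects.filter(current=True).first()

def get_current_term():
    """Return the AcademicTerm flagged as current, or None."""
    return AcademicTerm.objects.filter(current=True).first()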
| 19.868852
| 54
| 0.69637
|
from django.db import models
from django.utils import timezone
from django.urls import reverse
class SiteConfig(models.Model):
key = models.SlugField()
value = models.CharField(max_length=200)
def __str__(self):
return self.key
class AcademicSession(models.Model):
name = models.CharField(max_length=200, unique=True)
current = models.BooleanField(default=True)
class Meta:
ordering = ['-name']
def __str__(self):
return self.name
class AcademicTerm(models.Model):
name = models.CharField(max_length=20, unique=True)
current = models.BooleanField(default=True)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class Subject(models.Model):
name = models.CharField(max_length=200, unique=True)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class StudentClass(models.Model):
name = models.CharField(max_length=200, unique=True)
class Meta:
verbose_name = "Class"
verbose_name_plural = "Classes"
ordering = ['name']
def __str__(self):
return self.name
| true
| true
|
1c4120b92e8b0999d4d02d84a4bedb51e994d174
| 1,572
|
py
|
Python
|
rial_old/metadata/RIALModule.py
|
L3tum/RIAL
|
fba25f37434ff51e0de8e22ce834af589aba24e8
|
[
"BSD-3-Clause"
] | 2
|
2020-06-02T20:00:28.000Z
|
2020-06-06T02:15:49.000Z
|
rial_old/metadata/RIALModule.py
|
L3tum/RIAL
|
fba25f37434ff51e0de8e22ce834af589aba24e8
|
[
"BSD-3-Clause"
] | 51
|
2020-04-13T13:27:10.000Z
|
2020-06-12T11:38:36.000Z
|
rial_old/metadata/RIALModule.py
|
L3tum/RIAL
|
fba25f37434ff51e0de8e22ce834af589aba24e8
|
[
"BSD-3-Clause"
] | 2
|
2020-06-24T14:44:01.000Z
|
2020-11-29T04:08:54.000Z
|
from typing import List, Dict, Optional
from llvmlite.ir import Module, context
from rial.metadata.RIALFunction import RIALFunction
from rial.metadata.RIALIdentifiedStructType import RIALIdentifiedStructType
from rial.rial_types.RIALVariable import RIALVariable
class RIALModule(Module):
dependencies: List[str]
builtin_type_methods: Dict[str, List[str]]
structs: List[RIALIdentifiedStructType]
global_variables: List[RIALVariable]
rial_functions: List[RIALFunction]
filename: str
def __init__(self, name='', context=context.global_context):
super().__init__(name, context)
self.dependencies = list()
self.builtin_type_methods = dict()
self.structs = list()
self.global_variables = list()
self.rial_functions = list()
self.filename = ""
def get_global_safe(self: Module, name: str) -> Optional[RIALFunction]:
try:
return self.get_global(name)
except KeyError:
return None
def get_function(self, name: str) -> Optional[RIALFunction]:
return next((func for func in self.rial_functions if func.canonical_name == name), None)
def add_builtin_method(self, ty: str, method: str):
if not ty in self.builtin_type_methods:
self.builtin_type_methods[ty] = list()
if not method in self.builtin_type_methods[ty]:
self.builtin_type_methods[ty].append(method)
def get_rial_variable(self, name: str):
return next((glob for glob in self.global_variables if glob.name == name), None)
| 35.727273
| 96
| 0.696565
|
from typing import List, Dict, Optional
from llvmlite.ir import Module, context
from rial.metadata.RIALFunction import RIALFunction
from rial.metadata.RIALIdentifiedStructType import RIALIdentifiedStructType
from rial.rial_types.RIALVariable import RIALVariable
class RIALModule(Module):
dependencies: List[str]
builtin_type_methods: Dict[str, List[str]]
structs: List[RIALIdentifiedStructType]
global_variables: List[RIALVariable]
rial_functions: List[RIALFunction]
filename: str
def __init__(self, name='', context=context.global_context):
super().__init__(name, context)
self.dependencies = list()
self.builtin_type_methods = dict()
self.structs = list()
self.global_variables = list()
self.rial_functions = list()
self.filename = ""
def get_global_safe(self: Module, name: str) -> Optional[RIALFunction]:
try:
return self.get_global(name)
except KeyError:
return None
def get_function(self, name: str) -> Optional[RIALFunction]:
return next((func for func in self.rial_functions if func.canonical_name == name), None)
def add_builtin_method(self, ty: str, method: str):
if not ty in self.builtin_type_methods:
self.builtin_type_methods[ty] = list()
if not method in self.builtin_type_methods[ty]:
self.builtin_type_methods[ty].append(method)
def get_rial_variable(self, name: str):
return next((glob for glob in self.global_variables if glob.name == name), None)
| true
| true
|
1c4120c7b5399be0aa1163989112029f5ed99d31
| 7,810
|
py
|
Python
|
src/sage/groups/abelian_gps/dual_abelian_group_element.py
|
sensen1/sage
|
d6c5cd9be78cc448ee4c54bac93385b1244a234c
|
[
"BSL-1.0"
] | 1
|
2021-03-15T21:45:56.000Z
|
2021-03-15T21:45:56.000Z
|
src/sage/groups/abelian_gps/dual_abelian_group_element.py
|
sensen1/sage
|
d6c5cd9be78cc448ee4c54bac93385b1244a234c
|
[
"BSL-1.0"
] | null | null | null |
src/sage/groups/abelian_gps/dual_abelian_group_element.py
|
sensen1/sage
|
d6c5cd9be78cc448ee4c54bac93385b1244a234c
|
[
"BSL-1.0"
] | null | null | null |
"""
Elements (characters) of the dual group of a finite Abelian group
To obtain the dual group of a finite Abelian group, use the
:meth:`~sage.groups.abelian_gps.abelian_group.dual_group` method::
sage: F = AbelianGroup([2,3,5,7,8], names="abcde")
sage: F
Multiplicative Abelian group isomorphic to C2 x C3 x C5 x C7 x C8
sage: Fd = F.dual_group(names="ABCDE")
sage: Fd
Dual of Abelian Group isomorphic to Z/2Z x Z/3Z x Z/5Z x Z/7Z x Z/8Z
over Cyclotomic Field of order 840 and degree 192
The elements of the dual group can be evaluated on elements of the original group::
sage: a,b,c,d,e = F.gens()
sage: A,B,C,D,E = Fd.gens()
sage: A*B^2*D^7
A*B^2
sage: A(a)
-1
sage: B(b)
zeta840^140 - 1
sage: CC(_) # abs tol 1e-8
-0.499999999999995 + 0.866025403784447*I
sage: A(a*b)
-1
sage: (A*B*C^2*D^20*E^65).exponents()
(1, 1, 2, 6, 1)
sage: B^(-1)
B^2
AUTHORS:
- David Joyner (2006-07); based on abelian_group_element.py.
- David Joyner (2006-10); modifications suggested by William Stein.
- Volker Braun (2012-11) port to new Parent base. Use tuples for immutables.
Default to cyclotomic base ring.
"""
# ****************************************************************************
# Copyright (C) 2006 William Stein <wstein@gmail.com>
# Copyright (C) 2006 David Joyner<wdjoyner@gmail.com>
# Copyright (C) 2012 Volker Braun<vbraun.name@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
import operator
from sage.arith.all import LCM
from sage.misc.all import prod
from sage.groups.abelian_gps.element_base import AbelianGroupElementBase
from functools import reduce
def add_strings(x, z=0):
"""
This was in sage.misc.misc but commented out. Needed to add
lists of strings in the word_problem method below.
Return the sum of the elements of x. If x is empty,
return z.
INPUT:
- ``x`` -- iterable
- ``z`` -- the ``0`` that will be returned if ``x`` is empty.
OUTPUT:
The sum of the elements of ``x``.
EXAMPLES::
sage: from sage.groups.abelian_gps.dual_abelian_group_element import add_strings
sage: add_strings([], z='empty')
'empty'
sage: add_strings(['a', 'b', 'c'])
'abc'
"""
if len(x) == 0:
return z
if not isinstance(x, list):
m = iter(x)
y = next(m)
return reduce(operator.add, m, y)
else:
return reduce(operator.add, x[1:], x[0])
def is_DualAbelianGroupElement(x):
"""
Test whether ``x`` is a dual Abelian group element.
INPUT:
- ``x`` -- anything.
OUTPUT:
Boolean.
EXAMPLES::
sage: from sage.groups.abelian_gps.dual_abelian_group import is_DualAbelianGroupElement
sage: F = AbelianGroup(5,[5,5,7,8,9],names = list("abcde")).dual_group()
sage: is_DualAbelianGroupElement(F)
False
sage: is_DualAbelianGroupElement(F.an_element())
True
"""
return isinstance(x, DualAbelianGroupElement)
class DualAbelianGroupElement(AbelianGroupElementBase):
"""
Base class for abelian group elements
"""
def __call__(self, g):
"""
Evaluate ``self`` on a group element ``g``.
OUTPUT:
An element in
:meth:`~sage.groups.abelian_gps.dual_abelian_group.DualAbelianGroup_class.base_ring`.
EXAMPLES::
sage: F = AbelianGroup(5, [2,3,5,7,8], names="abcde")
sage: a,b,c,d,e = F.gens()
sage: Fd = F.dual_group(names="ABCDE")
sage: A,B,C,D,E = Fd.gens()
sage: A*B^2*D^7
A*B^2
sage: A(a)
-1
sage: B(b)
zeta840^140 - 1
sage: CC(B(b)) # abs tol 1e-8
-0.499999999999995 + 0.866025403784447*I
sage: A(a*b)
-1
TESTS::
sage: F = AbelianGroup(1, [7], names="a")
sage: a, = F.gens()
sage: Fd = F.dual_group(names="A", base_ring=GF(29))
sage: A, = Fd.gens()
sage: A(a)
16
"""
F = self.parent().base_ring()
expsX = self.exponents()
expsg = g.exponents()
order = self.parent().gens_orders()
N = LCM(order)
order_not = [N / o for o in order]
zeta = F.zeta(N)
return F.prod(zeta ** (expsX[i] * expsg[i] * order_not[i])
for i in range(len(expsX)))
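    # Editor's note on the computation above: for x with exponents
    # (x_1, ..., x_k), g with exponents (g_1, ..., g_k), invariants
    # (n_1, ..., n_k) and N = lcm(n_i), the value returned is
    #     x(g) = zeta_N ** sum(x_i * g_i * (N / n_i) for i in 1..k),
    # where each factor zeta_N**(N/n_i) is a primitive n_i-th root of unity.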
def word_problem(self, words, display=True):
"""
This is a rather hackish method and is included for completeness.
        The word problem cannot be handled for an instance of DualAbelianGroup
        as it can for an AbelianGroup. The reason why is that the word problem
for an instance of AbelianGroup simply calls GAP (which
has abelian groups implemented) and invokes "EpimorphismFromFreeGroup"
and "PreImagesRepresentative". GAP does not have duals of
abelian groups implemented. So, by using the same name
for the generators, the method below converts the problem for
the dual group to the corresponding problem on the group
itself and uses GAP to solve that.
EXAMPLES::
sage: G = AbelianGroup(5,[3, 5, 5, 7, 8],names="abcde")
sage: Gd = G.dual_group(names="abcde")
sage: a,b,c,d,e = Gd.gens()
sage: u = a^3*b*c*d^2*e^5
sage: v = a^2*b*c^2*d^3*e^3
sage: w = a^7*b^3*c^5*d^4*e^4
sage: x = a^3*b^2*c^2*d^3*e^5
sage: y = a^2*b^4*c^2*d^4*e^5
sage: e.word_problem([u,v,w,x,y],display=False)
[[b^2*c^2*d^3*e^5, 245]]
The command e.word_problem([u,v,w,x,y],display=True) returns
the same list but also prints $e = (b^2*c^2*d^3*e^5)^245$.
"""
## First convert the problem to one using AbelianGroups
import copy
from sage.groups.abelian_gps.abelian_group import AbelianGroup
from sage.interfaces.all import gap
M = self.parent()
G = M.group()
gens = M.variable_names()
g = prod([G.gen(i)**(self.list()[i]) for i in range(G.ngens())])
gap.eval("l:=One(Rationals)") ## trick needed for LL line below to keep Sage from parsing
s1 = "gens := GeneratorsOfGroup(%s)"%G._gap_init_()
gap.eval(s1)
for i in range(len(gens)):
cmd = ("%s := gens["+str(i+1)+"]")%gens[i]
gap.eval(cmd)
s2 = "g0:=%s; gensH:=%s"%(str(g),words)
gap.eval(s2)
s3 = 'G:=Group(gens); H:=Group(gensH)'
gap.eval(s3)
phi = gap.eval("hom:=EpimorphismFromFreeGroup(H)")
l1 = gap.eval("ans:=PreImagesRepresentative(hom,g0)")
l2 = copy.copy(l1)
l4 = []
l3 = l1.split("*")
for i in range(1,len(words)+1):
l2 = l2.replace("x"+str(i),"("+str(words[i-1])+")")
l3 = eval(gap.eval("L3:=ExtRepOfObj(ans)"))
nn = eval(gap.eval("n:=Int(Length(L3)/2)"))
LL1 = eval(gap.eval("L4:=List([l..n],i->L3[2*i])")) ## note the l not 1
LL2 = eval(gap.eval("L5:=List([l..n],i->L3[2*i-1])")) ## note the l not 1
if display:
s = str(g)+" = "+add_strings(["("+str(words[LL2[i]-1])+")^"+str(LL1[i])+"*" for i in range(nn)])
m = len(s)
print(" ", s[:m-1], "\n")
return [[words[LL2[i]-1],LL1[i]] for i in range(nn)]
| 32.677824
| 108
| 0.559667
|
import operator
from sage.arith.all import LCM
from sage.misc.all import prod
from sage.groups.abelian_gps.element_base import AbelianGroupElementBase
from functools import reduce
def add_strings(x, z=0):
if len(x) == 0:
return z
if not isinstance(x, list):
m = iter(x)
y = next(m)
return reduce(operator.add, m, y)
else:
return reduce(operator.add, x[1:], x[0])
def is_DualAbelianGroupElement(x):
return isinstance(x, DualAbelianGroupElement)
class DualAbelianGroupElement(AbelianGroupElementBase):
def __call__(self, g):
F = self.parent().base_ring()
expsX = self.exponents()
expsg = g.exponents()
order = self.parent().gens_orders()
N = LCM(order)
order_not = [N / o for o in order]
zeta = F.zeta(N)
return F.prod(zeta ** (expsX[i] * expsg[i] * order_not[i])
for i in range(len(expsX)))
def word_problem(self, words, display=True):
        import copy
        from sage.groups.abelian_gps.abelian_group import AbelianGroup
from sage.interfaces.all import gap
M = self.parent()
G = M.group()
gens = M.variable_names()
g = prod([G.gen(i)**(self.list()[i]) for i in range(G.ngens())])
gap.eval("l:=One(Rationals)") ()
gap.eval(s1)
for i in range(len(gens)):
cmd = ("%s := gens["+str(i+1)+"]")%gens[i]
gap.eval(cmd)
s2 = "g0:=%s; gensH:=%s"%(str(g),words)
gap.eval(s2)
s3 = 'G:=Group(gens); H:=Group(gensH)'
gap.eval(s3)
phi = gap.eval("hom:=EpimorphismFromFreeGroup(H)")
l1 = gap.eval("ans:=PreImagesRepresentative(hom,g0)")
l2 = copy.copy(l1)
l4 = []
l3 = l1.split("*")
for i in range(1,len(words)+1):
l2 = l2.replace("x"+str(i),"("+str(words[i-1])+")")
l3 = eval(gap.eval("L3:=ExtRepOfObj(ans)"))
nn = eval(gap.eval("n:=Int(Length(L3)/2)"))
        LL1 = eval(gap.eval("L4:=List([l..n],i->L3[2*i])"))
        LL2 = eval(gap.eval("L5:=List([l..n],i->L3[2*i-1])"))
        if display:
s = str(g)+" = "+add_strings(["("+str(words[LL2[i]-1])+")^"+str(LL1[i])+"*" for i in range(nn)])
m = len(s)
print(" ", s[:m-1], "\n")
return [[words[LL2[i]-1],LL1[i]] for i in range(nn)]
| true
| true
|
1c412155713ea11f66ed1ac5b32c4f06b6e448d8
| 3,847
|
py
|
Python
|
fairseq/models/fairseq_incremental_decoder.py
|
qibaoyuan/fairseq
|
eabd07fdcfd5b007d05428e81a31b7f3fc5de959
|
[
"BSD-3-Clause"
] | null | null | null |
fairseq/models/fairseq_incremental_decoder.py
|
qibaoyuan/fairseq
|
eabd07fdcfd5b007d05428e81a31b7f3fc5de959
|
[
"BSD-3-Clause"
] | null | null | null |
fairseq/models/fairseq_incremental_decoder.py
|
qibaoyuan/fairseq
|
eabd07fdcfd5b007d05428e81a31b7f3fc5de959
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from fairseq.models import FairseqDecoder
class FairseqIncrementalDecoder(FairseqDecoder):
"""Base class for incremental decoders.
Incremental decoding is a special mode at inference time where the Model
only receives a single timestep of input corresponding to the previous
output token (for teacher forcing) and must produce the next output
*incrementally*. Thus the model must cache any long-term state that is
needed about the sequence, e.g., hidden states, convolutional states, etc.
Compared to the standard :class:`FairseqDecoder` interface, the incremental
decoder interface allows :func:`forward` functions to take an extra keyword
argument (*incremental_state*) that can be used to cache state across
time-steps.
The :class:`FairseqIncrementalDecoder` interface also defines the
:func:`reorder_incremental_state` method, which is used during beam search
to select and reorder the incremental state based on the selection of beams.
To learn more about how incremental decoding works, refer to `this blog
<http://www.telesens.co/2019/04/21/understanding-incremental-decoding-in-fairseq/>`_.
"""
def __init__(self, dictionary):
super().__init__(dictionary)
def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs):
"""
Args:
prev_output_tokens (LongTensor): shifted output tokens of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (dict, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict, optional): dictionary used for storing
state during :ref:`Incremental decoding`
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
raise NotImplementedError
def extract_features(self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs):
"""
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
raise NotImplementedError
def reorder_incremental_state(self, incremental_state, new_order):
"""Reorder incremental state.
This should be called when the order of the input has changed from the
previous time step. A typical use case is beam search, where the input
order changes between time steps based on the selection of beams.
"""
seen = set()
def apply_reorder_incremental_state(module):
if module != self and hasattr(module, 'reorder_incremental_state') \
and module not in seen:
seen.add(module)
module.reorder_incremental_state(incremental_state, new_order)
self.apply(apply_reorder_incremental_state)
def set_beam_size(self, beam_size):
"""Sets the beam size in the decoder and all children."""
if getattr(self, '_beam_size', -1) != beam_size:
seen = set()
def apply_set_beam_size(module):
if module != self and hasattr(module, 'set_beam_size') \
and module not in seen:
seen.add(module)
module.set_beam_size(beam_size)
self.apply(apply_set_beam_size)
self._beam_size = beam_size
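# Editor's sketch (illustration only, not part of fairseq): a subclass
# honouring the reordering contract described in the docstring above; the
# flat dict-of-tensors cache layout is an assumption made for the example.
class _ExampleCachingDecoder(FairseqIncrementalDecoder):

    def reorder_incremental_state(self, incremental_state, new_order):
        # select the surviving beams, in their new order, along dim 0
        for key, buf in incremental_state.items():
            incremental_state[key] = buf.index_select(0, new_order)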
| 41.815217
| 103
| 0.666753
|
from fairseq.models import FairseqDecoder
class FairseqIncrementalDecoder(FairseqDecoder):
def __init__(self, dictionary):
super().__init__(dictionary)
def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs):
raise NotImplementedError
def extract_features(self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs):
raise NotImplementedError
def reorder_incremental_state(self, incremental_state, new_order):
seen = set()
def apply_reorder_incremental_state(module):
if module != self and hasattr(module, 'reorder_incremental_state') \
and module not in seen:
seen.add(module)
module.reorder_incremental_state(incremental_state, new_order)
self.apply(apply_reorder_incremental_state)
def set_beam_size(self, beam_size):
if getattr(self, '_beam_size', -1) != beam_size:
seen = set()
def apply_set_beam_size(module):
if module != self and hasattr(module, 'set_beam_size') \
and module not in seen:
seen.add(module)
module.set_beam_size(beam_size)
self.apply(apply_set_beam_size)
self._beam_size = beam_size
| true
| true
|
1c41218a618fd8499930cfb0946b992f436a8bd6
| 1,082
|
py
|
Python
|
kubernetes_asyncio/test/test_apps_v1beta1_deployment_rollback.py
|
PidgeyBE/kubernetes_asyncio
|
14d15dc309890253c26b6274a022e84441e05217
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/test/test_apps_v1beta1_deployment_rollback.py
|
PidgeyBE/kubernetes_asyncio
|
14d15dc309890253c26b6274a022e84441e05217
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/test/test_apps_v1beta1_deployment_rollback.py
|
PidgeyBE/kubernetes_asyncio
|
14d15dc309890253c26b6274a022e84441e05217
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.13.5
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes_asyncio.client
from kubernetes_asyncio.client.models.apps_v1beta1_deployment_rollback import AppsV1beta1DeploymentRollback # noqa: E501
from kubernetes_asyncio.client.rest import ApiException
class TestAppsV1beta1DeploymentRollback(unittest.TestCase):
"""AppsV1beta1DeploymentRollback unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAppsV1beta1DeploymentRollback(self):
"""Test AppsV1beta1DeploymentRollback"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes_asyncio.client.models.apps_v1beta1_deployment_rollback.AppsV1beta1DeploymentRollback() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 27.05
| 129
| 0.755083
|
from __future__ import absolute_import
import unittest
import kubernetes_asyncio.client
from kubernetes_asyncio.client.models.apps_v1beta1_deployment_rollback import AppsV1beta1DeploymentRollback
from kubernetes_asyncio.client.rest import ApiException
class TestAppsV1beta1DeploymentRollback(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testAppsV1beta1DeploymentRollback(self):
        pass
if __name__ == '__main__':
unittest.main()
| true
| true
|
1c41221b6cfc2a6ed96a7d025c02c9c4e2dc4e94
| 5,970
|
py
|
Python
|
skimage/_shared/testing.py
|
spark1729/scikit-image
|
65d525bcd5f30604c9a71c3480355580e9db7162
|
[
"BSD-3-Clause"
] | null | null | null |
skimage/_shared/testing.py
|
spark1729/scikit-image
|
65d525bcd5f30604c9a71c3480355580e9db7162
|
[
"BSD-3-Clause"
] | null | null | null |
skimage/_shared/testing.py
|
spark1729/scikit-image
|
65d525bcd5f30604c9a71c3480355580e9db7162
|
[
"BSD-3-Clause"
] | null | null | null |
"""Testing utilities."""
import os
import re
from tempfile import NamedTemporaryFile
from numpy import testing
import numpy as np
from skimage._shared._warnings import expected_warnings
import warnings
from .. import data, io, img_as_uint, img_as_float, img_as_int, img_as_ubyte
SKIP_RE = re.compile("(\s*>>>.*?)(\s*)#\s*skip\s+if\s+(.*)$")
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def doctest_skip_parser(func):
""" Decorator replaces custom skip test markup in doctests
Say a function has a docstring::
>>> something # skip if not HAVE_AMODULE
>>> something + else
>>> something # skip if HAVE_BMODULE
This decorator will evaluate the expression after ``skip if``. If this
evaluates to True, then the comment is replaced by ``# doctest: +SKIP``. If
False, then the comment is just removed. The expression is evaluated in the
``globals`` scope of `func`.
For example, if the module global ``HAVE_AMODULE`` is False, and module
global ``HAVE_BMODULE`` is False, the returned function will have docstring::
>>> something # doctest: +SKIP
>>> something + else
>>> something
"""
lines = func.__doc__.split('\n')
new_lines = []
for line in lines:
match = SKIP_RE.match(line)
if match is None:
new_lines.append(line)
continue
code, space, expr = match.groups()
try:
# Works as a function decorator
if eval(expr, func.__globals__):
code = code + space + "# doctest: +SKIP"
except AttributeError:
# Works as a class decorator
if eval(expr, func.__init__.__globals__):
code = code + space + "# doctest: +SKIP"
new_lines.append(code)
func.__doc__ = "\n".join(new_lines)
return func
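# Editor's illustration (hypothetical usage, not from skimage): applying the
# parser to a function whose doctest should be skipped while HAVE_AMODULE is
# false; the module global is defined here only for the demonstration.
HAVE_AMODULE = False

@doctest_skip_parser
def _demo_skip_markup():
    """
    >>> 1 + 1  # skip if not HAVE_AMODULE
    2
    """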
def roundtrip(img, plugin, suffix):
"""Save and read an image using a specified plugin"""
if not '.' in suffix:
suffix = '.' + suffix
temp_file = NamedTemporaryFile(suffix=suffix, delete=False)
temp_file.close()
fname = temp_file.name
io.imsave(fname, img, plugin=plugin)
new = io.imread(fname, plugin=plugin)
try:
os.remove(fname)
except Exception:
pass
return new
def color_check(plugin, fmt='png'):
"""Check roundtrip behavior for color images.
All major input types should be handled as ubytes and read
back correctly.
"""
img = img_as_ubyte(data.chelsea())
r1 = roundtrip(img, plugin, fmt)
testing.assert_allclose(img, r1)
img2 = img > 128
r2 = roundtrip(img2, plugin, fmt)
testing.assert_allclose(img2.astype(np.uint8), r2)
img3 = img_as_float(img)
with expected_warnings(['precision loss|unclosed file']):
r3 = roundtrip(img3, plugin, fmt)
testing.assert_allclose(r3, img)
with expected_warnings(['precision loss']):
img4 = img_as_int(img)
if fmt.lower() in (('tif', 'tiff')):
img4 -= 100
with expected_warnings(['sign loss']):
r4 = roundtrip(img4, plugin, fmt)
testing.assert_allclose(r4, img4)
else:
with expected_warnings(['sign loss|precision loss|unclosed file']):
r4 = roundtrip(img4, plugin, fmt)
testing.assert_allclose(r4, img_as_ubyte(img4))
img5 = img_as_uint(img)
with expected_warnings(['precision loss|unclosed file']):
r5 = roundtrip(img5, plugin, fmt)
testing.assert_allclose(r5, img)
def mono_check(plugin, fmt='png'):
"""Check the roundtrip behavior for images that support most types.
All major input types should be handled.
"""
img = img_as_ubyte(data.moon())
r1 = roundtrip(img, plugin, fmt)
testing.assert_allclose(img, r1)
img2 = img > 128
r2 = roundtrip(img2, plugin, fmt)
testing.assert_allclose(img2.astype(np.uint8), r2)
img3 = img_as_float(img)
    with expected_warnings([r'precision|unclosed file|\A\Z']):
r3 = roundtrip(img3, plugin, fmt)
if r3.dtype.kind == 'f':
testing.assert_allclose(img3, r3)
else:
testing.assert_allclose(r3, img_as_uint(img))
with expected_warnings(['precision loss']):
img4 = img_as_int(img)
if fmt.lower() in (('tif', 'tiff')):
img4 -= 100
        with expected_warnings([r'sign loss|\A\Z']):
r4 = roundtrip(img4, plugin, fmt)
testing.assert_allclose(r4, img4)
else:
with expected_warnings(['precision loss|sign loss|unclosed file']):
r4 = roundtrip(img4, plugin, fmt)
testing.assert_allclose(r4, img_as_uint(img4))
img5 = img_as_uint(img)
r5 = roundtrip(img5, plugin, fmt)
testing.assert_allclose(r5, img5)
def setup_test():
"""Default package level setup routine for skimage tests.
Import packages known to raise errors, and then
force warnings to raise errors.
Set a random seed
"""
warnings.simplefilter('default')
from scipy import signal, ndimage, special, optimize, linalg
from scipy.io import loadmat
from skimage import viewer, filter
np.random.seed(0)
warnings.simplefilter('error')
def teardown_test():
"""Default package level teardown routine for skimage tests.
Restore warnings to default behavior
"""
warnings.simplefilter('default')
if __name__ == '__main__':
color_check('pil')
mono_check('pil')
mono_check('pil', 'bmp')
mono_check('pil', 'tiff')
| 28.564593
| 81
| 0.638861
|
import os
import re
from tempfile import NamedTemporaryFile
from numpy import testing
import numpy as np
from skimage._shared._warnings import expected_warnings
import warnings
from .. import data, io, img_as_uint, img_as_float, img_as_int, img_as_ubyte
SKIP_RE = re.compile("(\s*>>>.*?)(\s*)#\s*skip\s+if\s+(.*)$")
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def doctest_skip_parser(func):
lines = func.__doc__.split('\n')
new_lines = []
for line in lines:
match = SKIP_RE.match(line)
if match is None:
new_lines.append(line)
continue
code, space, expr = match.groups()
try:
if eval(expr, func.__globals__):
code = code + space + "# doctest: +SKIP"
except AttributeError:
if eval(expr, func.__init__.__globals__):
code = code + space + "# doctest: +SKIP"
new_lines.append(code)
func.__doc__ = "\n".join(new_lines)
return func
def roundtrip(img, plugin, suffix):
if not '.' in suffix:
suffix = '.' + suffix
temp_file = NamedTemporaryFile(suffix=suffix, delete=False)
temp_file.close()
fname = temp_file.name
io.imsave(fname, img, plugin=plugin)
new = io.imread(fname, plugin=plugin)
try:
os.remove(fname)
except Exception:
pass
return new
def color_check(plugin, fmt='png'):
img = img_as_ubyte(data.chelsea())
r1 = roundtrip(img, plugin, fmt)
testing.assert_allclose(img, r1)
img2 = img > 128
r2 = roundtrip(img2, plugin, fmt)
testing.assert_allclose(img2.astype(np.uint8), r2)
img3 = img_as_float(img)
with expected_warnings(['precision loss|unclosed file']):
r3 = roundtrip(img3, plugin, fmt)
testing.assert_allclose(r3, img)
with expected_warnings(['precision loss']):
img4 = img_as_int(img)
if fmt.lower() in (('tif', 'tiff')):
img4 -= 100
with expected_warnings(['sign loss']):
r4 = roundtrip(img4, plugin, fmt)
testing.assert_allclose(r4, img4)
else:
with expected_warnings(['sign loss|precision loss|unclosed file']):
r4 = roundtrip(img4, plugin, fmt)
testing.assert_allclose(r4, img_as_ubyte(img4))
img5 = img_as_uint(img)
with expected_warnings(['precision loss|unclosed file']):
r5 = roundtrip(img5, plugin, fmt)
testing.assert_allclose(r5, img)
def mono_check(plugin, fmt='png'):
img = img_as_ubyte(data.moon())
r1 = roundtrip(img, plugin, fmt)
testing.assert_allclose(img, r1)
img2 = img > 128
r2 = roundtrip(img2, plugin, fmt)
testing.assert_allclose(img2.astype(np.uint8), r2)
img3 = img_as_float(img)
    with expected_warnings([r'precision|unclosed file|\A\Z']):
r3 = roundtrip(img3, plugin, fmt)
if r3.dtype.kind == 'f':
testing.assert_allclose(img3, r3)
else:
testing.assert_allclose(r3, img_as_uint(img))
with expected_warnings(['precision loss']):
img4 = img_as_int(img)
if fmt.lower() in (('tif', 'tiff')):
img4 -= 100
        with expected_warnings([r'sign loss|\A\Z']):
r4 = roundtrip(img4, plugin, fmt)
testing.assert_allclose(r4, img4)
else:
with expected_warnings(['precision loss|sign loss|unclosed file']):
r4 = roundtrip(img4, plugin, fmt)
testing.assert_allclose(r4, img_as_uint(img4))
img5 = img_as_uint(img)
r5 = roundtrip(img5, plugin, fmt)
testing.assert_allclose(r5, img5)
def setup_test():
warnings.simplefilter('default')
from scipy import signal, ndimage, special, optimize, linalg
from scipy.io import loadmat
from skimage import viewer, filter
np.random.seed(0)
warnings.simplefilter('error')
def teardown_test():
warnings.simplefilter('default')
if __name__ == '__main__':
color_check('pil')
mono_check('pil')
mono_check('pil', 'bmp')
mono_check('pil', 'tiff')
| true
| true
|
1c41222a6b11d443fb65bf6d70f484ce56870561
| 2,509
|
py
|
Python
|
Classifier/oxford_flower102.py
|
KlrShaK/Oxford_flowers102-using-Tensorflow
|
7966a0ead1ce0175fbb1d9d658da3ec915e2d77a
|
[
"MIT"
] | null | null | null |
Classifier/oxford_flower102.py
|
KlrShaK/Oxford_flowers102-using-Tensorflow
|
7966a0ead1ce0175fbb1d9d658da3ec915e2d77a
|
[
"MIT"
] | null | null | null |
Classifier/oxford_flower102.py
|
KlrShaK/Oxford_flowers102-using-Tensorflow
|
7966a0ead1ce0175fbb1d9d658da3ec915e2d77a
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt
from acc_plotter import plot_accuracy
from tensorflow.keras.applications.inception_v3 import InceptionV3
dataset_name = 'oxford_flowers102'
train_dataset = tfds.load(dataset_name, split=tfds.Split.TRAIN)
val_dataset = tfds.load(dataset_name, split=tfds.Split.VALIDATION)
print(train_dataset)
print(val_dataset)
cp_path = 'best_weights.hdf5'
cp_callback = tf.keras.callbacks.ModelCheckpoint(cp_path, save_best_only=True, save_weights_only=True, verbose=2)
pre_trained_model = InceptionV3(include_top=False, weights= 'imagenet', input_shape=(300,300,3))
pre_trained_model.trainable = False
def preprocessing(features):
image = tf.image.resize(features['image'], size=(300,300))
print('Final image shape',image)
if tf.random.uniform(()) > 0.5:
image = tf.image.random_flip_left_right(image)
image = tf.image.random_saturation(image, lower= 0, upper=5)
image = tf.image.random_brightness(image, 0.2)
image = tf.divide(image, 255.0)
label = features['label']
print('labels shape :',label)
label = tf.one_hot(features['label'], 102)
return image, label
def solution_model():
train_data= train_dataset.map(preprocessing).batch(32)
val_data= val_dataset.map(preprocessing).batch(32)
# for x in train_data:
# print(x[0].numpy())
# print(x[1].numpy())
# plt.imshow(x[0][0])
# plt.show()
model = tf.keras.Sequential(
[
pre_trained_model,
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(1024, activation='relu'),
tf.keras.layers.Dropout(0.6),
tf.keras.layers.Dense(1024, activation= 'relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(102, activation='softmax')
]
)
model.compile(optimizer=tf.optimizers.Adam(lr=5.42e-6), loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
try:
model.load_weights(cp_path) # ('best_weights_67%.hdf5') #'best_weights_63%.hdf5'
print('Weights Loaded !!!')
except:
print('No Previous Weights Found !!!')
history = model.fit(train_data, epochs=60, verbose=1, validation_data=val_data, callbacks=[cp_callback])
plot_accuracy(history)
return model
if __name__ == '__main__':
model = solution_model()
model.save('model.h5')
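# Editor's note (optional follow-up, not in the original script): once the
# dense head has converged, the frozen InceptionV3 base can be unfrozen for
# a short fine-tuning pass at a very low learning rate, e.g.
#     pre_trained_model.trainable = True
#     model.compile(optimizer=tf.optimizers.Adam(lr=1e-6),
#                   loss='categorical_crossentropy', metrics=['accuracy'])
# before a few more epochs of model.fit(...).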
| 35.338028
| 115
| 0.66401
|
import tensorflow as tf
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt
from acc_plotter import plot_accuracy
from tensorflow.keras.applications.inception_v3 import InceptionV3
dataset_name = 'oxford_flowers102'
train_dataset = tfds.load(dataset_name, split=tfds.Split.TRAIN)
val_dataset = tfds.load(dataset_name, split=tfds.Split.VALIDATION)
print(train_dataset)
print(val_dataset)
cp_path = 'best_weights.hdf5'
cp_callback = tf.keras.callbacks.ModelCheckpoint(cp_path, save_best_only=True, save_weights_only=True, verbose=2)
pre_trained_model = InceptionV3(include_top=False, weights= 'imagenet', input_shape=(300,300,3))
pre_trained_model.trainable = False
def preprocessing(features):
image = tf.image.resize(features['image'], size=(300,300))
print('Final image shape',image)
if tf.random.uniform(()) > 0.5:
image = tf.image.random_flip_left_right(image)
image = tf.image.random_saturation(image, lower= 0, upper=5)
image = tf.image.random_brightness(image, 0.2)
image = tf.divide(image, 255.0)
label = features['label']
print('labels shape :',label)
label = tf.one_hot(features['label'], 102)
return image, label
def solution_model():
train_data= train_dataset.map(preprocessing).batch(32)
val_data= val_dataset.map(preprocessing).batch(32)
model = tf.keras.Sequential(
[
pre_trained_model,
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(1024, activation='relu'),
tf.keras.layers.Dropout(0.6),
tf.keras.layers.Dense(1024, activation= 'relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(102, activation='softmax')
]
)
model.compile(optimizer=tf.optimizers.Adam(lr=5.42e-6), loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
try:
        model.load_weights(cp_path)
        print('Weights Loaded !!!')
except:
print('No Previous Weights Found !!!')
history = model.fit(train_data, epochs=60, verbose=1, validation_data=val_data, callbacks=[cp_callback])
plot_accuracy(history)
return model
if __name__ == '__main__':
model = solution_model()
model.save('model.h5')
| true
| true
|
1c412274933c8205c6fdff7216129db9bc5bc53d
| 1,212
|
py
|
Python
|
python/tests/engine/test_impute.py
|
dreness/data-wrangling-components
|
cf1a6eb152bb4f2fd1d3b933b9aa32b965a29610
|
[
"MIT"
] | null | null | null |
python/tests/engine/test_impute.py
|
dreness/data-wrangling-components
|
cf1a6eb152bb4f2fd1d3b933b9aa32b965a29610
|
[
"MIT"
] | null | null | null |
python/tests/engine/test_impute.py
|
dreness/data-wrangling-components
|
cf1a6eb152bb4f2fd1d3b933b9aa32b965a29610
|
[
"MIT"
] | null | null | null |
#
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project.
#
from tests.engine.test_store import get_test_store
from data_wrangling_components.engine.verbs.impute import impute
from data_wrangling_components.types import Step, Verb
def test_impute_with_string():
step = Step(
Verb.Impute,
"table5",
"output",
args={"to": "item", "value": "emptyValue"},
)
store = get_test_store()
result = impute(step, store)
assert len(result.columns) == 3
assert len(result) == 6
assert result.loc[0, "item"] == "bed"
assert result.loc[1, "item"] == "emptyValue"
assert result.loc[4, "item"] == "chair"
assert result.loc[5, "item"] == "emptyValue"
def test_impute_with_number():
step = Step(
Verb.Impute,
"table11",
"output",
args={"to": "y", "value": 5000},
)
store = get_test_store()
result = impute(step, store)
assert len(result.columns) == 3
assert len(result) == 3
assert result.loc[0, "y"] == 1
assert result.loc[1, "y"] == 5000
assert result.loc[2, "y"] == 1
| 24.24
| 67
| 0.590759
|
from tests.engine.test_store import get_test_store
from data_wrangling_components.engine.verbs.impute import impute
from data_wrangling_components.types import Step, Verb
def test_impute_with_string():
step = Step(
Verb.Impute,
"table5",
"output",
args={"to": "item", "value": "emptyValue"},
)
store = get_test_store()
result = impute(step, store)
assert len(result.columns) == 3
assert len(result) == 6
assert result.loc[0, "item"] == "bed"
assert result.loc[1, "item"] == "emptyValue"
assert result.loc[4, "item"] == "chair"
assert result.loc[5, "item"] == "emptyValue"
def test_impute_with_number():
step = Step(
Verb.Impute,
"table11",
"output",
args={"to": "y", "value": 5000},
)
store = get_test_store()
result = impute(step, store)
assert len(result.columns) == 3
assert len(result) == 3
assert result.loc[0, "y"] == 1
assert result.loc[1, "y"] == 5000
assert result.loc[2, "y"] == 1
| true
| true
|
1c4122ed5af14f238a921d169b5c91a69b567593
| 7,814
|
py
|
Python
|
docs/conf.py
|
suhyun-kim-div/cifar10
|
b5b93de3a8d8c32839d6c19831eb4c46f2e708bf
|
[
"FTL"
] | null | null | null |
docs/conf.py
|
suhyun-kim-div/cifar10
|
b5b93de3a8d8c32839d6c19831eb4c46f2e708bf
|
[
"FTL"
] | null | null | null |
docs/conf.py
|
suhyun-kim-div/cifar10
|
b5b93de3a8d8c32839d6c19831eb4c46f2e708bf
|
[
"FTL"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# cifar10-practice documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cifar10-practice'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cifar10-practicedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'cifar10-practice.tex',
u'cifar10-practice Documentation',
u"dudaji", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cifar10-practice', u'cifar10-practice Documentation',
[u"dudaji"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'cifar10-practice', u'cifar10-practice Documentation',
u"dudaji", 'cifar10-practice',
'simple cifar10 project', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 31.893878
| 80
| 0.707832
|
import os
import sys
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'cifar10-practice'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cifar10-practicedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'cifar10-practice.tex',
u'cifar10-practice Documentation',
u"dudaji", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cifar10-practice', u'cifar10-practice Documentation',
[u"dudaji"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'cifar10-practice', u'cifar10-practice Documentation',
u"dudaji", 'cifar10-practice',
'simple cifar10 project', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| true
| true
|
1c412340b6d4093988ac2b28656063c117e261ae
| 28,402
|
py
|
Python
|
pennylane/plugins/default_gaussian.py
|
rickyHong/Pennylane-repl
|
f18e5f233f84b91bbac8e61cebdee77c66fafd79
|
[
"Apache-2.0"
] | 2
|
2021-06-29T01:30:08.000Z
|
2021-08-23T10:38:52.000Z
|
pennylane/plugins/default_gaussian.py
|
rickyHong/Pennylane-repl
|
f18e5f233f84b91bbac8e61cebdee77c66fafd79
|
[
"Apache-2.0"
] | null | null | null |
pennylane/plugins/default_gaussian.py
|
rickyHong/Pennylane-repl
|
f18e5f233f84b91bbac8e61cebdee77c66fafd79
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018-2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=inconsistent-return-statements
"""
The :code:`default.gaussian` plugin is meant to be used as a template for writing PennyLane
device plugins for new CV backends.
It implements the necessary :class:`~pennylane._device.Device` methods as well as all built-in
:mod:`continuous-variable Gaussian operations <pennylane.ops.cv>`, and provides a very simple simulation of a
Gaussian-based quantum circuit architecture.
"""
# pylint: disable=attribute-defined-outside-init,too-many-arguments
import numpy as np
from scipy.special import factorial as fac
import pennylane as qml
from pennylane import Device
# tolerance for numerical errors
tolerance = 1e-10
#========================================================
# auxiliary functions
#========================================================
def partitions(s, include_singles=True):
"""Partitions a sequence into all groupings of pairs and singles of elements.
Args:
s (sequence): the sequence to partition
include_singles (bool): if False, only partitions into pairs
            are returned.
Returns:
tuple: returns a nested tuple, containing all partitions of the sequence.
"""
# pylint: disable=too-many-branches
if len(s) == 2:
if include_singles:
yield (s[0],), (s[1],)
yield tuple(s),
else:
# pull off a single item and partition the rest
if include_singles:
if len(s) > 1:
item_partition = (s[0],)
rest = s[1:]
rest_partitions = partitions(rest, include_singles)
for p in rest_partitions:
yield ((item_partition),) + p
else:
yield tuple(s),
# pull off a pair of items and partition the rest
for idx1 in range(1, len(s)):
item_partition = (s[0], s[idx1])
rest = s[1:idx1] + s[idx1+1:]
rest_partitions = partitions(rest, include_singles)
for p in rest_partitions:
yield ((item_partition),) + p
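# Illustrative sketch (not part of the original source): for a
# three-element sequence, ``partitions`` yields every grouping into
# singles and pairs, which ``fock_prob`` below sums over.
#
#     assert sorted(partitions((1, 2, 3))) == [
#         ((1,), (2,), (3,)), ((1,), (2, 3)),
#         ((1, 2), (3,)), ((1, 3), (2,))]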
def fock_prob(mu, cov, event, hbar=2.):
r"""Returns the probability of detection of a particular PNR detection event.
For more details, see:
* Kruse, R., Hamilton, C. S., Sansoni, L., Barkhofen, S., Silberhorn, C., & Jex, I.
"A detailed study of Gaussian Boson Sampling." `arXiv:1801.07488. (2018).
<https://arxiv.org/abs/1801.07488>`_
* Hamilton, C. S., Kruse, R., Sansoni, L., Barkhofen, S., Silberhorn, C., & Jex, I.
"Gaussian boson sampling." `Physical review letters, 119(17), 170501. (2017).
<https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.119.170501>`_
Args:
mu (array): length-:math:`2N` means vector
cov (array): :math:`2N\times 2N` covariance matrix
event (array): length-:math:`N` array of non-negative integers representing the
PNR detection event of the multi-mode system.
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`.
Returns:
float: probability of detecting the event
"""
# number of modes
N = len(mu)//2
I = np.identity(N)
# mean displacement of each mode
alpha = (mu[:N] + 1j*mu[N:])/np.sqrt(2*hbar)
# the expectation values (<a_1>, <a_2>,...,<a_N>, <a^\dagger_1>, ..., <a^\dagger_N>)
beta = np.concatenate([alpha, alpha.conj()])
x = cov[:N, :N]*2/hbar
xp = cov[:N, N:]*2/hbar
p = cov[N:, N:]*2/hbar
# the (Hermitian) matrix elements <a_i^\dagger a_j>
aidaj = (x+p+1j*(xp-xp.T)-2*I)/4
# the (symmetric) matrix elements <a_i a_j>
aiaj = (x-p+1j*(xp+xp.T))/4
# calculate the covariance matrix sigma_Q appearing in the Q function:
# Q(alpha) = exp[-(alpha-beta).sigma_Q^{-1}.(alpha-beta)/2]/|sigma_Q|
Q = np.block([[aidaj, aiaj.conj()], [aiaj, aidaj.conj()]]) + np.identity(2*N)
# inverse Q matrix
Qinv = np.linalg.inv(Q)
# 1/sqrt(|Q|)
sqrt_Qdet = 1/np.sqrt(np.linalg.det(Q).real)
prefactor = np.exp(-beta @ Qinv @ beta.conj()/2)
if np.all(np.array(event) == 0):
# all PNRs detect the vacuum state
return (prefactor*sqrt_Qdet).real/np.prod(fac(event))
# the matrix X_n = [[0, I_n], [I_n, 0]]
O = np.zeros_like(I)
X = np.block([[O, I], [I, O]])
gamma = X @ Qinv.conj() @ beta
# For each mode, repeat the mode number event[i] times
ind = [i for sublist in [[idx]*j for idx, j in enumerate(event)] for i in sublist]
# extend the indices for xp-ordering of the Gaussian state
ind += [i+N for i in ind]
if np.linalg.norm(beta) < tolerance:
# state has no displacement
part = partitions(ind, include_singles=False)
else:
part = partitions(ind, include_singles=True)
# calculate Hamilton's A matrix: A = X.(I-Q^{-1})*
A = X @ (np.identity(2*N)-Qinv).conj()
summation = np.sum([np.prod([gamma[i[0]] if len(i) == 1 else A[i] for i in p]) for p in part])
return (prefactor*sqrt_Qdet*summation).real/np.prod(fac(event))
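# Illustrative check (assuming hbar=2): the single-mode vacuum has
# mu = 0 and cov = identity, so detecting zero photons is certain.
#
#     assert np.isclose(fock_prob(np.zeros(2), np.identity(2), [0]), 1.0)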
#========================================================
# parametrized gates
#========================================================
def rotation(phi):
"""Rotation in the phase space.
Args:
phi (float): rotation parameter
Returns:
array: symplectic transformation matrix
"""
return np.array([[np.cos(phi), -np.sin(phi)],
[np.sin(phi), np.cos(phi)]])
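# Quick sanity sketch (illustrative only): a pi/2 rotation maps the
# x quadrature direction onto p.
#
#     assert np.allclose(rotation(np.pi / 2) @ np.array([1.0, 0.0]),
#                        np.array([0.0, 1.0]))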
def displacement(state, wire, alpha, hbar=2):
"""Displacement in the phase space.
Args:
state (tuple): contains means vector and covariance matrix
wire (int): wire that the displacement acts on
        alpha (complex): the complex displacement
        hbar (float): (default 2) the value of :math:`\hbar` in the commutation
            relation :math:`[\x,\p]=i\hbar`
Returns:
tuple: contains the vector of means and covariance matrix
"""
mu = state[0]
mu[wire] += alpha.real*np.sqrt(2*hbar)
mu[wire+len(mu)//2] += alpha.imag*np.sqrt(2*hbar)
return mu, state[1]
def squeezing(r, phi):
"""Squeezing in the phase space.
Args:
r (float): squeezing magnitude
phi (float): rotation parameter
Returns:
array: symplectic transformation matrix
"""
cp = np.cos(phi)
sp = np.sin(phi)
ch = np.cosh(r)
sh = np.sinh(r)
return np.array([[ch-cp*sh, -sp*sh],
[-sp*sh, ch+cp*sh]])
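# Illustrative sketch: with phi = 0 the squeezing symplectic is diagonal,
# shrinking x by exp(-r) and stretching p by exp(r).
#
#     assert np.allclose(squeezing(0.5, 0.0),
#                        np.diag([np.exp(-0.5), np.exp(0.5)]))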
def quadratic_phase(s):
"""Quadratic phase shift.
Args:
s (float): gate parameter
Returns:
array: symplectic transformation matrix
"""
return np.array([[1, 0],
[s, 1]])
def beamsplitter(theta, phi):
r"""Beamsplitter.
Args:
theta (float): transmittivity angle (:math:`t=\cos\theta`)
phi (float): phase angle (:math:`r=e^{i\phi}\sin\theta`)
Returns:
array: symplectic transformation matrix
"""
cp = np.cos(phi)
sp = np.sin(phi)
ct = np.cos(theta)
st = np.sin(theta)
S = np.array([[ct, -cp*st, 0, -st*sp],
[cp*st, ct, -st*sp, 0],
[0, st*sp, ct, -cp*st],
[st*sp, 0, cp*st, ct]])
return S
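# Illustrative sketch: the beamsplitter symplectic is orthogonal, so it
# leaves the vacuum covariance invariant.
#
#     S = beamsplitter(np.pi / 4, 0.0)
#     assert np.allclose(S @ S.T, np.identity(4))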
def two_mode_squeezing(r, phi):
"""Two-mode squeezing.
Args:
r (float): squeezing magnitude
phi (float): rotation parameter
Returns:
array: symplectic transformation matrix
"""
cp = np.cos(phi)
sp = np.sin(phi)
ch = np.cosh(r)
sh = np.sinh(r)
S = np.array([[ch, cp*sh, 0, sp*sh],
[cp*sh, ch, sp*sh, 0],
[0, sp*sh, ch, -cp*sh],
[sp*sh, 0, -cp*sh, ch]])
return S
def controlled_addition(s):
"""CX gate.
Args:
s (float): gate parameter
Returns:
array: symplectic transformation matrix
"""
S = np.array([[1, 0, 0, 0],
[s, 1, 0, 0],
[0, 0, 1, -s],
[0, 0, 0, 1]])
return S
def controlled_phase(s):
"""CZ gate.
Args:
s (float): gate parameter
Returns:
array: symplectic transformation matrix
"""
S = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, s, 1, 0],
[s, 0, 0, 1]])
return S
def interferometer(U):
"""Interferometer
Args:
U (array): unitary matrix
Returns:
array: symplectic transformation matrix
"""
N = 2*len(U)
X = U.real
Y = U.imag
rows = np.arange(N).reshape(2, -1).T.flatten()
S = np.vstack([np.hstack([X, -Y]),
np.hstack([Y, X])])[:, rows][rows]
return S
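# Illustrative sketch: the identity unitary on two modes maps to the
# 4x4 identity symplectic in the (x1, x2, p1, p2) ordering.
#
#     assert np.allclose(interferometer(np.identity(2)), np.identity(4))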
#========================================================
# Arbitrary states and operators
#========================================================
def squeezed_cov(r, phi, hbar=2):
r"""Returns the squeezed covariance matrix of a squeezed state.
Args:
r (float): the squeezing magnitude
        phi (float): the squeezing phase :math:`\phi`
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
Returns:
        array: the covariance matrix of the squeezed state
"""
cov = np.array([[np.exp(-2*r), 0],
[0, np.exp(2*r)]]) * hbar/2
R = rotation(phi/2)
return R @ cov @ R.T
def vacuum_state(wires, hbar=2.):
r"""Returns the vacuum state.
Args:
        wires (int): the number of modes
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
Returns:
array: the vacuum state
"""
means = np.zeros((2*wires))
cov = np.identity(2*wires) * hbar/2
state = [means, cov]
return state
def coherent_state(a, phi=0, hbar=2.):
r"""Returns a coherent state.
Args:
a (complex) : the displacement
phi (float): the phase
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
Returns:
array: the coherent state
"""
alpha = a*np.exp(1j*phi)
means = np.array([alpha.real, alpha.imag]) * np.sqrt(2*hbar)
cov = np.identity(2) * hbar/2
state = [means, cov]
return state
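# Illustrative example (hbar = 2): a real displacement a = 1 gives a
# means vector [2, 0] over a vacuum covariance.
#
#     mu, cov = coherent_state(1.0)
#     assert np.allclose(mu, [2.0, 0.0]) and np.allclose(cov, np.identity(2))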
def squeezed_state(r, phi, hbar=2.):
r"""Returns a squeezed state.
Args:
r (float): the squeezing magnitude
phi (float): the squeezing phase :math:`\phi`
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
Returns:
array: the squeezed state
"""
means = np.zeros((2))
state = [means, squeezed_cov(r, phi, hbar)]
return state
def displaced_squeezed_state(a, phi_a, r, phi_r, hbar=2.):
r"""Returns a squeezed coherent state
Args:
        a (float): the displacement magnitude
        phi_a (float): the displacement phase
r (float): the squeezing magnitude
phi_r (float): the squeezing phase :math:`\phi_r`
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
Returns:
array: the squeezed coherent state
"""
alpha = a * np.exp(1j*phi_a)
means = np.array([alpha.real, alpha.imag]) * np.sqrt(2*hbar)
state = [means, squeezed_cov(r, phi_r, hbar)]
return state
def thermal_state(nbar, hbar=2.):
r"""Returns a thermal state.
Args:
nbar (float): the mean photon number
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
Returns:
array: the thermal state
"""
means = np.zeros([2])
state = [means, (2*nbar+1)*np.identity(2)*hbar/2]
return state
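# Illustrative example: the mean photon number of ``thermal_state(nbar)``
# recovers nbar (here via the ``photon_number`` helper defined below).
#
#     mu, cov = thermal_state(0.5)
#     ex, _ = photon_number(mu, cov, [0], None, 1)
#     assert np.isclose(ex, 0.5)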
def gaussian_state(mu, cov, hbar=2.):
r"""Returns a Gaussian state.
This is simply a bare wrapper function,
since the means vector and covariance matrix
can be passed via the parameters unchanged.
Note that both the means vector and covariance
matrix should be in :math:`(\x_1,\dots, \x_N, \p_1, \dots, \p_N)`
ordering.
Args:
        mu (array): means vector. Must be length-:math:`2N`,
where N is the number of modes
cov (array): covariance matrix. Must be dimension :math:`2N\times 2N`,
where N is the number of modes
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
Returns:
        tuple: the means vector and covariance matrix of the Gaussian state
"""
# pylint: disable=unused-argument
return mu, cov
def set_state(state, wire, mu, cov):
r"""Inserts a single mode Gaussian into the
state representation of the complete system.
Args:
state (tuple): contains means vector
and covariance matrix of existing state
wire (int): wire corresponding to the new Gaussian state
mu (array): vector of means to insert
cov (array): covariance matrix to insert
Returns:
tuple: contains the vector of means and covariance matrix.
"""
mu0 = state[0]
cov0 = state[1]
N = len(mu0)//2
# insert the new state into the means vector
mu0[[wire, wire+N]] = mu
# insert the new state into the covariance matrix
ind = np.concatenate([np.array([wire]), np.array([wire])+N])
rows = ind.reshape(-1, 1)
cols = ind.reshape(1, -1)
cov0[rows, cols] = cov
return mu0, cov0
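# Illustrative sketch: insert a single-mode coherent state into mode 0
# of a two-mode vacuum; mode 1 is left in the vacuum.
#
#     state = vacuum_state(2)
#     mu1, cov1 = coherent_state(1.0)
#     mu, cov = set_state(state, 0, mu1, cov1)
#     assert np.allclose(mu, [2.0, 0.0, 0.0, 0.0])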
#========================================================
# expectations
#========================================================
def photon_number(mu, cov, wires, params, total_wires, hbar=2.):
r"""Calculates the mean photon number for a given one-mode state.
Args:
mu (array): length-2 vector of means
cov (array): :math:`2\times 2` covariance matrix
wires (Sequence[int]): wires to calculate the expectation for
params (None): no parameters are used for this expectation value
total_wires (int): total number of wires in the system
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
Returns:
tuple: contains the photon number expectation and variance
"""
# pylint: disable=unused-argument
ex = (np.trace(cov) + mu.T @ mu)/(2*hbar) - 1/2
var = (np.trace(cov @ cov) + 2*mu.T @ cov @ mu)/(2*hbar**2) - 1/4
return ex, var
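# Illustrative check (hbar = 2): the vacuum has zero mean photon number
# and zero variance.
#
#     ex, var = photon_number(np.zeros(2), np.identity(2), [0], None, 1)
#     assert np.isclose(ex, 0.0) and np.isclose(var, 0.0)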
def homodyne(phi=None):
"""Function factory that returns the Homodyne expectation of a one mode state.
Args:
phi (float): the default phase space axis to perform the Homodyne measurement
Returns:
function: A function that accepts a single mode means vector, covariance matrix,
and phase space angle phi, and returns the quadrature expectation
value and variance.
"""
if phi is not None:
def _homodyne(mu, cov, wires, params, total_wires, hbar=2.):
"""Arbitrary angle homodyne expectation."""
# pylint: disable=unused-argument
rot = rotation(phi)
muphi = rot.T @ mu
covphi = rot.T @ cov @ rot
return muphi[0], covphi[0, 0]
return _homodyne
def _homodyne(mu, cov, wires, params, total_wires, hbar=2.):
"""Arbitrary angle homodyne expectation."""
# pylint: disable=unused-argument
rot = rotation(params[0])
muphi = rot.T @ mu
covphi = rot.T @ cov @ rot
return muphi[0], covphi[0, 0]
return _homodyne
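# Illustrative sketch: ``homodyne(0)`` measures the x quadrature; on a
# displaced vacuum it returns the first mean and the (0, 0) covariance.
#
#     x_meas = homodyne(0.0)
#     mean, var = x_meas(np.array([2.0, 0.0]), np.identity(2), [0], None, 1)
#     assert np.isclose(mean, 2.0) and np.isclose(var, 1.0)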
def poly_quad_expectations(mu, cov, wires, params, total_wires, hbar=2.):
r"""Calculates the expectation and variance for an arbitrary
polynomial of quadrature operators.
Args:
mu (array): vector of means
cov (array): covariance matrix
wires (Sequence[int]): wires to calculate the expectation for
params (array): a :math:`(2N+1)\times (2N+1)` array containing the linear
and quadratic coefficients of the quadrature operators
:math:`(\I, \x_0, \p_0, \x_1, \p_1,\dots)`
total_wires (int): total number of wires in the system
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
Returns:
tuple: the mean and variance of the quadrature-polynomial observable
"""
Q = params[0]
# HACK, we need access to the Poly instance in order to expand the matrix!
# TODO: maybe we should make heisenberg_obs a class method or a static method to avoid this being a 'hack'?
op = qml.ops.PolyXP(Q, wires=wires)
Q = op.heisenberg_obs(total_wires)
if Q.ndim == 1:
d = np.r_[Q[1::2], Q[2::2]]
return d.T @ mu + Q[0], d.T @ cov @ d
# convert to the (I, x1,x2,..., p1,p2...) ordering
M = np.vstack((Q[0:1, :], Q[1::2, :], Q[2::2, :]))
M = np.hstack((M[:, 0:1], M[:, 1::2], M[:, 2::2]))
d1 = M[1:, 0]
d2 = M[0, 1:]
A = M[1:, 1:]
d = d1 + d2
k = M[0, 0]
d2 = 2*A @ mu + d
k2 = mu.T @ A @ mu + mu.T @ d + k
ex = np.trace(A @ cov) + k2
var = 2*np.trace(A @ cov @ A @ cov) + d2.T @ cov @ d2
modes = np.arange(2*total_wires).reshape(2, -1).T
groenewald_correction = np.sum([np.linalg.det(hbar*A[:, m][n]) for m in modes for n in modes])
var -= groenewald_correction
return ex, var
def fock_expectation(mu, cov, wires, params, total_wires, hbar=2.):
r"""Calculates the expectation and variance of a Fock state probability.
Args:
mu (array): length-:math:`2N` vector of means
cov (array): :math:`2N\times 2N` covariance matrix
wires (Sequence[int]): wires to calculate the expectation for
params (Sequence[int]): the Fock state to return the expectation value for
total_wires (int): total number of wires in the system
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
Returns:
tuple: the Fock state expectation and variance
"""
# pylint: disable=unused-argument
ex = fock_prob(mu, cov, params[0], hbar=hbar)
# var[|n><n|] = E[|n><n|^2] - E[|n><n|]^2 = E[|n><n|] - E[|n><n|]^2
var = ex - ex**2
return ex, var
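# Illustrative check: projecting the vacuum onto the zero-photon Fock
# state gives expectation 1 and variance 0.
#
#     ex, var = fock_expectation(np.zeros(2), np.identity(2), [0], [[0]], 1)
#     assert np.isclose(ex, 1.0) and np.isclose(var, 0.0)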
def identity(*_, **__):
r"""Returns 1.
Returns:
        tuple: the trivial expectation value 1 and variance 0
"""
return 1, 0
#========================================================
# device
#========================================================
class DefaultGaussian(Device):
r"""Default Gaussian device for PennyLane.
Args:
wires (int): the number of modes to initialize the device in
shots (int): How many times the circuit should be evaluated (or sampled) to estimate
the expectation values.
If ``analytic == True``, then the number of shots is ignored
in the calculation of expectation values and variances, and only controls the number
of samples returned by ``sample``.
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
analytic (bool): indicates if the device should calculate expectations
and variances analytically
"""
name = 'Default Gaussian PennyLane plugin'
short_name = 'default.gaussian'
pennylane_requires = '0.8'
version = '0.8.0'
author = 'Xanadu Inc.'
_capabilities = {"model": "cv"}
_operation_map = {
'Beamsplitter': beamsplitter,
'ControlledAddition': controlled_addition,
'ControlledPhase': controlled_phase,
'Displacement': displacement,
'QuadraticPhase': quadratic_phase,
'Rotation': rotation,
'Squeezing': squeezing,
'TwoModeSqueezing': two_mode_squeezing,
'CoherentState': coherent_state,
'DisplacedSqueezedState': displaced_squeezed_state,
'SqueezedState': squeezed_state,
'ThermalState': thermal_state,
'GaussianState': gaussian_state,
'Interferometer': interferometer
}
_observable_map = {
'NumberOperator': photon_number,
'X': homodyne(0),
'P': homodyne(np.pi/2),
'QuadOperator': homodyne(None),
'PolyXP': poly_quad_expectations,
'FockStateProjector': fock_expectation,
'Identity': identity
}
_circuits = {}
def __init__(self, wires, *, shots=1000, hbar=2, analytic=True):
super().__init__(wires, shots)
self.eng = None
self.hbar = hbar
self.analytic = analytic
self.reset()
def pre_apply(self):
self.reset()
def apply(self, operation, wires, par):
if operation == 'Displacement':
self._state = displacement(self._state, wires[0], par[0]*np.exp(1j*par[1]))
return # we are done here
if operation == 'GaussianState':
if wires != list(range(self.num_wires)):
raise ValueError("GaussianState means vector or covariance matrix is "
"the incorrect size for the number of subsystems.")
self._state = self._operation_map[operation](*par, hbar=self.hbar)
return # we are done here
if 'State' in operation:
# set the new device state
mu, cov = self._operation_map[operation](*par, hbar=self.hbar)
# state preparations only act on at most 1 subsystem
self._state = set_state(self._state, wires[0], mu, cov)
return # we are done here
# get the symplectic matrix
S = self._operation_map[operation](*par)
# expand the symplectic to act on the proper subsystem
S = self.expand(S, wires)
# apply symplectic matrix to the means vector
means = S @ self._state[0]
# apply symplectic matrix to the covariance matrix
cov = S @ self._state[1] @ S.T
self._state = [means, cov]
def expand(self, S, wires):
r"""Expands a Symplectic matrix S to act on the entire subsystem.
Args:
S (array): a :math:`2M\times 2M` Symplectic matrix
wires (Sequence[int]): the wires of the modes that S acts on
Returns:
array: the resulting :math:`2N\times 2N` Symplectic matrix
"""
if self.num_wires == 1:
# total number of wires is 1, simply return the matrix
return S
N = self.num_wires
w = np.asarray(wires)
if np.any(w < 0) or np.any(w >= N) or len(set(w)) != len(w):
raise ValueError("Invalid target subsystems provided in 'wires' argument.")
M = len(S) // 2
S2 = np.identity(2 * N)
if M != len(wires):
raise ValueError('Incorrect number of subsystems for provided operation.')
S2[w.reshape(-1, 1), w.reshape(1, -1)] = S[:M, :M].copy() # XX
S2[(w + N).reshape(-1, 1), (w + N).reshape(1, -1)] = S[M:, M:].copy() # PP
S2[w.reshape(-1, 1), (w + N).reshape(1, -1)] = S[:M, M:].copy() # XP
S2[(w + N).reshape(-1, 1), w.reshape(1, -1)] = S[M:, :M].copy() # PX
return S2
def expval(self, observable, wires, par):
if observable == "PolyXP":
mu, cov = self._state
else:
mu, cov = self.reduced_state(wires)
ev, var = self._observable_map[observable](mu, cov, wires, par, self.num_wires, hbar=self.hbar)
if not self.analytic:
# estimate the ev
# use central limit theorem, sample normal distribution once, only ok if n_eval is large
# (see https://en.wikipedia.org/wiki/Berry%E2%80%93Esseen_theorem)
ev = np.random.normal(ev, np.sqrt(var / self.shots))
return ev
def var(self, observable, wires, par):
mu, cov = self.reduced_state(wires)
_, var = self._observable_map[observable](mu, cov, wires, par, hbar=self.hbar, total_wires=self.num_wires)
return var
def cov(self, observable1, wires1, par1, observable2, wires2, par2):
# assume for now that the wires are disjoint
wires = wires1 + wires2
if observable1 != "NumberOperator" or observable2 != "NumberOperator":
raise Exception("Only NumberOperator supported so far.")
# For now we just assume the observables are number operators...
# see Dodonov et al., Multidimensional Hermite polynomial and photon distribution
# They use (p, q) ordering instead of (q, p), but in this case it does not matter because the
# matrices Lambda are the same in both orderings
mu, cov = self.reduced_state(wires)
#mu *= np.sqrt(2*self.hbar)
#cov *= self.hbar/2
Lambda1 = np.zeros((4, 4))
Lambda1[0, 0] = 1
Lambda1[2, 2] = 1
Lambda2 = np.zeros((4, 4))
Lambda2[1, 1] = 1
Lambda2[3, 3] = 1
#return .125 * np.trace(Lambda1 @ cov @ Lambda2 @ cov) + .5 * np.dot(mu, Lambda1 @ cov @ Lambda2 @ mu)
return (np.trace(Lambda1 @ cov @ Lambda2 @ (cov + 2 * np.outer(mu, mu)))) /(2*self.hbar**2)
def sample(self, observable, wires, par):
"""Return a sample of an observable.
.. note::
The ``default.gaussian`` plugin only supports sampling
from :class:`~.X`, :class:`~.P`, and :class:`~.QuadOperator`
observables.
Args:
observable (str): name of the observable
wires (Sequence[int]): subsystems the observable is to be measured on
par (tuple): parameters for the observable
Returns:
array[float]: samples in an array of dimension ``(n, num_wires)``
"""
if len(wires) != 1:
raise ValueError("Only one mode can be measured in homodyne.")
if observable == "X":
phi = 0.0
elif observable == "P":
phi = np.pi/2
elif observable == "QuadOperator":
phi = par[0]
else:
raise NotImplementedError("default.gaussian does not support sampling {}".format(observable))
mu, cov = self.reduced_state(wires)
rot = rotation(phi)
muphi = rot.T @ mu
covphi = rot.T @ cov @ rot
stdphi = np.sqrt(covphi[0, 0])
meanphi = muphi[0]
return np.random.normal(meanphi, stdphi, self.shots)
def reset(self):
"""Reset the device"""
# init the state vector to |00..0>
self._state = vacuum_state(self.num_wires, self.hbar)
def reduced_state(self, wires):
r""" Returns the vector of means and the covariance matrix of the specified wires.
Args:
wires (int of Sequence[int]): indices of the requested wires
Returns:
tuple (means, cov): means is an array containing the vector of means,
and cov is a square array containing the covariance matrix
"""
if wires == list(range(self.num_wires)):
# reduced state is full state
return self._state
# reduce rho down to specified subsystems
if isinstance(wires, int):
wires = [wires]
if np.any(np.array(wires) > self.num_wires):
raise ValueError("The specified wires cannot "
"be larger than the number of subsystems.")
ind = np.concatenate([np.array(wires), np.array(wires)+self.num_wires])
rows = ind.reshape(-1, 1)
cols = ind.reshape(1, -1)
return self._state[0][ind], self._state[1][rows, cols]
@property
def operations(self):
return set(self._operation_map.keys())
@property
def observables(self):
return set(self._observable_map.keys())
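# Illustrative end-to-end sketch (not part of the original source):
# load the device through PennyLane and evaluate a simple Gaussian
# circuit on it.
#
#     dev = qml.device('default.gaussian', wires=1)
#
#     @qml.qnode(dev)
#     def circuit(mag_alpha, phase_alpha, phi):
#         qml.Displacement(mag_alpha, phase_alpha, wires=0)
#         qml.Rotation(phi, wires=0)
#         return qml.expval(qml.NumberOperator(0))
#
#     circuit(0.1, 0.0, 0.2)  # mean photon number |alpha|^2 = 0.01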
| 32.020293
| 114
| 0.579642
|
import numpy as np
from scipy.special import factorial as fac
import pennylane as qml
from pennylane import Device
tolerance = 1e-10
def partitions(s, include_singles=True):
if len(s) == 2:
if include_singles:
yield (s[0],), (s[1],)
yield tuple(s),
else:
if include_singles:
if len(s) > 1:
item_partition = (s[0],)
rest = s[1:]
rest_partitions = partitions(rest, include_singles)
for p in rest_partitions:
yield ((item_partition),) + p
else:
yield tuple(s),
for idx1 in range(1, len(s)):
item_partition = (s[0], s[idx1])
rest = s[1:idx1] + s[idx1+1:]
rest_partitions = partitions(rest, include_singles)
for p in rest_partitions:
yield ((item_partition),) + p
def fock_prob(mu, cov, event, hbar=2.):
N = len(mu)//2
I = np.identity(N)
alpha = (mu[:N] + 1j*mu[N:])/np.sqrt(2*hbar)
beta = np.concatenate([alpha, alpha.conj()])
x = cov[:N, :N]*2/hbar
xp = cov[:N, N:]*2/hbar
p = cov[N:, N:]*2/hbar
aidaj = (x+p+1j*(xp-xp.T)-2*I)/4
aiaj = (x-p+1j*(xp+xp.T))/4
Q = np.block([[aidaj, aiaj.conj()], [aiaj, aidaj.conj()]]) + np.identity(2*N)
Qinv = np.linalg.inv(Q)
sqrt_Qdet = 1/np.sqrt(np.linalg.det(Q).real)
prefactor = np.exp(-beta @ Qinv @ beta.conj()/2)
if np.all(np.array(event) == 0):
return (prefactor*sqrt_Qdet).real/np.prod(fac(event))
O = np.zeros_like(I)
X = np.block([[O, I], [I, O]])
gamma = X @ Qinv.conj() @ beta
ind = [i for sublist in [[idx]*j for idx, j in enumerate(event)] for i in sublist]
ind += [i+N for i in ind]
if np.linalg.norm(beta) < tolerance:
part = partitions(ind, include_singles=False)
else:
part = partitions(ind, include_singles=True)
A = X @ (np.identity(2*N)-Qinv).conj()
summation = np.sum([np.prod([gamma[i[0]] if len(i) == 1 else A[i] for i in p]) for p in part])
return (prefactor*sqrt_Qdet*summation).real/np.prod(fac(event))
#========================================================
# parametrized gates
#========================================================
def rotation(phi):
return np.array([[np.cos(phi), -np.sin(phi)],
[np.sin(phi), np.cos(phi)]])
def displacement(state, wire, alpha, hbar=2):
mu = state[0]
mu[wire] += alpha.real*np.sqrt(2*hbar)
mu[wire+len(mu)//2] += alpha.imag*np.sqrt(2*hbar)
return mu, state[1]
def squeezing(r, phi):
cp = np.cos(phi)
sp = np.sin(phi)
ch = np.cosh(r)
sh = np.sinh(r)
return np.array([[ch-cp*sh, -sp*sh],
[-sp*sh, ch+cp*sh]])
def quadratic_phase(s):
return np.array([[1, 0],
[s, 1]])
def beamsplitter(theta, phi):
cp = np.cos(phi)
sp = np.sin(phi)
ct = np.cos(theta)
st = np.sin(theta)
S = np.array([[ct, -cp*st, 0, -st*sp],
[cp*st, ct, -st*sp, 0],
[0, st*sp, ct, -cp*st],
[st*sp, 0, cp*st, ct]])
return S
def two_mode_squeezing(r, phi):
cp = np.cos(phi)
sp = np.sin(phi)
ch = np.cosh(r)
sh = np.sinh(r)
S = np.array([[ch, cp*sh, 0, sp*sh],
[cp*sh, ch, sp*sh, 0],
[0, sp*sh, ch, -cp*sh],
[sp*sh, 0, -cp*sh, ch]])
return S
def controlled_addition(s):
S = np.array([[1, 0, 0, 0],
[s, 1, 0, 0],
[0, 0, 1, -s],
[0, 0, 0, 1]])
return S
def controlled_phase(s):
S = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, s, 1, 0],
[s, 0, 0, 1]])
return S
def interferometer(U):
N = 2*len(U)
X = U.real
Y = U.imag
rows = np.arange(N).reshape(2, -1).T.flatten()
S = np.vstack([np.hstack([X, -Y]),
np.hstack([Y, X])])[:, rows][rows]
return S
#========================================================
# Arbitrary states and operators
#========================================================
def squeezed_cov(r, phi, hbar=2):
cov = np.array([[np.exp(-2*r), 0],
[0, np.exp(2*r)]]) * hbar/2
R = rotation(phi/2)
return R @ cov @ R.T
def vacuum_state(wires, hbar=2.):
means = np.zeros((2*wires))
cov = np.identity(2*wires) * hbar/2
state = [means, cov]
return state
def coherent_state(a, phi=0, hbar=2.):
alpha = a*np.exp(1j*phi)
means = np.array([alpha.real, alpha.imag]) * np.sqrt(2*hbar)
cov = np.identity(2) * hbar/2
state = [means, cov]
return state
def squeezed_state(r, phi, hbar=2.):
means = np.zeros((2))
state = [means, squeezed_cov(r, phi, hbar)]
return state
def displaced_squeezed_state(a, phi_a, r, phi_r, hbar=2.):
alpha = a * np.exp(1j*phi_a)
means = np.array([alpha.real, alpha.imag]) * np.sqrt(2*hbar)
state = [means, squeezed_cov(r, phi_r, hbar)]
return state
def thermal_state(nbar, hbar=2.):
means = np.zeros([2])
state = [means, (2*nbar+1)*np.identity(2)*hbar/2]
return state
def gaussian_state(mu, cov, hbar=2.):
# pylint: disable=unused-argument
return mu, cov
def set_state(state, wire, mu, cov):
mu0 = state[0]
cov0 = state[1]
N = len(mu0)//2
# insert the new state into the means vector
mu0[[wire, wire+N]] = mu
# insert the new state into the covariance matrix
ind = np.concatenate([np.array([wire]), np.array([wire])+N])
rows = ind.reshape(-1, 1)
cols = ind.reshape(1, -1)
cov0[rows, cols] = cov
return mu0, cov0
#========================================================
# expectations
#========================================================
def photon_number(mu, cov, wires, params, total_wires, hbar=2.):
# pylint: disable=unused-argument
ex = (np.trace(cov) + mu.T @ mu)/(2*hbar) - 1/2
var = (np.trace(cov @ cov) + 2*mu.T @ cov @ mu)/(2*hbar**2) - 1/4
return ex, var
def homodyne(phi=None):
if phi is not None:
def _homodyne(mu, cov, wires, params, total_wires, hbar=2.):
# pylint: disable=unused-argument
rot = rotation(phi)
muphi = rot.T @ mu
covphi = rot.T @ cov @ rot
return muphi[0], covphi[0, 0]
return _homodyne
def _homodyne(mu, cov, wires, params, total_wires, hbar=2.):
# pylint: disable=unused-argument
rot = rotation(params[0])
muphi = rot.T @ mu
covphi = rot.T @ cov @ rot
return muphi[0], covphi[0, 0]
return _homodyne
def poly_quad_expectations(mu, cov, wires, params, total_wires, hbar=2.):
Q = params[0]
# HACK, we need access to the Poly instance in order to expand the matrix!
# TODO: maybe we should make heisenberg_obs a class method or a static method to avoid this being a 'hack'?
op = qml.ops.PolyXP(Q, wires=wires)
Q = op.heisenberg_obs(total_wires)
if Q.ndim == 1:
d = np.r_[Q[1::2], Q[2::2]]
return d.T @ mu + Q[0], d.T @ cov @ d
# convert to the (I, x1,x2,..., p1,p2...) ordering
M = np.vstack((Q[0:1, :], Q[1::2, :], Q[2::2, :]))
M = np.hstack((M[:, 0:1], M[:, 1::2], M[:, 2::2]))
d1 = M[1:, 0]
d2 = M[0, 1:]
A = M[1:, 1:]
d = d1 + d2
k = M[0, 0]
d2 = 2*A @ mu + d
k2 = mu.T @ A @ mu + mu.T @ d + k
ex = np.trace(A @ cov) + k2
var = 2*np.trace(A @ cov @ A @ cov) + d2.T @ cov @ d2
modes = np.arange(2*total_wires).reshape(2, -1).T
groenewald_correction = np.sum([np.linalg.det(hbar*A[:, m][n]) for m in modes for n in modes])
var -= groenewald_correction
return ex, var
def fock_expectation(mu, cov, wires, params, total_wires, hbar=2.):
# pylint: disable=unused-argument
ex = fock_prob(mu, cov, params[0], hbar=hbar)
# var[|n><n|] = E[|n><n|^2] - E[|n><n|]^2 = E[|n><n|] - E[|n><n|]^2
var = ex - ex**2
return ex, var
def identity(*_, **__):
return 1, 0
#========================================================
# device
#========================================================
class DefaultGaussian(Device):
name = 'Default Gaussian PennyLane plugin'
short_name = 'default.gaussian'
pennylane_requires = '0.8'
version = '0.8.0'
author = 'Xanadu Inc.'
_capabilities = {"model": "cv"}
_operation_map = {
'Beamsplitter': beamsplitter,
'ControlledAddition': controlled_addition,
'ControlledPhase': controlled_phase,
'Displacement': displacement,
'QuadraticPhase': quadratic_phase,
'Rotation': rotation,
'Squeezing': squeezing,
'TwoModeSqueezing': two_mode_squeezing,
'CoherentState': coherent_state,
'DisplacedSqueezedState': displaced_squeezed_state,
'SqueezedState': squeezed_state,
'ThermalState': thermal_state,
'GaussianState': gaussian_state,
'Interferometer': interferometer
}
_observable_map = {
'NumberOperator': photon_number,
'X': homodyne(0),
'P': homodyne(np.pi/2),
'QuadOperator': homodyne(None),
'PolyXP': poly_quad_expectations,
'FockStateProjector': fock_expectation,
'Identity': identity
}
_circuits = {}
def __init__(self, wires, *, shots=1000, hbar=2, analytic=True):
super().__init__(wires, shots)
self.eng = None
self.hbar = hbar
self.analytic = analytic
self.reset()
def pre_apply(self):
self.reset()
def apply(self, operation, wires, par):
if operation == 'Displacement':
self._state = displacement(self._state, wires[0], par[0]*np.exp(1j*par[1]))
return # we are done here
if operation == 'GaussianState':
if wires != list(range(self.num_wires)):
raise ValueError("GaussianState means vector or covariance matrix is "
"the incorrect size for the number of subsystems.")
self._state = self._operation_map[operation](*par, hbar=self.hbar)
return # we are done here
if 'State' in operation:
# set the new device state
mu, cov = self._operation_map[operation](*par, hbar=self.hbar)
# state preparations only act on at most 1 subsystem
self._state = set_state(self._state, wires[0], mu, cov)
return # we are done here
# get the symplectic matrix
S = self._operation_map[operation](*par)
# expand the symplectic to act on the proper subsystem
S = self.expand(S, wires)
# apply symplectic matrix to the means vector
means = S @ self._state[0]
# apply symplectic matrix to the covariance matrix
cov = S @ self._state[1] @ S.T
self._state = [means, cov]
def expand(self, S, wires):
if self.num_wires == 1:
# total number of wires is 1, simply return the matrix
return S
N = self.num_wires
w = np.asarray(wires)
if np.any(w < 0) or np.any(w >= N) or len(set(w)) != len(w):
raise ValueError("Invalid target subsystems provided in 'wires' argument.")
M = len(S) // 2
S2 = np.identity(2 * N)
if M != len(wires):
raise ValueError('Incorrect number of subsystems for provided operation.')
S2[w.reshape(-1, 1), w.reshape(1, -1)] = S[:M, :M].copy() # XX
S2[(w + N).reshape(-1, 1), (w + N).reshape(1, -1)] = S[M:, M:].copy() # PP
S2[w.reshape(-1, 1), (w + N).reshape(1, -1)] = S[:M, M:].copy() # XP
S2[(w + N).reshape(-1, 1), w.reshape(1, -1)] = S[M:, :M].copy() # PX
return S2
def expval(self, observable, wires, par):
if observable == "PolyXP":
mu, cov = self._state
else:
mu, cov = self.reduced_state(wires)
ev, var = self._observable_map[observable](mu, cov, wires, par, self.num_wires, hbar=self.hbar)
if not self.analytic:
# estimate the ev
# use central limit theorem, sample normal distribution once, only ok if n_eval is large
# (see https://en.wikipedia.org/wiki/Berry%E2%80%93Esseen_theorem)
ev = np.random.normal(ev, np.sqrt(var / self.shots))
return ev
def var(self, observable, wires, par):
mu, cov = self.reduced_state(wires)
_, var = self._observable_map[observable](mu, cov, wires, par, hbar=self.hbar, total_wires=self.num_wires)
return var
def cov(self, observable1, wires1, par1, observable2, wires2, par2):
# assume for now that the wires are disjoint
wires = wires1 + wires2
if observable1 != "NumberOperator" or observable2 != "NumberOperator":
raise Exception("Only NumberOperator supported so far.")
# For now we just assume the observables are number operators...
# see Dodonov et al., Multidimensional Hermite polynomial and photon distribution
# They use (p, q) ordering instead of (q, p), but in this case it does not matter because the
# matrices Lambda are the same in both orderings
mu, cov = self.reduced_state(wires)
#mu *= np.sqrt(2*self.hbar)
#cov *= self.hbar/2
Lambda1 = np.zeros((4, 4))
Lambda1[0, 0] = 1
Lambda1[2, 2] = 1
Lambda2 = np.zeros((4, 4))
Lambda2[1, 1] = 1
Lambda2[3, 3] = 1
#return .125 * np.trace(Lambda1 @ cov @ Lambda2 @ cov) + .5 * np.dot(mu, Lambda1 @ cov @ Lambda2 @ mu)
return (np.trace(Lambda1 @ cov @ Lambda2 @ (cov + 2 * np.outer(mu, mu)))) /(2*self.hbar**2)
def sample(self, observable, wires, par):
if len(wires) != 1:
raise ValueError("Only one mode can be measured in homodyne.")
if observable == "X":
phi = 0.0
elif observable == "P":
phi = np.pi/2
elif observable == "QuadOperator":
phi = par[0]
else:
raise NotImplementedError("default.gaussian does not support sampling {}".format(observable))
mu, cov = self.reduced_state(wires)
rot = rotation(phi)
muphi = rot.T @ mu
covphi = rot.T @ cov @ rot
stdphi = np.sqrt(covphi[0, 0])
meanphi = muphi[0]
return np.random.normal(meanphi, stdphi, self.shots)
def reset(self):
# init the state vector to |00..0>
self._state = vacuum_state(self.num_wires, self.hbar)
def reduced_state(self, wires):
if wires == list(range(self.num_wires)):
# reduced state is full state
return self._state
# reduce rho down to specified subsystems
if isinstance(wires, int):
wires = [wires]
if np.any(np.array(wires) > self.num_wires):
raise ValueError("The specified wires cannot "
"be larger than the number of subsystems.")
ind = np.concatenate([np.array(wires), np.array(wires)+self.num_wires])
rows = ind.reshape(-1, 1)
cols = ind.reshape(1, -1)
return self._state[0][ind], self._state[1][rows, cols]
@property
def operations(self):
return set(self._operation_map.keys())
@property
def observables(self):
return set(self._observable_map.keys())
| true
| true
|
1c4123d7dca17ab93ea3afbdefc8b1fe3e2114f5
| 20
|
py
|
Python
|
vim/fred.py
|
timm/16
|
56e6c978274fb6a865cc804bee6f4b6ebb4e108f
|
[
"Unlicense"
] | null | null | null |
vim/fred.py
|
timm/16
|
56e6c978274fb6a865cc804bee6f4b6ebb4e108f
|
[
"Unlicense"
] | 1
|
2016-04-28T01:50:44.000Z
|
2016-04-28T01:50:44.000Z
|
vim/fred.py
|
timm/16
|
56e6c978274fb6a865cc804bee6f4b6ebb4e108f
|
[
"Unlicense"
] | null | null | null |
def sasd():
    pass  # no-op placeholder body
| 6.666667
| 12
| 0.55
|
def sasd():
    pass
| true
| true
|
1c412499a255c1d223f8d65a8a695fa0af669689
| 1,573
|
py
|
Python
|
tests/tests_indiv_jobs/test_commandjob.py
|
tadinve/ctm_python_client
|
de44e5012214ec42bb99b7f9b4ebc5394cd14328
|
[
"BSD-3-Clause"
] | null | null | null |
tests/tests_indiv_jobs/test_commandjob.py
|
tadinve/ctm_python_client
|
de44e5012214ec42bb99b7f9b4ebc5394cd14328
|
[
"BSD-3-Clause"
] | null | null | null |
tests/tests_indiv_jobs/test_commandjob.py
|
tadinve/ctm_python_client
|
de44e5012214ec42bb99b7f9b4ebc5394cd14328
|
[
"BSD-3-Clause"
] | null | null | null |
from ctm_python_client.jobs.command import CommandJob
import os
from ctm_python_client.core.bmc_control_m import CmJobFlow
from ctm_python_client.session.session import Session
BASE_PATH = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(BASE_PATH, ".secrets"), "r") as fp:
ctm_uri = fp.readline().strip()
ctm_user = fp.readline().strip()
ctm_pwd = fp.readline().strip()
# Create CTM Session
session = Session(endpoint=ctm_uri, username=ctm_user, password=ctm_pwd)
# CREATE JOB FLOW
t1_flow = CmJobFlow(
application="Naga0.3_Test", sub_application="TestAllJobs", session=session
)
t1_flow.set_run_as(username="ctmuser", host="acb-rhctmv20")
# Define the schedule
months = ["JAN", "OCT", "DEC"]
monthDays = ["ALL"]
weekDays = ["MON", "TUE", "WED", "THU", "FRI"]
fromTime = "0300"
toTime = "2100"
t1_flow.set_schedule(months, monthDays, weekDays, fromTime, toTime)
# Create Folder
fn = os.path.split(__file__)[-1][:-3]
f1 = t1_flow.create_folder(name=fn)
j1 = CommandJob(
folder=f1,
job_name='command',
command="echo hello",
pre_command="echo before running main command",
post_command="echo after running main command",
host="myhost.mycomp.com",
run_as="user1",
)
t1_flow.add_job(folder=f1, job=j1)
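# deploy() returns objects whose str() looks like a Python-literal dict;
# the replace() calls below coerce that text into parseable JSON
# (a brittle but workable shortcut for this test).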
import json
x = t1_flow.deploy()
s = str(x[0])
s = s.replace("'", '"')
s = s.replace("None", '"None"')
s = s.replace("False", '"False"')
s = s.replace("True", '"True"')
s = s.replace("\n", "")
j = json.loads(s)
def test_output():
assert j["successful_smart_folders_count"] == 1
| 26.661017
| 78
| 0.68595
|
from ctm_python_client.jobs.command import CommandJob
import os
from ctm_python_client.core.bmc_control_m import CmJobFlow
from ctm_python_client.session.session import Session
BASE_PATH = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(BASE_PATH, ".secrets"), "r") as fp:
ctm_uri = fp.readline().strip()
ctm_user = fp.readline().strip()
ctm_pwd = fp.readline().strip()
session = Session(endpoint=ctm_uri, username=ctm_user, password=ctm_pwd)
t1_flow = CmJobFlow(
application="Naga0.3_Test", sub_application="TestAllJobs", session=session
)
t1_flow.set_run_as(username="ctmuser", host="acb-rhctmv20")
months = ["JAN", "OCT", "DEC"]
monthDays = ["ALL"]
weekDays = ["MON", "TUE", "WED", "THU", "FRI"]
fromTime = "0300"
toTime = "2100"
t1_flow.set_schedule(months, monthDays, weekDays, fromTime, toTime)
fn = os.path.split(__file__)[-1][:-3]
f1 = t1_flow.create_folder(name=fn)
j1 = CommandJob(
folder=f1,
job_name='command',
command="echo hello",
pre_command="echo before running main command",
post_command="echo after running main command",
host="myhost.mycomp.com",
run_as="user1",
)
t1_flow.add_job(folder=f1, job=j1)
import json
x = t1_flow.deploy()
s = str(x[0])
s = s.replace("'", '"')
s = s.replace("None", '"None"')
s = s.replace("False", '"False"')
s = s.replace("True", '"True"')
s = s.replace("\n", "")
j = json.loads(s)
def test_output():
assert j["successful_smart_folders_count"] == 1
| true
| true
|
1c4124bd25be8fb7e58fbbf5070501ee21d0ff13
| 4,211
|
py
|
Python
|
tests/test_create_container.py
|
movermeyer/pyspaces
|
45c1dba6b8ca467d7617399c8d8b7bdb7370808b
|
[
"MIT"
] | 88
|
2015-04-26T18:03:43.000Z
|
2021-12-17T15:00:04.000Z
|
tests/test_create_container.py
|
movermeyer/pyspaces
|
45c1dba6b8ca467d7617399c8d8b7bdb7370808b
|
[
"MIT"
] | 6
|
2015-04-27T12:36:03.000Z
|
2019-01-18T04:48:24.000Z
|
tests/test_create_container.py
|
movermeyer/pyspaces
|
45c1dba6b8ca467d7617399c8d8b7bdb7370808b
|
[
"MIT"
] | 19
|
2015-04-26T21:16:18.000Z
|
2021-10-04T17:16:30.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pytest
import subprocess as s
from pyspaces import Container, Chroot, Inject, setns
def execute(argv):
"""Execute programm with arguments.
Args:
*args (list): arguments
"""
os.execvp(argv[0], argv)
def test_basic_container(capfd):
"""Check basic namespace.
```
bash# mount -t proc proc /proc
bash# ps ax
PID TTY STAT TIME COMMAND
1 pts/3 S 0:00 bash
22 pts/3 R+ 0:00 ps ax
```
"""
cmd = "mount -t proc proc /proc; ps ax"
c = Container(target=execute, args=(('bash', '-c', cmd),),
uid_map='0 1000 1',
newpid=True, newuser=True, newns=True
)
c.start()
c.join()
out, err = capfd.readouterr()
print(out, err)
assert len(out.splitlines()) == 3
def test_all_ns_container(capfd):
"""Check all namespaces.
```
bash# mount -t proc proc /proc
bash# ps ax
PID TTY STAT TIME COMMAND
1 pts/3 S 0:00 bash
22 pts/3 R+ 0:00 ps ax
```
"""
cmd = "mount -t proc proc /proc; ps ax"
c = Container(target=execute, args=(('bash', '-c', cmd),),
uid_map='0 1000 1', all=True
)
c.start()
c.join()
out, err = capfd.readouterr()
print(out, err)
assert len(out.splitlines()) == 3
def test_not_all_ns_container(capfd):
"""Check all namespaces without network ns.
```
bash# mount -t proc proc /proc
bash# ps ax
PID TTY STAT TIME COMMAND
1 pts/3 S 0:00 bash
22 pts/3 R+ 0:00 ps ax
```
"""
cmd = "mount -t proc proc /proc; ps ax; sleep 0.1"
c = Container(target=execute, args=(('bash', '-c', cmd),),
uid_map='0 1000 1', all=True, net=False
)
c.start()
out, err = capfd.readouterr()
out0 = s.check_output("ifconfig -a", shell=True)
i = Inject(target=execute, args=(('ifconfig', '-a'),),
target_pid=c.pid, all=True
)
i.start()
i.join()
c.join()
out, err = capfd.readouterr()
print(out0)
print(out, err)
assert out != out0
def test_basic_chroot(capfd):
"""Check basic chroot"""
c = Chroot(target=execute, args=(('/bin/ls', '/home/'),),
uid_map=True, newpid=True,
path=os.path.expanduser('~/.local/share/lxc/ubuntu/rootfs/')
)
c.start()
c.join()
out, err = capfd.readouterr()
print(out, err)
assert out == 'ubuntu\n'
def test_all_inject(capfd):
"""Check all ns inject"""
c = Container(target=execute, args=(('bash','-c',
                            'mount -t proc proc /proc; sleep 0.1'),),
uid_map='1000', all=True
)
c.start()
i = Inject(target=execute, args=(('bash', '-c', 'id'),),
target_pid=c.pid, all=True
)
i.start()
i.join()
c.join()
out, err = capfd.readouterr()
print(out, err)
assert out.split()[:2] == ["uid=0(root)", "gid=65534(nogroup)"]
def test_not_all_inject(capfd):
"""Check inject without network ns"""
c = Container(target=execute, args=(('bash','-c',
                            'mount -t proc proc /proc; sleep 2'),),
uid_map='1000', all=True, net=False
)
c.start()
out0 = s.check_output("ifconfig -a", shell=True)
i = Inject(target=execute, args=(('ifconfig', '-a'),),
target_pid=c.pid, all=True
)
i.start()
i.join()
c.join()
out, err = capfd.readouterr()
print(out0)
print(out, err)
assert out != out0
def test_all_setns(capfd):
"""Check basic inject"""
c = Container(target=execute, args=(('bash','-c',
                            'mount -t proc proc /proc; sleep 2'),),
uid_map='1000', all=True
)
c.start()
with setns(c.pid, all=True):
outt = s.check_output("id", shell=True)
out, err = capfd.readouterr()
print(out, err)
print(outt)
c.join()
assert outt.split()[:2] == ["uid=0(root)", "gid=65534(nogroup)"]
if __name__ == '__main__':
pytest.main()
| 26.31875
| 78
| 0.523391
|
import os
import pytest
import subprocess as s
from pyspaces import Container, Chroot, Inject, setns
def execute(argv):
os.execvp(argv[0], argv)
def test_basic_container(capfd):
cmd = "mount -t proc proc /proc; ps ax"
c = Container(target=execute, args=(('bash', '-c', cmd),),
uid_map='0 1000 1',
newpid=True, newuser=True, newns=True
)
c.start()
c.join()
out, err = capfd.readouterr()
print(out, err)
assert len(out.splitlines()) == 3
def test_all_ns_container(capfd):
cmd = "mount -t proc proc /proc; ps ax"
c = Container(target=execute, args=(('bash', '-c', cmd),),
uid_map='0 1000 1', all=True
)
c.start()
c.join()
out, err = capfd.readouterr()
print(out, err)
assert len(out.splitlines()) == 3
def test_not_all_ns_container(capfd):
cmd = "mount -t proc proc /proc; ps ax; sleep 0.1"
c = Container(target=execute, args=(('bash', '-c', cmd),),
uid_map='0 1000 1', all=True, net=False
)
c.start()
out, err = capfd.readouterr()
out0 = s.check_output("ifconfig -a", shell=True)
i = Inject(target=execute, args=(('ifconfig', '-a'),),
target_pid=c.pid, all=True
)
i.start()
i.join()
c.join()
out, err = capfd.readouterr()
print(out0)
print(out, err)
assert out != out0
def test_basic_chroot(capfd):
c = Chroot(target=execute, args=(('/bin/ls', '/home/'),),
uid_map=True, newpid=True,
path=os.path.expanduser('~/.local/share/lxc/ubuntu/rootfs/')
)
c.start()
c.join()
out, err = capfd.readouterr()
print(out, err)
assert out == 'ubuntu\n'
def test_all_inject(capfd):
c = Container(target=execute, args=(('bash','-c',
                      'mount -t proc proc /proc; sleep 0.1'),),
uid_map='1000', all=True
)
c.start()
i = Inject(target=execute, args=(('bash', '-c', 'id'),),
target_pid=c.pid, all=True
)
i.start()
i.join()
c.join()
out, err = capfd.readouterr()
print(out, err)
assert out.split()[:2] == ["uid=0(root)", "gid=65534(nogroup)"]
def test_not_all_inject(capfd):
c = Container(target=execute, args=(('bash','-c',
                      'mount -t proc proc /proc; sleep 2'),),
uid_map='1000', all=True, net=False
)
c.start()
out0 = s.check_output("ifconfig -a", shell=True)
i = Inject(target=execute, args=(('ifconfig', '-a'),),
target_pid=c.pid, all=True
)
i.start()
i.join()
c.join()
out, err = capfd.readouterr()
print(out0)
print(out, err)
assert out != out0
def test_all_setns(capfd):
c = Container(target=execute, args=(('bash','-c',
                      'mount -t proc proc /proc; sleep 2'),),
uid_map='1000', all=True
)
c.start()
with setns(c.pid, all=True):
outt = s.check_output("id", shell=True)
out, err = capfd.readouterr()
print(out, err)
print(outt)
c.join()
assert outt.split()[:2] == ["uid=0(root)", "gid=65534(nogroup)"]
if __name__ == '__main__':
pytest.main()
| true
| true
|
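A minimal sketch of the Container/setns combination the tests above exercise, using only calls that appear in this record; it assumes pyspaces is installed and the kernel allows unprivileged user namespaces.
```
import os
import subprocess
from pyspaces import Container, setns

def execute(argv):
    os.execvp(argv[0], argv)

# Keep the container alive long enough to enter it from outside.
c = Container(target=execute, args=(('bash', '-c', 'sleep 2'),),
              uid_map='0 1000 1', all=True)
c.start()
with setns(c.pid, all=True):
    # Commands launched here join the container's namespaces.
    print(subprocess.check_output('id', shell=True))
c.join()
```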
1c41252554682224e52be6851d2b0268886f09a3
| 2,173
|
py
|
Python
|
core/migrations/0013_auto_20160315_0215.py
|
rafaelbantu/timtec
|
86c51b7440a044704ed33c3e752a6cf6b15ceae3
|
[
"BSD-3-Clause"
] | 21
|
2015-09-23T14:07:16.000Z
|
2022-02-18T01:35:18.000Z
|
core/migrations/0013_auto_20160315_0215.py
|
rafaelbantu/timtec
|
86c51b7440a044704ed33c3e752a6cf6b15ceae3
|
[
"BSD-3-Clause"
] | 178
|
2016-05-10T16:16:19.000Z
|
2021-12-15T20:21:21.000Z
|
core/migrations/0013_auto_20160315_0215.py
|
rafaelbantu/timtec
|
86c51b7440a044704ed33c3e752a6cf6b15ceae3
|
[
"BSD-3-Clause"
] | 18
|
2015-10-23T13:28:17.000Z
|
2021-09-22T13:08:28.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import core.utils
import autoslug.fields
class Migration(migrations.Migration):
dependencies = [
('core', '0012_auto_20160131_1643'),
]
operations = [
migrations.RemoveField(
model_name='ifcertificatetemplate',
name='if_name',
),
migrations.RemoveField(
model_name='ifcertificatetemplate',
name='logo',
),
migrations.RemoveField(
model_name='ifcertificatetemplate',
name='signature',
),
migrations.RemoveField(
model_name='ifcertificatetemplate',
name='signature_name',
),
migrations.AddField(
model_name='certificatetemplate',
name='base_logo',
field=models.ImageField(upload_to=core.utils.HashName(b'base_logo', b'organization_name'), null=True, verbose_name='Logo', blank=True),
),
migrations.AddField(
model_name='certificatetemplate',
name='cert_logo',
field=models.ImageField(upload_to=core.utils.HashName(b'logo', b'organization_name'), null=True, verbose_name='Logo', blank=True),
),
migrations.AddField(
model_name='certificatetemplate',
name='name',
field=models.CharField(max_length=255, null=True, verbose_name='Signature Name', blank=True),
),
migrations.AddField(
model_name='certificatetemplate',
name='organization_name',
field=models.CharField(max_length=50, null=True, verbose_name='Name', blank=True),
),
migrations.AddField(
model_name='certificatetemplate',
name='role',
field=models.CharField(max_length=128, null=True, verbose_name='Role', blank=True),
),
migrations.AlterField(
model_name='lesson',
name='slug',
field=autoslug.fields.AutoSlugField(editable=False, populate_from=b'name', max_length=128, unique=True, verbose_name='Slug'),
),
]
| 34.492063
| 147
| 0.605614
|
from __future__ import unicode_literals
from django.db import migrations, models
import core.utils
import autoslug.fields
class Migration(migrations.Migration):
dependencies = [
('core', '0012_auto_20160131_1643'),
]
operations = [
migrations.RemoveField(
model_name='ifcertificatetemplate',
name='if_name',
),
migrations.RemoveField(
model_name='ifcertificatetemplate',
name='logo',
),
migrations.RemoveField(
model_name='ifcertificatetemplate',
name='signature',
),
migrations.RemoveField(
model_name='ifcertificatetemplate',
name='signature_name',
),
migrations.AddField(
model_name='certificatetemplate',
name='base_logo',
field=models.ImageField(upload_to=core.utils.HashName(b'base_logo', b'organization_name'), null=True, verbose_name='Logo', blank=True),
),
migrations.AddField(
model_name='certificatetemplate',
name='cert_logo',
field=models.ImageField(upload_to=core.utils.HashName(b'logo', b'organization_name'), null=True, verbose_name='Logo', blank=True),
),
migrations.AddField(
model_name='certificatetemplate',
name='name',
field=models.CharField(max_length=255, null=True, verbose_name='Signature Name', blank=True),
),
migrations.AddField(
model_name='certificatetemplate',
name='organization_name',
field=models.CharField(max_length=50, null=True, verbose_name='Name', blank=True),
),
migrations.AddField(
model_name='certificatetemplate',
name='role',
field=models.CharField(max_length=128, null=True, verbose_name='Role', blank=True),
),
migrations.AlterField(
model_name='lesson',
name='slug',
field=autoslug.fields.AutoSlugField(editable=False, populate_from=b'name', max_length=128, unique=True, verbose_name='Slug'),
),
]
| true
| true
|
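A hedged illustration of the model side of a migration like the one above: the field definitions are copied from the AddField operations, while the surrounding model body is an assumption (the real CertificateTemplate has other fields not shown here).
```
from django.db import models

class CertificateTemplate(models.Model):
    # Fields copied from the AddField operations; the rest of the model is assumed.
    organization_name = models.CharField('Name', max_length=50, null=True, blank=True)
    role = models.CharField('Role', max_length=128, null=True, blank=True)
    name = models.CharField('Signature Name', max_length=255, null=True, blank=True)
```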
1c4126afc86b80a33e7354acb5cc01326a0ae386
| 602
|
py
|
Python
|
other/application/windowApp/test/main2.py
|
Ethan7102/FYP
|
c6560a0b95ad78d5e1a341ab2d93c063e10c6631
|
[
"MIT"
] | null | null | null |
other/application/windowApp/test/main2.py
|
Ethan7102/FYP
|
c6560a0b95ad78d5e1a341ab2d93c063e10c6631
|
[
"MIT"
] | null | null | null |
other/application/windowApp/test/main2.py
|
Ethan7102/FYP
|
c6560a0b95ad78d5e1a341ab2d93c063e10c6631
|
[
"MIT"
] | 1
|
2021-01-23T07:59:57.000Z
|
2021-01-23T07:59:57.000Z
|
import sys
import ui_main
from PyQt5 import QtWidgets
Ui_MainWindow = ui_main.Ui_MainWindow
class CoperQt(QtWidgets.QMainWindow, Ui_MainWindow):  # create a Qt object
    def __init__(self):
        QtWidgets.QMainWindow.__init__(self)  # create the main UI object
        Ui_MainWindow.__init__(self)  # initialize the main UI object
        self.setupUi(self)  # configure the main UI object
        #self.label.setText("hi")
        #self.label.text()
        #print(self.label.text())
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    window = CoperQt()  # create the Qt object
    window.show()  # display the Qt object
    #print(window.label.text())
sys.exit(app.exec_())
| 25.083333
| 60
| 0.679402
|
import sys
import ui_main
from PyQt5 import QtWidgets
Ui_MainWindow = ui_main.Ui_MainWindow
class CoperQt(QtWidgets.QMainWindow,Ui_MainWindow):
def __init__(self):
QtWidgets.QMainWindow.__init__(self)
Ui_MainWindow.__init__(self)
self.setupUi(self)
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
window = CoperQt()
window.show()
sys.exit(app.exec_())
| true
| true
|
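main2.py imports a pyuic5-generated ui_main module that is not part of this record; the stub below is a hypothetical stand-in that satisfies the calls the script makes, so the app can be run for illustration.
```
from PyQt5 import QtWidgets

class Ui_MainWindow(object):
    """Hypothetical stand-in for the pyuic5-generated class."""
    def setupUi(self, main_window):
        main_window.setWindowTitle("CoperQt")
        self.label = QtWidgets.QLabel("hi")
        main_window.setCentralWidget(self.label)
```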
1c4126b407a6f7cb32b4335eef5afef876dca360
| 3,962
|
py
|
Python
|
creational/singleton.py
|
prateeksan/python-design-patterns
|
f9c6fc3501a5a2c8467980aaf1ba7a9839bf07cf
|
[
"MIT"
] | 61
|
2017-06-07T22:48:38.000Z
|
2022-02-25T01:06:37.000Z
|
creational/singleton.py
|
smohant/python-design-patterns
|
f9c6fc3501a5a2c8467980aaf1ba7a9839bf07cf
|
[
"MIT"
] | 2
|
2017-06-25T21:08:58.000Z
|
2021-04-20T00:41:49.000Z
|
creational/singleton.py
|
smohant/python-design-patterns
|
f9c6fc3501a5a2c8467980aaf1ba7a9839bf07cf
|
[
"MIT"
] | 12
|
2017-06-08T15:34:52.000Z
|
2022-02-24T09:01:05.000Z
|
""" The Singleton Pattern
Notes:
While there are multiple ways to implement the Singleton pattern, the point of
the Singleton pattern is to expose only one object without the possiblity to
create multiple _instances of the object.
It is important to note that if your need for this design pattern can be met by
a simple class-less python module (within a .py file), that solution is simpler and
usually preferable. One limitation of using a module's namespace instead of a Singleton
is that a module cannot be constructed with arguments and cannot have properties.
I strongly recommend the 'Borg' pattern as a more elegant alternative (see borg.py). It
usually meets all the needs of a Singleton with a more classical OOP approach.
"""
class Singleton:
"""The wrapper class serves as an interface for the nested _Singleton class.
While there may be several instances of this wrapper class, it will always point
to the same instance of the nested object (Singleton._instance)."""
class _Singleton:
"""This nested class should be instantiated only once and assigned to
Singleton._instance. There should never be a need to interface with it outside
the scope of the wrapper class."""
def __init__(self, **kwargs):
"""The exact implementation of the constructor depends on your use case.
This constructor allows for the setting of an unspecified number of attributes."""
            for key, value in kwargs.items():
# For a key:value pair of a:1, the next line would equate to `self.a = 1`
setattr(self, key, value)
# This will store the one and only instance of _Singleton
_instance = None
def __init__(self, **kwargs):
"""The constructor for the wrapper class should only accept arguments for parameters
that the nested _Singleton can accept. Its purpose is to create an instance of the
_Singleton if none exists, or to update the instance if it already exists. The exact
implementation depends on your use case. This implementation allows new instances of
the wrapper to update previously set attributes of the _instance object and add new
ones if needed."""
if not Singleton._instance:
Singleton._instance = Singleton._Singleton(**kwargs)
else:
            for key, value in kwargs.items():
# See line 22 if the line below seems confusing.
setattr(Singleton._instance, key, value)
def __getattr__(self, name):
"""This allows the user to access attributes of the _instance via the wrapper."""
return getattr(self._instance, name)
if __name__ == '__main__':
"""
Let's suppose our singleton represents the state of all settings for an application.
While the use case for this example can be met by a dictionary, using a Singleton allows
you to implement further functionality (such as adding methods).
For example you could implement a `connect_to_db` method that would try to connect to the
db at `db_location` if the attribute is set with a valid value.
"""
app_settings = Singleton(live=True, port=5000)
# All new instances will point back to the pre-existing Singleton
# This implementation allows the new constructor to overwrite attributes or add new ones.
app_settings_2 = Singleton(port=3000, db_location="far_away")
# Tests
print("Do app_settings and app_settings_2 share the same instance?")
print(app_settings._instance is app_settings_2._instance) # True
print("Do app_settings and app_settings_2 share the same state?")
print("live: {}".format(app_settings.live == app_settings_2.live == True)) # True
print("port: {}".format(app_settings.port == app_settings_2.port == 3000)) # True
print("db_location: {}".format(app_settings.db_location == app_settings_2.db_location ==
"far_away")) # True
| 45.022727
| 94
| 0.711762
|
class Singleton:
class _Singleton:
def __init__(self, **kwargs):
            for key, value in kwargs.items():
setattr(self, key, value)
_instance = None
def __init__(self, **kwargs):
if not Singleton._instance:
Singleton._instance = Singleton._Singleton(**kwargs)
else:
            for key, value in kwargs.items():
setattr(Singleton._instance, key, value)
def __getattr__(self, name):
return getattr(self._instance, name)
if __name__ == '__main__':
app_settings = Singleton(live=True, port=5000)
app_settings_2 = Singleton(port=3000, db_location="far_away")
print("Do app_settings and app_settings_2 share the same instance?")
print(app_settings._instance is app_settings_2._instance)
print("Do app_settings and app_settings_2 share the same state?")
print("live: {}".format(app_settings.live == app_settings_2.live == True))
print("port: {}".format(app_settings.port == app_settings_2.port == 3000))
print("db_location: {}".format(app_settings.db_location == app_settings_2.db_location ==
"far_away"))
| true
| true
|
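The singleton docstring above recommends the Borg pattern as a more elegant alternative; a minimal sketch of it (Python 3, so dict.update replaces the items loop):
```
class Borg:
    _shared_state = {}

    def __init__(self, **kwargs):
        self.__dict__ = self._shared_state  # every instance shares one state dict
        self.__dict__.update(kwargs)

a = Borg(live=True, port=5000)
b = Borg(port=3000)
assert a.port == b.port == 3000  # state is shared
assert a is not b                # but the instances stay distinct objects
```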
1c41271be1b5aa6a3435445a84975d388d71bf32
| 320
|
py
|
Python
|
django_town/oauth2/grant/sessiongrant.py
|
uptown/django-town
|
4c3b078a8ce5dcc275d65faa4a1cdfb7ebc74a50
|
[
"MIT"
] | null | null | null |
django_town/oauth2/grant/sessiongrant.py
|
uptown/django-town
|
4c3b078a8ce5dcc275d65faa4a1cdfb7ebc74a50
|
[
"MIT"
] | null | null | null |
django_town/oauth2/grant/sessiongrant.py
|
uptown/django-town
|
4c3b078a8ce5dcc275d65faa4a1cdfb7ebc74a50
|
[
"MIT"
] | null | null | null |
#-*- coding: utf-8 -*-
from .grant import Grant
from ..endpoint import TokenEndpoint
class SessionGrant(Grant):
def get_credential(self, expires_in):
self._token_endpoint = TokenEndpoint(self._server, self._request, self._client)
return self._token_endpoint.get_credential_session(expires_in)
| 29.090909
| 87
| 0.740625
|
from .grant import Grant
from ..endpoint import TokenEndpoint
class SessionGrant(Grant):
def get_credential(self, expires_in):
self._token_endpoint = TokenEndpoint(self._server, self._request, self._client)
return self._token_endpoint.get_credential_session(expires_in)
| true
| true
|
1c41280faea7ed4db16a0e0b319e02226e95b412
| 2,089
|
py
|
Python
|
visualize/usecases/get_user_commits.py
|
RevanthRyo/Alize
|
60f4153c0c4b665e60c02bc90f99f833bf3173c8
|
[
"Unlicense"
] | 160
|
2018-05-08T09:12:35.000Z
|
2021-11-08T14:45:18.000Z
|
visualize/usecases/get_user_commits.py
|
RevanthRyo/Alize
|
60f4153c0c4b665e60c02bc90f99f833bf3173c8
|
[
"Unlicense"
] | 15
|
2018-05-08T09:13:53.000Z
|
2022-03-11T23:20:39.000Z
|
visualize/usecases/get_user_commits.py
|
RevanthRyo/Alize
|
60f4153c0c4b665e60c02bc90f99f833bf3173c8
|
[
"Unlicense"
] | 12
|
2018-05-08T16:19:11.000Z
|
2021-11-08T14:45:58.000Z
|
import json
import requests
from visualize.utils.api import Client
from visualize.usecases.generate_wordcloud import GenerateWordCloud
class GetUserCommits():
"""docstring for GetUserCommits"""
def __init__(self, repo_info):
self.repo_names = repo_info
self.result = {"total_commits": 0, "last_5_commits": [], "all_commits": []}
def count_user_commits(self, user):
for repo in self.repo_names:
yield repo, self.count_repo_commits(user, repo)
def extract_more_info(self, repo_name, commits):
for c in commits:
self.result["all_commits"].append({
"name": repo_name,
"message": c["commit"]["message"] if "commit" in c else "",
"date": c["commit"]["committer"]["date"] if "commit" in c else ""
})
def most_popular_words(self):
generate = GenerateWordCloud()
self.result["word_cloud_file"] = generate.execute("".join([i["message"] for i in self.result["all_commits"]]))
def count_repo_commits(self, user, repo, _acc=0, page=1):
request = Client().user_commits(url_params={"username": user, "repo_name": repo, "page": page}, pure=True)
commits = request.json()
self.extract_more_info(repo, commits)
commit_count = len(commits)
if commit_count == 0:
return _acc
link = request.headers.get('link')
if link is None:
return _acc + commit_count
next_url = self.find_next(request.headers['link'])
if next_url is None:
return _acc + commit_count
return self.count_repo_commits(user, repo, _acc + commit_count, page=page+1)
def find_next(self, link):
for l in link.split(','):
a, b = l.split(';')
if b.strip() == 'rel="next"':
return a.strip()[1:-1]
def execute(self, user):
for repo_name, commit_count in self.count_user_commits(user):
self.result["total_commits"] += commit_count
print ("Repo %s has %d commits" % (repo_name, commit_count))
print ("Total commits: %d" % self.result["total_commits"])
self.result["all_commits"].sort(key=lambda item: item["date"], reverse=True)
self.result["last_5_commits"].extend(self.result["all_commits"][:5])
self.most_popular_words()
return self.result
| 34.245902
| 112
| 0.702729
|
import json
import requests
from visualize.utils.api import Client
from visualize.usecases.generate_wordcloud import GenerateWordCloud
class GetUserCommits():
def __init__(self, repo_info):
self.repo_names = repo_info
self.result = {"total_commits": 0, "last_5_commits": [], "all_commits": []}
def count_user_commits(self, user):
for repo in self.repo_names:
yield repo, self.count_repo_commits(user, repo)
def extract_more_info(self, repo_name, commits):
for c in commits:
self.result["all_commits"].append({
"name": repo_name,
"message": c["commit"]["message"] if "commit" in c else "",
"date": c["commit"]["committer"]["date"] if "commit" in c else ""
})
def most_popular_words(self):
generate = GenerateWordCloud()
self.result["word_cloud_file"] = generate.execute("".join([i["message"] for i in self.result["all_commits"]]))
def count_repo_commits(self, user, repo, _acc=0, page=1):
request = Client().user_commits(url_params={"username": user, "repo_name": repo, "page": page}, pure=True)
commits = request.json()
self.extract_more_info(repo, commits)
commit_count = len(commits)
if commit_count == 0:
return _acc
link = request.headers.get('link')
if link is None:
return _acc + commit_count
next_url = self.find_next(request.headers['link'])
if next_url is None:
return _acc + commit_count
return self.count_repo_commits(user, repo, _acc + commit_count, page=page+1)
def find_next(self, link):
for l in link.split(','):
a, b = l.split(';')
if b.strip() == 'rel="next"':
return a.strip()[1:-1]
def execute(self, user):
for repo_name, commit_count in self.count_user_commits(user):
self.result["total_commits"] += commit_count
print ("Repo %s has %d commits" % (repo_name, commit_count))
print ("Total commits: %d" % self.result["total_commits"])
self.result["all_commits"].sort(key=lambda item: item["date"], reverse=True)
self.result["last_5_commits"].extend(self.result["all_commits"][:5])
self.most_popular_words()
return self.result
| true
| true
|
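A worked example of the Link-header pagination that count_repo_commits and find_next rely on; the header value is a realistic sample of GitHub's RFC 5988 format, not captured API output.
```
link = ('<https://api.github.com/repositories/1/commits?page=2>; rel="next", '
        '<https://api.github.com/repositories/1/commits?page=9>; rel="last"')

def find_next(link):
    for part in link.split(','):
        url, rel = part.split(';')
        if rel.strip() == 'rel="next"':
            return url.strip()[1:-1]  # strip the surrounding angle brackets

print(find_next(link))  # https://api.github.com/repositories/1/commits?page=2
```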
1c4128b52113ded399b48959f457c867d2698b26
| 9,739
|
py
|
Python
|
utils/clustering.py
|
autobotasia/autoface
|
8283a089c824acc62640899864111cf6962cb692
|
[
"MIT"
] | 1
|
2020-01-02T06:53:32.000Z
|
2020-01-02T06:53:32.000Z
|
utils/clustering.py
|
autobotasia/autoface
|
8283a089c824acc62640899864111cf6962cb692
|
[
"MIT"
] | 7
|
2020-01-02T12:44:03.000Z
|
2021-06-10T20:19:20.000Z
|
utils/clustering.py
|
autobotasia/autoface
|
8283a089c824acc62640899864111cf6962cb692
|
[
"MIT"
] | 3
|
2020-01-22T09:38:19.000Z
|
2020-08-12T23:59:27.000Z
|
""" Face Cluster """
import tensorflow as tf
import numpy as np
import importlib
import argparse
import facenet
import os
import math
def face_distance(face_encodings, face_to_compare):
"""
    Given a list of face encodings, compare them to a known face encoding and get a
    similarity score for each comparison face (a dot product, i.e. cosine similarity
    for L2-normalized encodings); higher scores mean more similar faces.
    :param face_encodings: List of face encodings to compare
    :param face_to_compare: A face encoding to compare against
    :return: A numpy ndarray with the score for each face, in the same order as 'face_encodings'
"""
import numpy as np
if len(face_encodings) == 0:
return np.empty((0))
#return 1/np.linalg.norm(face_encodings - face_to_compare, axis=1)
return np.sum(face_encodings*face_to_compare,axis=1)
def load_model(model_dir, meta_file, ckpt_file):
model_dir_exp = os.path.expanduser(model_dir)
saver = tf.train.import_meta_graph(os.path.join(model_dir_exp, meta_file))
saver.restore(tf.get_default_session(), os.path.join(model_dir_exp, ckpt_file))
def _chinese_whispers(encoding_list, threshold=0.75, iterations=20):
""" Chinese Whispers Algorithm
Modified from Alex Loveless' implementation,
http://alexloveless.co.uk/data/chinese-whispers-graph-clustering-in-python/
Inputs:
encoding_list: a list of facial encodings from face_recognition
        threshold: facial match threshold, default 0.75
iterations: since chinese whispers is an iterative algorithm, number of times to iterate
Outputs:
sorted_clusters: a list of clusters, a cluster being a list of imagepaths,
sorted by largest cluster to smallest
"""
#from face_recognition.api import _face_distance
from random import shuffle
import networkx as nx
# Create graph
nodes = []
edges = []
image_paths, encodings = zip(*encoding_list)
if len(encodings) <= 1:
print ("No enough encodings to cluster!")
return []
for idx, face_encoding_to_check in enumerate(encodings):
# Adding node of facial encoding
node_id = idx+1
# Initialize 'cluster' to unique value (cluster of itself)
node = (node_id, {'cluster': image_paths[idx], 'path': image_paths[idx]})
nodes.append(node)
# Facial encodings to compare
if (idx+1) >= len(encodings):
# Node is last element, don't create edge
break
compare_encodings = encodings[idx+1:]
distances = face_distance(compare_encodings, face_encoding_to_check)
encoding_edges = []
for i, distance in enumerate(distances):
if distance > threshold:
# Add edge if facial match
edge_id = idx+i+2
encoding_edges.append((node_id, edge_id, {'weight': distance}))
edges = edges + encoding_edges
G = nx.Graph()
G.add_nodes_from(nodes)
G.add_edges_from(edges)
# Iterate
for _ in range(0, iterations):
cluster_nodes = G.nodes()
#shuffle(cluster_nodes)
for node in cluster_nodes:
neighbors = G[node]
clusters = {}
for ne in neighbors:
if isinstance(ne, int):
if G.nodes[ne]['cluster'] in clusters:
clusters[G.nodes[ne]['cluster']] += G[node][ne]['weight']
else:
clusters[G.nodes[ne]['cluster']] = G[node][ne]['weight']
# find the class with the highest edge weight sum
edge_weight_sum = 0
max_cluster = 0
#use the max sum of neighbor weights class as current node's class
for cluster in clusters:
if clusters[cluster] > edge_weight_sum:
edge_weight_sum = clusters[cluster]
max_cluster = cluster
# set the class of target node to the winning local class
G.nodes[node]['cluster'] = max_cluster
clusters = {}
# Prepare cluster output
for (_, data) in G.nodes.items():
cluster = data['cluster']
path = data['path']
if cluster:
if cluster not in clusters:
clusters[cluster] = []
clusters[cluster].append(path)
# Sort cluster output
sorted_clusters = sorted(clusters.values(), key=len, reverse=True)
return sorted_clusters
def cluster_facial_encodings(facial_encodings):
""" Cluster facial encodings
    Intended to be an optional switch for different clustering algorithms; as of right now
    only Chinese Whispers is available.
Input:
facial_encodings: (image_path, facial_encoding) dictionary of facial encodings
Output:
sorted_clusters: a list of clusters, a cluster being a list of imagepaths,
sorted by largest cluster to smallest
"""
if len(facial_encodings) <= 1:
print ("Number of facial encodings must be greater than one, can't cluster")
return []
# Only use the chinese whispers algorithm for now
sorted_clusters = _chinese_whispers(facial_encodings.items())
return sorted_clusters
def compute_facial_encodings(sess,images_placeholder,embeddings,phase_train_placeholder,image_size,
embedding_size,nrof_images,nrof_batches,emb_array,batch_size,paths):
""" Compute Facial Encodings
Given a set of images, compute the facial encodings of each face detected in the images and
return them. If no faces, or more than one face found, return nothing for that image.
Inputs:
image_paths: a list of image paths
Outputs:
facial_encodings: (image_path, facial_encoding) dictionary of facial encodings
"""
for i in range(nrof_batches):
start_index = i*batch_size
end_index = min((i+1)*batch_size, nrof_images)
paths_batch = paths[start_index:end_index]
images = facenet.load_data(paths_batch, False, False, image_size)
feed_dict = { images_placeholder:images, phase_train_placeholder:False }
emb_array[start_index:end_index,:] = sess.run(embeddings, feed_dict=feed_dict)
facial_encodings = {}
for x in range(nrof_images):
facial_encodings[paths[x]] = emb_array[x,:]
return facial_encodings
def get_onedir(paths):
dataset = []
path_exp = os.path.expanduser(paths)
if os.path.isdir(path_exp):
images = os.listdir(path_exp)
image_paths = [os.path.join(path_exp,img) for img in images]
for x in image_paths:
if os.path.getsize(x)>0:
dataset.append(x)
return dataset
def main(args):
""" Main
Given a list of images, save out facial encoding data files and copy
images into folders of face clusters.
"""
from os.path import join, basename, exists
from os import makedirs
import numpy as np
import shutil
import sys
if not exists(args.output):
makedirs(args.output)
with tf.Graph().as_default():
with tf.Session() as sess:
image_paths = get_onedir(args.input)
#image_list, label_list = facenet.get_image_paths_and_labels(train_set)
meta_file, ckpt_file = facenet.get_model_filenames(os.path.expanduser(args.model_dir))
print('Metagraph file: %s' % meta_file)
print('Checkpoint file: %s' % ckpt_file)
load_model(args.model_dir, meta_file, ckpt_file)
# Get input and output tensors
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
image_size = images_placeholder.get_shape()[1]
print("image_size:",image_size)
embedding_size = embeddings.get_shape()[1]
# Run forward pass to calculate embeddings
            print('Running forward pass on images')
nrof_images = len(image_paths)
nrof_batches = int(math.ceil(1.0*nrof_images / args.batch_size))
emb_array = np.zeros((nrof_images, embedding_size))
facial_encodings = compute_facial_encodings(sess,images_placeholder,embeddings,phase_train_placeholder,image_size,
embedding_size,nrof_images,nrof_batches,emb_array,args.batch_size,image_paths)
sorted_clusters = cluster_facial_encodings(facial_encodings)
num_cluster = len(sorted_clusters)
# Copy image files to cluster folders
for idx, cluster in enumerate(sorted_clusters):
#save all the cluster
cluster_dir = join(args.output, str(idx))
if not exists(cluster_dir):
makedirs(cluster_dir)
for path in cluster:
shutil.copy(path, join(cluster_dir, basename(path)))
def parse_args():
"""Parse input arguments."""
import argparse
parser = argparse.ArgumentParser(description='Get a shape mesh (t-pose)')
parser.add_argument('--model_dir', type=str, help='model dir', required=True)
    parser.add_argument('--batch_size', type=int, help='batch size', default=30)
parser.add_argument('--input', type=str, help='Input dir of images', required=True)
parser.add_argument('--output', type=str, help='Output dir of clusters', required=True)
args = parser.parse_args()
return args
if __name__ == '__main__':
""" Entry point """
main(parse_args())
| 36.339552
| 126
| 0.645652
|
import tensorflow as tf
import numpy as np
import importlib
import argparse
import facenet
import os
import math
def face_distance(face_encodings, face_to_compare):
import numpy as np
if len(face_encodings) == 0:
return np.empty((0))
return np.sum(face_encodings*face_to_compare,axis=1)
def load_model(model_dir, meta_file, ckpt_file):
model_dir_exp = os.path.expanduser(model_dir)
saver = tf.train.import_meta_graph(os.path.join(model_dir_exp, meta_file))
saver.restore(tf.get_default_session(), os.path.join(model_dir_exp, ckpt_file))
def _chinese_whispers(encoding_list, threshold=0.75, iterations=20):
from random import shuffle
import networkx as nx
nodes = []
edges = []
image_paths, encodings = zip(*encoding_list)
if len(encodings) <= 1:
print ("No enough encodings to cluster!")
return []
for idx, face_encoding_to_check in enumerate(encodings):
node_id = idx+1
node = (node_id, {'cluster': image_paths[idx], 'path': image_paths[idx]})
nodes.append(node)
if (idx+1) >= len(encodings):
break
compare_encodings = encodings[idx+1:]
distances = face_distance(compare_encodings, face_encoding_to_check)
encoding_edges = []
for i, distance in enumerate(distances):
if distance > threshold:
# Add edge if facial match
edge_id = idx+i+2
encoding_edges.append((node_id, edge_id, {'weight': distance}))
edges = edges + encoding_edges
G = nx.Graph()
G.add_nodes_from(nodes)
G.add_edges_from(edges)
# Iterate
for _ in range(0, iterations):
cluster_nodes = G.nodes()
#shuffle(cluster_nodes)
for node in cluster_nodes:
neighbors = G[node]
clusters = {}
for ne in neighbors:
if isinstance(ne, int):
if G.nodes[ne]['cluster'] in clusters:
clusters[G.nodes[ne]['cluster']] += G[node][ne]['weight']
else:
clusters[G.nodes[ne]['cluster']] = G[node][ne]['weight']
# find the class with the highest edge weight sum
edge_weight_sum = 0
max_cluster = 0
#use the max sum of neighbor weights class as current node's class
for cluster in clusters:
if clusters[cluster] > edge_weight_sum:
edge_weight_sum = clusters[cluster]
max_cluster = cluster
G.nodes[node]['cluster'] = max_cluster
clusters = {}
for (_, data) in G.nodes.items():
cluster = data['cluster']
path = data['path']
if cluster:
if cluster not in clusters:
clusters[cluster] = []
clusters[cluster].append(path)
sorted_clusters = sorted(clusters.values(), key=len, reverse=True)
return sorted_clusters
def cluster_facial_encodings(facial_encodings):
if len(facial_encodings) <= 1:
print ("Number of facial encodings must be greater than one, can't cluster")
return []
# Only use the chinese whispers algorithm for now
sorted_clusters = _chinese_whispers(facial_encodings.items())
return sorted_clusters
def compute_facial_encodings(sess,images_placeholder,embeddings,phase_train_placeholder,image_size,
embedding_size,nrof_images,nrof_batches,emb_array,batch_size,paths):
for i in range(nrof_batches):
start_index = i*batch_size
end_index = min((i+1)*batch_size, nrof_images)
paths_batch = paths[start_index:end_index]
images = facenet.load_data(paths_batch, False, False, image_size)
feed_dict = { images_placeholder:images, phase_train_placeholder:False }
emb_array[start_index:end_index,:] = sess.run(embeddings, feed_dict=feed_dict)
facial_encodings = {}
for x in range(nrof_images):
facial_encodings[paths[x]] = emb_array[x,:]
return facial_encodings
def get_onedir(paths):
dataset = []
path_exp = os.path.expanduser(paths)
if os.path.isdir(path_exp):
images = os.listdir(path_exp)
image_paths = [os.path.join(path_exp,img) for img in images]
for x in image_paths:
if os.path.getsize(x)>0:
dataset.append(x)
return dataset
def main(args):
from os.path import join, basename, exists
from os import makedirs
import numpy as np
import shutil
import sys
if not exists(args.output):
makedirs(args.output)
with tf.Graph().as_default():
with tf.Session() as sess:
image_paths = get_onedir(args.input)
#image_list, label_list = facenet.get_image_paths_and_labels(train_set)
meta_file, ckpt_file = facenet.get_model_filenames(os.path.expanduser(args.model_dir))
print('Metagraph file: %s' % meta_file)
print('Checkpoint file: %s' % ckpt_file)
load_model(args.model_dir, meta_file, ckpt_file)
# Get input and output tensors
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
image_size = images_placeholder.get_shape()[1]
print("image_size:",image_size)
embedding_size = embeddings.get_shape()[1]
# Run forward pass to calculate embeddings
            print('Running forward pass on images')
nrof_images = len(image_paths)
nrof_batches = int(math.ceil(1.0*nrof_images / args.batch_size))
emb_array = np.zeros((nrof_images, embedding_size))
facial_encodings = compute_facial_encodings(sess,images_placeholder,embeddings,phase_train_placeholder,image_size,
embedding_size,nrof_images,nrof_batches,emb_array,args.batch_size,image_paths)
sorted_clusters = cluster_facial_encodings(facial_encodings)
num_cluster = len(sorted_clusters)
# Copy image files to cluster folders
for idx, cluster in enumerate(sorted_clusters):
#save all the cluster
cluster_dir = join(args.output, str(idx))
if not exists(cluster_dir):
makedirs(cluster_dir)
for path in cluster:
shutil.copy(path, join(cluster_dir, basename(path)))
def parse_args():
import argparse
parser = argparse.ArgumentParser(description='Get a shape mesh (t-pose)')
parser.add_argument('--model_dir', type=str, help='model dir', required=True)
    parser.add_argument('--batch_size', type=int, help='batch size', default=30)
parser.add_argument('--input', type=str, help='Input dir of images', required=True)
parser.add_argument('--output', type=str, help='Output dir of clusters', required=True)
args = parser.parse_args()
return args
if __name__ == '__main__':
main(parse_args())
| true
| true
|
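Why face_distance above can be a plain dot product: FaceNet-style embeddings are L2-normalized, so the dot product equals cosine similarity. A small numpy check (assumes numpy >= 1.17 for default_rng; random vectors stand in for real embeddings):
```
import numpy as np

rng = np.random.default_rng(0)
emb = rng.normal(size=(5, 128))
emb /= np.linalg.norm(emb, axis=1, keepdims=True)  # unit length, like FaceNet output
probe = emb[0]

scores = np.sum(emb * probe, axis=1)  # what face_distance() computes
cosine = emb @ probe / (np.linalg.norm(emb, axis=1) * np.linalg.norm(probe))
assert np.allclose(scores, cosine)
```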
1c41298b6c44587b3d02dcadff97e0acc59d5515
| 1,045
|
py
|
Python
|
venv/Scripts/static-script.py
|
thiagofreitascarneiro/Projeto_Fusion
|
4bf9d1c69ddf83fbc957e9ccdc41112d71bbffa9
|
[
"MIT"
] | null | null | null |
venv/Scripts/static-script.py
|
thiagofreitascarneiro/Projeto_Fusion
|
4bf9d1c69ddf83fbc957e9ccdc41112d71bbffa9
|
[
"MIT"
] | null | null | null |
venv/Scripts/static-script.py
|
thiagofreitascarneiro/Projeto_Fusion
|
4bf9d1c69ddf83fbc957e9ccdc41112d71bbffa9
|
[
"MIT"
] | null | null | null |
#!c:\users\thiago\pycharmprojects\pythonproject1\pythonproject\pythonproject\fusion\venv\scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'static3==0.7.0','console_scripts','static'
import re
import sys
# for compatibility with easy_install; see #2198
__requires__ = 'static3==0.7.0'
try:
from importlib.metadata import distribution
except ImportError:
try:
from importlib_metadata import distribution
except ImportError:
from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
dist_name, _, _ = spec.partition('==')
matches = (
entry_point
for entry_point in distribution(dist_name).entry_points
if entry_point.group == group and entry_point.name == name
)
return next(matches).load()
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(load_entry_point('static3==0.7.0', 'console_scripts', 'static')())
| 30.735294
| 107
| 0.713876
|
import re
import sys
__requires__ = 'static3==0.7.0'
try:
from importlib.metadata import distribution
except ImportError:
try:
from importlib_metadata import distribution
except ImportError:
from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
dist_name, _, _ = spec.partition('==')
matches = (
entry_point
for entry_point in distribution(dist_name).entry_points
if entry_point.group == group and entry_point.name == name
)
return next(matches).load()
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(load_entry_point('static3==0.7.0', 'console_scripts', 'static')())
| true
| true
|
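The wrapper script above boils down to resolving and calling one console-script entry point; here is that resolution in isolation, assuming Python 3.8+ and an installed static3 distribution:
```
from importlib.metadata import distribution

entry_points = distribution('static3').entry_points
main = next(ep for ep in entry_points
            if ep.group == 'console_scripts' and ep.name == 'static').load()
# Calling main() would now run the `static` CLI.
```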
1c412ab71ef0761b26af363087dbd1a8f623c110
| 245
|
py
|
Python
|
core/colors/__init__.py
|
TusharAMD/astar
|
df3911b6f7ce765dc9964ce8feaf26d3195fe8fc
|
[
"Apache-2.0"
] | 14
|
2020-01-13T13:20:43.000Z
|
2020-04-16T00:03:59.000Z
|
core/colors/__init__.py
|
TusharAMD/astar
|
df3911b6f7ce765dc9964ce8feaf26d3195fe8fc
|
[
"Apache-2.0"
] | 1
|
2020-08-06T03:17:30.000Z
|
2020-08-06T03:17:30.000Z
|
core/colors/__init__.py
|
mHaisham/sortvis
|
91d56acbdd575c129fc88e5ae54aaf5caae55eb0
|
[
"Apache-2.0"
] | 1
|
2020-01-13T14:02:44.000Z
|
2020-01-13T14:02:44.000Z
|
from .color import Color
TRANSPARENT = Color(255, 255, 255, 0)
WHITE = Color(255, 255, 255)
BLACK = Color(0, 0, 0)
RED = Color(255, 0, 0)
GREEN = Color(0, 255, 0)
BLUE = Color(0, 0, 255)
YELLOW = Color(255, 255, 0)
PURPLE = Color(255, 0, 255)
| 20.416667
| 37
| 0.636735
|
from .color import Color
TRANSPARENT = Color(255, 255, 255, 0)
WHITE = Color(255, 255, 255)
BLACK = Color(0, 0, 0)
RED = Color(255, 0, 0)
GREEN = Color(0, 255, 0)
BLUE = Color(0, 0, 255)
YELLOW = Color(255, 255, 0)
PURPLE = Color(255, 0, 255)
| true
| true
|
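core/colors/color.py is not included in this record, so the class below is a hypothetical reconstruction that merely satisfies the constructor calls above (three RGB components plus an optional alpha); the real Color class may differ.
```
class Color:
    # Hypothetical reconstruction; the real core.colors.color.Color may differ.
    def __init__(self, r, g, b, a=255):
        self.r, self.g, self.b, self.a = r, g, b, a

    def rgba(self):
        return (self.r, self.g, self.b, self.a)

assert Color(255, 255, 255, 0).rgba() == (255, 255, 255, 0)  # TRANSPARENT above
```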
1c412b8e6df6f6e57732bfcd32cf011a354ffa83
| 1,859
|
py
|
Python
|
pylgbst/comms/cpygatt.py
|
karatheodory/pylgbst
|
2aa82a818623fe8ecb8a6fc1c6ae083e56debd19
|
[
"MIT"
] | 1
|
2021-04-04T13:31:28.000Z
|
2021-04-04T13:31:28.000Z
|
pylgbst/comms/cpygatt.py
|
karatheodory/pylgbst
|
2aa82a818623fe8ecb8a6fc1c6ae083e56debd19
|
[
"MIT"
] | null | null | null |
pylgbst/comms/cpygatt.py
|
karatheodory/pylgbst
|
2aa82a818623fe8ecb8a6fc1c6ae083e56debd19
|
[
"MIT"
] | null | null | null |
import logging
import pygatt
from pylgbst.comms import Connection, LEGO_MOVE_HUB
from pylgbst.constants import MOVE_HUB_HW_UUID_CHAR
from pylgbst.utilities import str2hex
log = logging.getLogger('comms-pygatt')
class GattoolConnection(Connection):
"""
    Used for connecting to the Move Hub over BLE via the gatttool backend.
:type _conn_hnd: pygatt.backends.bgapi.device.BGAPIBLEDevice
"""
def __init__(self, controller='hci0'):
Connection.__init__(self)
self.backend = lambda: pygatt.GATTToolBackend(hci_device=controller)
self._conn_hnd = None
def connect(self, hub_mac=None):
log.debug("Trying to connect client to MoveHub with MAC: %s", hub_mac)
adapter = self.backend()
adapter.start()
while not self._conn_hnd:
log.info("Discovering devices...")
devices = adapter.scan(1)
log.debug("Devices: %s", devices)
for dev in devices:
address = dev['address']
name = dev['name']
if name == LEGO_MOVE_HUB or hub_mac == address:
logging.info("Found %s at %s", name, address)
self._conn_hnd = adapter.connect(address)
break
if self._conn_hnd:
break
return self
def disconnect(self):
self._conn_hnd.disconnect()
def write(self, handle, data):
log.debug("Writing to handle %s: %s", handle, str2hex(data))
return self._conn_hnd.char_write_handle(handle, bytearray(data))
def set_notify_handler(self, handler):
self._conn_hnd.subscribe(MOVE_HUB_HW_UUID_CHAR, handler)
def is_alive(self):
return True
class BlueGigaConnection(GattoolConnection):
def __init__(self):
super(BlueGigaConnection, self).__init__()
self.backend = lambda: pygatt.BGAPIBackend()
| 28.6
| 78
| 0.631522
|
import logging
import pygatt
from pylgbst.comms import Connection, LEGO_MOVE_HUB
from pylgbst.constants import MOVE_HUB_HW_UUID_CHAR
from pylgbst.utilities import str2hex
log = logging.getLogger('comms-pygatt')
class GattoolConnection(Connection):
def __init__(self, controller='hci0'):
Connection.__init__(self)
self.backend = lambda: pygatt.GATTToolBackend(hci_device=controller)
self._conn_hnd = None
def connect(self, hub_mac=None):
log.debug("Trying to connect client to MoveHub with MAC: %s", hub_mac)
adapter = self.backend()
adapter.start()
while not self._conn_hnd:
log.info("Discovering devices...")
devices = adapter.scan(1)
log.debug("Devices: %s", devices)
for dev in devices:
address = dev['address']
name = dev['name']
if name == LEGO_MOVE_HUB or hub_mac == address:
logging.info("Found %s at %s", name, address)
self._conn_hnd = adapter.connect(address)
break
if self._conn_hnd:
break
return self
def disconnect(self):
self._conn_hnd.disconnect()
def write(self, handle, data):
log.debug("Writing to handle %s: %s", handle, str2hex(data))
return self._conn_hnd.char_write_handle(handle, bytearray(data))
def set_notify_handler(self, handler):
self._conn_hnd.subscribe(MOVE_HUB_HW_UUID_CHAR, handler)
def is_alive(self):
return True
class BlueGigaConnection(GattoolConnection):
def __init__(self):
super(BlueGigaConnection, self).__init__()
self.backend = lambda: pygatt.BGAPIBackend()
| true
| true
|
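The connect/subscribe flow of GattoolConnection, reduced to its pygatt essentials. The device name string and the characteristic UUID are placeholders; the record imports the real values as LEGO_MOVE_HUB and MOVE_HUB_HW_UUID_CHAR.
```
import pygatt

adapter = pygatt.GATTToolBackend(hci_device='hci0')
adapter.start()
try:
    for dev in adapter.scan(1):
        if dev['name'] == 'LEGO Move Hub':  # placeholder for LEGO_MOVE_HUB
            device = adapter.connect(dev['address'])
            # Placeholder UUID; use MOVE_HUB_HW_UUID_CHAR in practice.
            device.subscribe('00000000-0000-0000-0000-000000000000',
                             callback=lambda handle, value: print(handle, value))
            break
finally:
    adapter.stop()
```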
1c412ca1b9324bd4078b3f7938eaa783f06f5cac
| 2,879
|
py
|
Python
|
qanet/data.py
|
convergence-lab/QANet
|
5bffd105950304455245dfcfea9663d6c892aeca
|
[
"Apache-2.0"
] | null | null | null |
qanet/data.py
|
convergence-lab/QANet
|
5bffd105950304455245dfcfea9663d6c892aeca
|
[
"Apache-2.0"
] | null | null | null |
qanet/data.py
|
convergence-lab/QANet
|
5bffd105950304455245dfcfea9663d6c892aeca
|
[
"Apache-2.0"
] | null | null | null |
import os
import json
import numpy as np
import urllib.request
import zipfile
import spacy
import sys
data_dir = "data"
class SQuAD:
def __init__(self):
self.maybedownload()
with open(os.path.join(data_dir, 'train-v1.1.json')) as f:
self.train = json.load(f)
with open(os.path.join(data_dir, 'dev-v1.1.json')) as f:
self.dev = json.load(f)
# self.glove = self.load_glove()
self.embed()
def load_glove(self):
print("Loading GloVe Model")
f = open(os.path.join(data_dir, 'glove.6B.300d.txt'),'r')
model = {}
for line in f:
splitLine = line.split()
word = splitLine[0]
embedding = np.array([float(val) for val in splitLine[1:]])
model[word] = embedding
print("Done.",len(model)," words loaded!")
return model
def maybedownload(self):
if not os.path.exists(data_dir):
os.makedirs(data_dir)
if not os.path.exists(os.path.join(data_dir, 'train-v1.1.json')):
print("Downloading Squad")
response = urllib.request.urlopen('https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json')
train_json = response.read()
response = urllib.request.urlopen('https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json')
dev_json = response.read()
            with open(os.path.join(data_dir, 'train-v1.1.json'), 'wb') as f:
                f.write(train_json)
            with open(os.path.join(data_dir, 'dev-v1.1.json'), 'wb') as f:
                f.write(dev_json)
if not os.path.exists(os.path.join(data_dir, 'glove.6B.zip')):
print("Downloading GloVe")
response = urllib.request.urlopen('http://nlp.stanford.edu/data/glove.6B.zip')
glove_zip = response.read()
with open(os.path.join(data_dir, 'glove.6B.zip'), 'wb') as f:
f.write(glove_zip)
zip_ref = zipfile.ZipFile(os.path.join(data_dir, 'glove.6B.zip'), 'r')
zip_ref.extractall(data_dir)
zip_ref.close()
def embed(self):
self.embed_data(self.train)
def embed_data(self, data):
nlp = spacy.load('en')
data_list = data['data']
for data_elem in data_list:
paragraphs = data_elem['paragraphs']
for paragraph in paragraphs:
context = paragraph['context']
doc = nlp(context)
print([(w.text, w.pos_) for w in doc])
qas = paragraph['qas']
for qa in qas:
answers = qa['answers']
for ans in answers:
start = ans['answer_start']
end = start + len(ans['text'])
sys.exit(0)
if __name__ == '__main__':
squad = SQuAD()
| 37.38961
| 115
| 0.551928
|
import os
import json
import numpy as np
import urllib.request
import zipfile
import spacy
import sys
data_dir = "data"
class SQuAD:
def __init__(self):
self.maybedownload()
with open(os.path.join(data_dir, 'train-v1.1.json')) as f:
self.train = json.load(f)
with open(os.path.join(data_dir, 'dev-v1.1.json')) as f:
self.dev = json.load(f)
self.embed()
def load_glove(self):
print("Loading GloVe Model")
f = open(os.path.join(data_dir, 'glove.6B.300d.txt'),'r')
model = {}
for line in f:
splitLine = line.split()
word = splitLine[0]
embedding = np.array([float(val) for val in splitLine[1:]])
model[word] = embedding
print("Done.",len(model)," words loaded!")
return model
def maybedownload(self):
if not os.path.exists(data_dir):
os.makedirs(data_dir)
if not os.path.exists(os.path.join(data_dir, 'train-v1.1.json')):
print("Downloading Squad")
response = urllib.request.urlopen('https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json')
train_json = response.read()
response = urllib.request.urlopen('https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json')
dev_json = response.read()
            with open(os.path.join(data_dir, 'train-v1.1.json'), 'wb') as f:
                f.write(train_json)
            with open(os.path.join(data_dir, 'dev-v1.1.json'), 'wb') as f:
                f.write(dev_json)
if not os.path.exists(os.path.join(data_dir, 'glove.6B.zip')):
print("Downloading GloVe")
response = urllib.request.urlopen('http://nlp.stanford.edu/data/glove.6B.zip')
glove_zip = response.read()
with open(os.path.join(data_dir, 'glove.6B.zip'), 'wb') as f:
f.write(glove_zip)
zip_ref = zipfile.ZipFile(os.path.join(data_dir, 'glove.6B.zip'), 'r')
zip_ref.extractall(data_dir)
zip_ref.close()
def embed(self):
self.embed_data(self.train)
def embed_data(self, data):
nlp = spacy.load('en')
data_list = data['data']
for data_elem in data_list:
paragraphs = data_elem['paragraphs']
for paragraph in paragraphs:
context = paragraph['context']
doc = nlp(context)
print([(w.text, w.pos_) for w in doc])
qas = paragraph['qas']
for qa in qas:
answers = qa['answers']
for ans in answers:
start = ans['answer_start']
end = start + len(ans['text'])
sys.exit(0)
if __name__ == '__main__':
squad = SQuAD()
| true
| true
|
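A minimal spaCy call matching embed_data above; note that current spaCy releases ship the English model as en_core_web_sm (downloaded separately) instead of the bare 'en' shortcut used in the record.
```
import spacy

nlp = spacy.load('en_core_web_sm')  # python -m spacy download en_core_web_sm
doc = nlp('The quick brown fox jumps over the lazy dog.')
print([(token.text, token.pos_) for token in doc])
```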
1c412cdbba998677e1843b149ab62ebeb5dd0324
| 554
|
py
|
Python
|
people/migrations/0006_auto_20190701_2335.py
|
s-a-f-e/backend
|
6018f51466df9abd58f25729d91856842eee9509
|
[
"MIT"
] | 1
|
2019-05-06T19:40:43.000Z
|
2019-05-06T19:40:43.000Z
|
people/migrations/0006_auto_20190701_2335.py
|
s-a-f-e/backend
|
6018f51466df9abd58f25729d91856842eee9509
|
[
"MIT"
] | 9
|
2019-12-04T22:57:46.000Z
|
2022-02-10T07:15:11.000Z
|
people/migrations/0006_auto_20190701_2335.py
|
s-a-f-e/backend
|
6018f51466df9abd58f25729d91856842eee9509
|
[
"MIT"
] | 3
|
2019-05-01T20:41:33.000Z
|
2019-10-03T20:57:00.000Z
|
# Generated by Django 2.2.1 on 2019-07-01 23:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('people', '0005_healthcenter'),
]
operations = [
migrations.AddField(
model_name='mother',
name='dueMonth',
field=models.CharField(blank=True, max_length=15),
),
migrations.AddField(
model_name='mother',
name='dueYear',
field=models.CharField(blank=True, max_length=5),
),
]
| 23.083333
| 62
| 0.574007
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('people', '0005_healthcenter'),
]
operations = [
migrations.AddField(
model_name='mother',
name='dueMonth',
field=models.CharField(blank=True, max_length=15),
),
migrations.AddField(
model_name='mother',
name='dueYear',
field=models.CharField(blank=True, max_length=5),
),
]
| true
| true
|
1c412d7d5d86f9b4652e5ca074ac4b3f6637bab5
| 4,216
|
py
|
Python
|
tests/custom_cluster/test_delegation.py
|
suifengzhuliu/impala
|
611f4c6f3b18cfcddff3b2956cbb87c295a87655
|
[
"Apache-2.0"
] | 1
|
2019-12-14T03:09:50.000Z
|
2019-12-14T03:09:50.000Z
|
tests/custom_cluster/test_delegation.py
|
suifengzhuliu/impala
|
611f4c6f3b18cfcddff3b2956cbb87c295a87655
|
[
"Apache-2.0"
] | null | null | null |
tests/custom_cluster/test_delegation.py
|
suifengzhuliu/impala
|
611f4c6f3b18cfcddff3b2956cbb87c295a87655
|
[
"Apache-2.0"
] | 1
|
2020-04-02T18:25:34.000Z
|
2020-04-02T18:25:34.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import getpass
import pytest
from tests.hs2.hs2_test_suite import HS2TestSuite, needs_session
from TCLIService import TCLIService
from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
USER_NAME = getpass.getuser()
PROXY_USER = "proxy_user_name"
PROXY_USER_WITH_COMMA = "proxy_user,name_2"
PROXY_USERS_ALL = "proxy_user_name/proxy_user,name_2"
PROXY_USER_DELIMITER = "/"
class TestDelegation(CustomClusterTestSuite, HS2TestSuite):
def check_user_and_effective_user(self, proxy_user):
execute_statement_req = TCLIService.TExecuteStatementReq()
execute_statement_req.sessionHandle = self.session_handle
execute_statement_req.confOverlay = dict()
execute_statement_req.statement = \
"SELECT effective_user(), current_user(), user(), session_user()";
execute_statement_resp = self.hs2_client.ExecuteStatement(execute_statement_req)
HS2TestSuite.check_response(execute_statement_resp)
fetch_results_req = TCLIService.TFetchResultsReq()
fetch_results_req.operationHandle = execute_statement_resp.operationHandle
fetch_results_req.maxRows = 1
fetch_results_resp = self.hs2_client.FetchResults(fetch_results_req)
HS2TestSuite.check_response(fetch_results_resp)
assert (self.column_results_to_string(fetch_results_resp.results.columns) ==
(1, "%s, %s, %s, %s\n" % (proxy_user, proxy_user,
USER_NAME, USER_NAME)))
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
"--authorized_proxy_user_config=\"%s=%s\"" % (USER_NAME, PROXY_USER))
@needs_session(TCLIService.TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V6,
conf_overlay = {"impala.doas.user": PROXY_USER})
def test_effective_user_single_proxy(self):
'''Test that the effective user is correctly set when impala.doas.user is correct, and
that the effective_user() builtin returns the right thing'''
self.check_user_and_effective_user(PROXY_USER)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
"--authorized_proxy_user_config=\"%s=%s\"\
--authorized_proxy_user_config_delimiter=%c" % (USER_NAME, PROXY_USERS_ALL,
PROXY_USER_DELIMITER))
@needs_session(TCLIService.TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V6,
conf_overlay = {"impala.doas.user": PROXY_USER_WITH_COMMA})
def test_effective_user_multiple_proxies(self):
'''Test that the effective user is correctly set when there are multiple proxy users
    separated with the authorized_proxy_user_config_delimiter and when impala.doas.user
is correct, and that the effective_user() builtin returns the right thing'''
self.check_user_and_effective_user(PROXY_USER_WITH_COMMA)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
"--authorized_proxy_user_config=\"%s=%s\"\
--authorized_proxy_user_config_delimiter=" % (USER_NAME, PROXY_USER_WITH_COMMA))
@needs_session(TCLIService.TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V6,
conf_overlay = {"impala.doas.user": PROXY_USER_WITH_COMMA})
def test_effective_user_empty_delimiter(self):
'''Test that the effective user is correctly set when the
    authorized_proxy_user_config_delimiter is set to the empty string and
impala.doas.user is correct, and that the effective_user() builtin returns the
right thing'''
self.check_user_and_effective_user(PROXY_USER_WITH_COMMA)
| 49.6
| 90
| 0.775854
|
import getpass
import pytest
from tests.hs2.hs2_test_suite import HS2TestSuite, needs_session
from TCLIService import TCLIService
from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
USER_NAME = getpass.getuser()
PROXY_USER = "proxy_user_name"
PROXY_USER_WITH_COMMA = "proxy_user,name_2"
PROXY_USERS_ALL = "proxy_user_name/proxy_user,name_2"
PROXY_USER_DELIMITER = "/"
class TestDelegation(CustomClusterTestSuite, HS2TestSuite):
def check_user_and_effective_user(self, proxy_user):
execute_statement_req = TCLIService.TExecuteStatementReq()
execute_statement_req.sessionHandle = self.session_handle
execute_statement_req.confOverlay = dict()
execute_statement_req.statement = \
"SELECT effective_user(), current_user(), user(), session_user()";
execute_statement_resp = self.hs2_client.ExecuteStatement(execute_statement_req)
HS2TestSuite.check_response(execute_statement_resp)
fetch_results_req = TCLIService.TFetchResultsReq()
fetch_results_req.operationHandle = execute_statement_resp.operationHandle
fetch_results_req.maxRows = 1
fetch_results_resp = self.hs2_client.FetchResults(fetch_results_req)
HS2TestSuite.check_response(fetch_results_resp)
assert (self.column_results_to_string(fetch_results_resp.results.columns) ==
(1, "%s, %s, %s, %s\n" % (proxy_user, proxy_user,
USER_NAME, USER_NAME)))
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
"--authorized_proxy_user_config=\"%s=%s\"" % (USER_NAME, PROXY_USER))
@needs_session(TCLIService.TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V6,
conf_overlay = {"impala.doas.user": PROXY_USER})
def test_effective_user_single_proxy(self):
self.check_user_and_effective_user(PROXY_USER)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
"--authorized_proxy_user_config=\"%s=%s\"\
--authorized_proxy_user_config_delimiter=%c" % (USER_NAME, PROXY_USERS_ALL,
PROXY_USER_DELIMITER))
@needs_session(TCLIService.TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V6,
conf_overlay = {"impala.doas.user": PROXY_USER_WITH_COMMA})
def test_effective_user_multiple_proxies(self):
self.check_user_and_effective_user(PROXY_USER_WITH_COMMA)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
"--authorized_proxy_user_config=\"%s=%s\"\
--authorized_proxy_user_config_delimiter=" % (USER_NAME, PROXY_USER_WITH_COMMA))
@needs_session(TCLIService.TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V6,
conf_overlay = {"impala.doas.user": PROXY_USER_WITH_COMMA})
def test_effective_user_empty_delimiter(self):
self.check_user_and_effective_user(PROXY_USER_WITH_COMMA)
| true
| true
|
1c412e7b4c9fb7c412161fce8a832ca334bba45a
| 3,586
|
py
|
Python
|
app/recipe/views.py
|
KmrAbhi/recipe-app-api
|
9c6871c80a0479a95727a354c694179b5164629b
|
[
"MIT"
] | null | null | null |
app/recipe/views.py
|
KmrAbhi/recipe-app-api
|
9c6871c80a0479a95727a354c694179b5164629b
|
[
"MIT"
] | null | null | null |
app/recipe/views.py
|
KmrAbhi/recipe-app-api
|
9c6871c80a0479a95727a354c694179b5164629b
|
[
"MIT"
] | null | null | null |
from rest_framework import viewsets, mixins, status
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from core.models import Tag, Ingredient, Recipe
from recipe import serializers
class BaseRecipeAttrViewSet(viewsets.GenericViewSet,
mixins.ListModelMixin,
mixins.CreateModelMixin):
"""Base class for user owned recipe attributes"""
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get_queryset(self):
"""Returns objects for the current authenticated user"""
assigned_only = bool(
int(self.request.query_params.get('assigned_only', 0))
)
queryset = self.queryset
if assigned_only:
queryset = queryset.filter(recipe__isnull=False)
return queryset.filter(
user=self.request.user).order_by('-name').distinct()
def perform_create(self, serializer):
serializer.save(user=self.request.user)
class TagViewSet(BaseRecipeAttrViewSet):
"""Manage tags in the database"""
queryset = Tag.objects.all()
serializer_class = serializers.TagSerializer
class IngredientViewSet(BaseRecipeAttrViewSet):
"""Manage ingredients in the database"""
queryset = Ingredient.objects.all()
serializer_class = serializers.IngredientSerializer
class RecipeViewSet(viewsets.ModelViewSet):
"""Manage recipes in the database"""
serializer_class = serializers.RecipeSerializer
queryset = Recipe.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def _params_to_ints(self, qs):
"""convert a list of string Ids to a list of integers"""
return [int(str_id) for str_id in qs.split(',')]
def get_queryset(self):
"""Retrieve the recipes for the authenticated user"""
tags = self.request.query_params.get('tags')
ingredients = self.request.query_params.get('ingredients')
queryset = self.queryset
if tags:
tag_ids = self._params_to_ints(tags)
queryset = queryset.filter(tags__id__in=tag_ids) ## syntax to filter on foreign keys
if ingredients:
ingredients_ids = self._params_to_ints(ingredients)
queryset = queryset.filter(ingredients__id__in=ingredients_ids)
return queryset.filter(user=self.request.user)
def get_serializer_class(self):
"""Return appropriate serializer class"""
if self.action == 'retrieve':
return serializers.RecipeDetailSerializer
elif self.action == 'upload_image':
return serializers.RecipeImageSerializer
return self.serializer_class
def perform_create(self, serializer):
"""create a new recipe"""
serializer.save(user=self.request.user)
@action(methods=['POST'], detail=True, url_path='upload-image')
def upload_image(self, request, pk=None):
"""Upload an image to a recipe"""
recipe = self.get_object()
serializer = self.get_serializer(
recipe,
data=request.data
)
if serializer.is_valid():
serializer.save()
return Response(
serializer.data,
status=status.HTTP_200_OK
)
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
| 35.156863
| 97
| 0.672895
|
from rest_framework import viewsets, mixins, status
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from core.models import Tag, Ingredient, Recipe
from recipe import serializers
class BaseRecipeAttrViewSet(viewsets.GenericViewSet,
mixins.ListModelMixin,
mixins.CreateModelMixin):
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get_queryset(self):
assigned_only = bool(
int(self.request.query_params.get('assigned_only', 0))
)
queryset = self.queryset
if assigned_only:
queryset = queryset.filter(recipe__isnull=False)
return queryset.filter(
user=self.request.user).order_by('-name').distinct()
def perform_create(self, serializer):
serializer.save(user=self.request.user)
class TagViewSet(BaseRecipeAttrViewSet):
queryset = Tag.objects.all()
serializer_class = serializers.TagSerializer
class IngredientViewSet(BaseRecipeAttrViewSet):
queryset = Ingredient.objects.all()
serializer_class = serializers.IngredientSerializer
class RecipeViewSet(viewsets.ModelViewSet):
serializer_class = serializers.RecipeSerializer
queryset = Recipe.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def _params_to_ints(self, qs):
return [int(str_id) for str_id in qs.split(',')]
def get_queryset(self):
tags = self.request.query_params.get('tags')
ingredients = self.request.query_params.get('ingredients')
queryset = self.queryset
if tags:
tag_ids = self._params_to_ints(tags)
queryset = queryset.filter(tags__id__in=tag_ids)
if ingredients:
ingredients_ids = self._params_to_ints(ingredients)
queryset = queryset.filter(ingredients__id__in=ingredients_ids)
return queryset.filter(user=self.request.user)
def get_serializer_class(self):
if self.action == 'retrieve':
return serializers.RecipeDetailSerializer
elif self.action == 'upload_image':
return serializers.RecipeImageSerializer
return self.serializer_class
def perform_create(self, serializer):
serializer.save(user=self.request.user)
@action(methods=['POST'], detail=True, url_path='upload-image')
def upload_image(self, request, pk=None):
recipe = self.get_object()
serializer = self.get_serializer(
recipe,
data=request.data
)
if serializer.is_valid():
serializer.save()
return Response(
serializer.data,
status=status.HTTP_200_OK
)
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
| true
| true
|
1c412eb3a1a14c12d83589821c785a0b8b73ec5d
| 207
|
py
|
Python
|
django_town/social/define/__init__.py
|
uptown/django-town
|
4c3b078a8ce5dcc275d65faa4a1cdfb7ebc74a50
|
[
"MIT"
] | null | null | null |
django_town/social/define/__init__.py
|
uptown/django-town
|
4c3b078a8ce5dcc275d65faa4a1cdfb7ebc74a50
|
[
"MIT"
] | null | null | null |
django_town/social/define/__init__.py
|
uptown/django-town
|
4c3b078a8ce5dcc275d65faa4a1cdfb7ebc74a50
|
[
"MIT"
] | null | null | null |
__author__ = 'uptown'
from django_town.social.define.place_categories import *
from django_town.social.define.privacy_options import PRIVACY_OPTIONS
from django_town.social.define.page_role import PAGE_ROLE
| 41.4
| 69
| 0.864734
|
__author__ = 'uptown'
from django_town.social.define.place_categories import *
from django_town.social.define.privacy_options import PRIVACY_OPTIONS
from django_town.social.define.page_role import PAGE_ROLE
| true
| true
|
1c412f4130ff5f913dace2ba173b9fc3ee9e201a
| 24,090
|
py
|
Python
|
lib/googlecloudsdk/calliope/arg_parsers.py
|
ianel20/google-cloud-sdk
|
36ed4e06ba3961d0a8fbf30a3eaabf7db6d4e9c3
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/calliope/arg_parsers.py
|
ianel20/google-cloud-sdk
|
36ed4e06ba3961d0a8fbf30a3eaabf7db6d4e9c3
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/calliope/arg_parsers.py
|
ianel20/google-cloud-sdk
|
36ed4e06ba3961d0a8fbf30a3eaabf7db6d4e9c3
|
[
"Apache-2.0"
] | 1
|
2020-07-25T12:23:41.000Z
|
2020-07-25T12:23:41.000Z
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module that provides parsing utilities for argparse.
For details of how argparse argument parsers work, see:
http://docs.python.org/dev/library/argparse.html#type
Example usage:
import argparse
import arg_parsers
parser = argparse.ArgumentParser()
parser.add_argument(
'--metadata',
type=arg_parsers.ArgDict(),
action=arg_parsers.FloatingListValuesCatcher())
parser.add_argument(
'--delay',
default='5s',
type=arg_parsers.Duration(lower_bound='1s', upper_bound='10s'))
parser.add_argument(
'--disk-size',
default='10GB',
type=arg_parsers.BinarySize(lower_bound='1GB', upper_bound='10TB'))
res = parser.parse_args(
'--metadata x=y,a=b,c=d --delay 1s --disk-size 10gb'.split())
assert res.metadata == {'a': 'b', 'c': 'd', 'x': 'y'}
assert res.delay == 1
assert res.disk_size == 10737418240
"""
import argparse
import datetime
import re
from googlecloudsdk.core import log
__all__ = ['Duration', 'BinarySize']
class Error(Exception):
"""Exceptions that are defined by this module."""
class ArgumentTypeError(Error, argparse.ArgumentTypeError):
"""Exceptions for parsers that are used as argparse types."""
class ArgumentParsingError(Error, argparse.ArgumentError):
"""Raised when there is a problem with user input.
argparse.ArgumentError takes both the action and a message as constructor
parameters.
"""
def _GenerateErrorMessage(error, user_input=None, error_idx=None):
"""Constructs an error message for an exception.
Args:
error: str, The error message that should be displayed. This
message should not end with any punctuation--the full error
message is constructed by appending more information to error.
user_input: str, The user input that caused the error.
error_idx: int, The index at which the error occurred. If None,
the index will not be printed in the error message.
Returns:
str: The message to use for the exception.
"""
if user_input is None:
return error
elif not user_input: # Is input empty?
return error + '; received empty string'
elif error_idx is None:
return error + '; received: ' + user_input
return ('{error_message} at index {error_idx}: {user_input}'
.format(error_message=error, user_input=user_input,
error_idx=error_idx))
_VALUE_PATTERN = r"""
^ # Beginning of input marker.
(?P<amount>\d+) # Amount.
((?P<unit>[a-zA-Z]+))? # Optional unit.
$ # End of input marker.
"""
_SECOND = 1
_MINUTE = 60 * _SECOND
_HOUR = 60 * _MINUTE
_DAY = 24 * _HOUR
# The units are adopted from sleep(1):
# http://linux.die.net/man/1/sleep
_DURATION_SCALES = {
's': _SECOND,
'm': _MINUTE,
'h': _HOUR,
'd': _DAY,
}
_BINARY_SIZE_SCALES = {
'B': 1,
'KB': 1 << 10,
'MB': 1 << 20,
'GB': 1 << 30,
'TB': 1 << 40,
'PB': 1 << 50,
'KiB': 1 << 10,
'MiB': 1 << 20,
'GiB': 1 << 30,
'TiB': 1 << 40,
'PiB': 1 << 50,
}
def GetMultiCompleter(individual_completer):
"""Create a completer to handle completion for comma separated lists.
Args:
individual_completer: A function that completes an individual element.
Returns:
A function that completes the last element of the list.
"""
def MultiCompleter(prefix, parsed_args, **kwargs):
start = ''
lst = prefix.rsplit(',', 1)
if len(lst) > 1:
start = lst[0] + ','
prefix = lst[1]
matches = individual_completer(prefix, parsed_args, **kwargs)
return [start+match for match in matches]
return MultiCompleter
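# --- usage sketch (added for illustration; not part of the original module) ---
# A toy element completer is enough to show how only the last element of a
# comma-separated list is completed while the earlier elements are preserved.
_demo_element_completer = lambda prefix, parsed_args, **kwargs: [c for c in ('alpha', 'beta') if c.startswith(prefix)]
assert GetMultiCompleter(_demo_element_completer)('alpha,b', None) == ['alpha,beta']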
def _ValueParser(scales, default_unit, lower_bound=None, upper_bound=None,
strict_case=True):
"""A helper that returns a function that can parse values with units.
Casing for all units matters.
Args:
scales: {str: int}, A dictionary mapping units to their magnitudes in
relation to the lowest magnitude unit in the dict.
default_unit: str, The default unit to use if the user's input is
missing a unit.
lower_bound: str, An inclusive lower bound.
upper_bound: str, An inclusive upper bound.
strict_case: bool, whether to be strict on case-checking
Returns:
A function that can parse values.
"""
def UnitsByMagnitude():
"""Returns a list of the units in scales sorted by magnitude."""
return [key for key, _
in sorted(scales.iteritems(), key=lambda value: value[1])]
def Parse(value):
"""Parses value that can contain a unit."""
match = re.match(_VALUE_PATTERN, value, re.VERBOSE)
if not match:
raise ArgumentTypeError(_GenerateErrorMessage(
'given value must be of the form INTEGER[UNIT] where units '
'can be one of {0}'
.format(', '.join(UnitsByMagnitude())),
user_input=value))
amount = int(match.group('amount'))
unit = match.group('unit')
if strict_case:
unit_case = unit
default_unit_case = default_unit
scales_case = scales
else:
unit_case = unit and unit.upper()
default_unit_case = default_unit.upper()
scales_case = dict([(k.upper(), v) for k, v in scales.items()])
if unit_case is None:
return amount * scales_case[default_unit_case]
elif unit_case in scales_case:
return amount * scales_case[unit_case]
else:
raise ArgumentTypeError(_GenerateErrorMessage(
'unit must be one of {0}'.format(', '.join(UnitsByMagnitude())),
user_input=unit))
if lower_bound is None:
parsed_lower_bound = None
else:
parsed_lower_bound = Parse(lower_bound)
if upper_bound is None:
parsed_upper_bound = None
else:
parsed_upper_bound = Parse(upper_bound)
def ParseWithBoundsChecking(value):
"""Same as Parse except bound checking is performed."""
if value is None:
return None
else:
parsed_value = Parse(value)
if parsed_lower_bound is not None and parsed_value < parsed_lower_bound:
raise ArgumentTypeError(_GenerateErrorMessage(
'value must be greater than or equal to {0}'.format(lower_bound),
user_input=value))
elif parsed_upper_bound is not None and parsed_value > parsed_upper_bound:
raise ArgumentTypeError(_GenerateErrorMessage(
'value must be less than or equal to {0}'.format(upper_bound),
user_input=value))
else:
return parsed_value
return ParseWithBoundsChecking
def Duration(lower_bound=None, upper_bound=None):
"""Returns a function that can parse time durations.
Input to the parsing function must be a string of the form:
INTEGER[UNIT]
The integer must be non-negative. Valid units are "s", "m", "h", and
"d" for seconds, minutes, hours, and days, respectively. The casing
of the units matters.
If the unit is omitted, seconds is assumed.
The result is parsed in seconds. For example:
parser = Duration()
assert parser('10s') == 10
Args:
lower_bound: str, An inclusive lower bound for values.
upper_bound: str, An inclusive upper bound for values.
Raises:
ArgumentTypeError: If either the lower_bound or upper_bound
cannot be parsed. The returned function will also raise this
error if it cannot parse its input. This exception is also
raised if the returned function receives an out-of-bounds
input.
Returns:
A function that accepts a single time duration as input to be
parsed.
"""
return _ValueParser(_DURATION_SCALES, default_unit='s',
lower_bound=lower_bound, upper_bound=upper_bound)
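# --- usage sketch (added for illustration; not part of the original module) ---
# The bounds below are arbitrary; seconds are assumed when no unit is given.
_demo_delay_parser = Duration(lower_bound='1s', upper_bound='1h')
assert _demo_delay_parser('90') == 90
assert _demo_delay_parser('2m') == 120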
def BinarySize(lower_bound=None, upper_bound=None):
"""Returns a function that can parse binary sizes.
Binary sizes are defined as base-2 values representing number of
bytes.
Input to the parsing function must be a string of the form:
INTEGER[UNIT]
The integer must be non-negative. Valid units are "B", "KB", "MB",
"GB", "TB", "KiB", "MiB", "GiB", "TiB", "PiB". If the unit is
omitted, GB is assumed.
The result is parsed in bytes. For example:
parser = BinarySize()
assert parser('10GB') == 10737418240
Args:
lower_bound: str, An inclusive lower bound for values.
upper_bound: str, An inclusive upper bound for values.
Raises:
ArgumentTypeError: If either the lower_bound or upper_bound
cannot be parsed. The returned function will also raise this
error if it cannot parse its input. This exception is also
raised if the returned function receives an out-of-bounds
input.
Returns:
A function that accepts a single binary size as input to be
parsed.
"""
return _ValueParser(_BINARY_SIZE_SCALES, default_unit='GB',
lower_bound=lower_bound, upper_bound=upper_bound,
strict_case=False)
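# --- usage sketch (added for illustration; not part of the original module) ---
# BinarySize parsing is case-insensitive and defaults to GB without a unit.
_demo_size_parser = BinarySize(lower_bound='1GB', upper_bound='10TB')
assert _demo_size_parser('10GB') == 10 * (1 << 30)
assert _demo_size_parser('10gb') == _demo_size_parser('10')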
_KV_PAIR_DELIMITER = '='
class HostPort(object):
"""A class for holding host and port information."""
IPV4_OR_HOST_PATTERN = r'^(?P<address>[\w\d\.-]+)?(:|:(?P<port>[\d]+))?$'
# includes hostnames
IPV6_PATTERN = r'^(\[(?P<address>[\w\d:]+)\])(:|:(?P<port>[\d]+))?$'
def __init__(self, host, port):
self.host = host
self.port = port
@staticmethod
def Parse(s, ipv6_enabled=False):
"""Parse the given string into a HostPort object.
This can be used as an argparse type.
Args:
s: str, The string to parse. If ipv6_enabled and host is an IPv6 address,
it should be placed in square brackets: e.g.
[2001:db8:0:0:0:ff00:42:8329]
or
[2001:db8:0:0:0:ff00:42:8329]:8080
ipv6_enabled: boolean, If True then accept IPv6 addresses.
Raises:
ArgumentTypeError: If the string is not valid.
Returns:
HostPort, The parsed object.
"""
if not s:
return HostPort(None, None)
match = re.match(HostPort.IPV4_OR_HOST_PATTERN, s, re.UNICODE)
if ipv6_enabled and not match:
match = re.match(HostPort.IPV6_PATTERN, s, re.UNICODE)
if not match:
raise ArgumentTypeError(_GenerateErrorMessage(
'Failed to parse host and port. Expected format \n\n'
' IPv4_ADDRESS_OR_HOSTNAME:PORT\n\n'
'or\n\n'
' [IPv6_ADDRESS]:PORT\n\n'
'(where :PORT is optional).',
user_input=s))
elif not match:
raise ArgumentTypeError(_GenerateErrorMessage(
'Failed to parse host and port. Expected format \n\n'
' IPv4_ADDRESS_OR_HOSTNAME:PORT\n\n'
'(where :PORT is optional).',
user_input=s))
return HostPort(match.group('address'), match.group('port'))
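# --- usage sketch (added for illustration; not part of the original module) ---
# The port is captured as a string; both parts may be None for empty input.
_demo_hp = HostPort.Parse('localhost:8080')
assert (_demo_hp.host, _demo_hp.port) == ('localhost', '8080')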
class Day(object):
"""A class for parsing a datetime object for a specific day."""
@staticmethod
def Parse(s):
if not s:
return None
try:
return datetime.datetime.strptime(s, '%Y-%m-%d').date()
except ValueError:
raise ArgumentTypeError(
_GenerateErrorMessage(
"Failed to parse date. Value should be in the form 'YYYY-MM-DD",
user_input=s))
class Datetime(object):
"""A class for parsing a datetime object in UTC timezone."""
@staticmethod
def Parse(s):
"""Parses a string value into a Datetime object."""
if not s:
return None
accepted_formats = ('%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ')
# TODO(user): Add timezone support.
for date_format in accepted_formats:
try:
return datetime.datetime.strptime(s, date_format)
except ValueError:
pass
raise ArgumentTypeError(
_GenerateErrorMessage(
'Failed to parse date. Value should be in ISO or RFC3339 format',
user_input=s))
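# --- usage sketch (added for illustration; not part of the original module) ---
assert Day.Parse('2014-01-02') == datetime.date(2014, 1, 2)
assert Datetime.Parse('2014-01-02 03:04:05') == datetime.datetime(2014, 1, 2, 3, 4, 5)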
def _BoundedType(type_builder, type_description,
lower_bound=None, upper_bound=None, unlimited=False):
"""Returns a function that can parse given type within some bound.
Args:
type_builder: A callable for building the requested type from the value
string.
type_description: str, Description of the requested type (for verbose
messages).
lower_bound: of type compatible with type_builder,
The value must be >= lower_bound.
upper_bound: of type compatible with type_builder,
The value must be <= upper_bound.
unlimited: bool, If True then a value of 'unlimited' means no limit.
Returns:
A function that can parse given type within some bound.
"""
def Parse(value):
"""Parses value as a type constructed by type_builder.
Args:
value: str, Value to be converted to the requested type.
Raises:
ArgumentTypeError: If the provided value is out of bounds or unparsable.
Returns:
Value converted to the requested type.
"""
if unlimited and value == 'unlimited':
return None
try:
v = type_builder(value)
except ValueError:
raise ArgumentTypeError(
_GenerateErrorMessage('Value must be {0}'.format(type_description),
user_input=value))
if lower_bound is not None and v < lower_bound:
raise ArgumentTypeError(
_GenerateErrorMessage(
'Value must be greater than or equal to {0}'.format(lower_bound),
user_input=value))
if upper_bound is not None and upper_bound < v:
raise ArgumentTypeError(
_GenerateErrorMessage(
'Value must be less than or equal to {0}'.format(upper_bound),
user_input=value))
return v
return Parse
def BoundedInt(*args, **kwargs):
return _BoundedType(int, 'an integer', *args, **kwargs)
def BoundedFloat(*args, **kwargs):
return _BoundedType(float, 'a floating point number', *args, **kwargs)
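# --- usage sketch (added for illustration; not part of the original module) ---
# The port-style bounds are arbitrary; 'unlimited' is only honored when the
# parser was built with unlimited=True.
_demo_port_parser = BoundedInt(1, 65535)
assert _demo_port_parser('8080') == 8080
assert BoundedInt(1, unlimited=True)('unlimited') is None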
def _TokenizeQuotedList(arg_value, delim=','):
"""Tokenize an argument into a list.
Args:
arg_value: str, The raw argument.
delim: str, The delimiter on which to split the argument string.
Returns:
[str], The tokenized list.
"""
if arg_value:
if not arg_value.endswith(delim):
arg_value += delim
return arg_value.split(delim)[:-1]
return []
class ArgType(object):
"""Base class for arg types."""
class ArgList(ArgType):
"""Interpret an argument value as a list.
Intended to be used as the type= for a flag argument. Splits the string on
commas or another delimiter and returns a list.
By default, splits on commas:
'a,b,c' -> ['a', 'b', 'c']
There is an available syntax for using an alternate delimiter:
'^:^a,b:c' -> ['a,b', 'c']
'^::^a:b::c' -> ['a:b', 'c']
'^,^^a^,b,c' -> ['^a^', 'b', 'c']
"""
DEFAULT_DELIM_CHAR = ','
ALT_DELIM_CHAR = '^'
def __init__(self, element_type=None, min_length=0, max_length=None,
choices=None):
"""Initialize an ArgList.
Args:
element_type: (str)->str, A function to apply to each of the list items.
min_length: int, The minimum size of the list.
max_length: int, The maximum size of the list.
choices: [element_type], a list of valid possibilities for elements. If
None, then no constraints are imposed.
Returns:
(str)->[str], A function to parse the list of values in the argument.
Raises:
ArgumentTypeError: If the list is malformed.
"""
self.element_type = element_type
if choices:
def ChoiceType(raw_value):
if element_type:
typed_value = element_type(raw_value)
else:
typed_value = raw_value
if typed_value not in choices:
raise ArgumentTypeError('{value} must be one of [{choices}]'.format(
value=typed_value, choices=', '.join(
[str(choice) for choice in choices])))
return typed_value
self.element_type = ChoiceType
self.min_length = min_length
self.max_length = max_length
def __call__(self, arg_value): # pylint:disable=missing-docstring
delim = self.DEFAULT_DELIM_CHAR
if (arg_value.startswith(self.ALT_DELIM_CHAR) and
self.ALT_DELIM_CHAR in arg_value[1:]):
delim, arg_value = arg_value[1:].split(self.ALT_DELIM_CHAR, 1)
if not delim:
raise ArgumentTypeError(
'Invalid delimiter. Please see `gcloud topic escaping` for '
'information on escaping list or dictionary flag values.')
arg_list = _TokenizeQuotedList(arg_value, delim=delim)
# TODO(user): These exceptions won't present well to the user.
if len(arg_list) < self.min_length:
raise ArgumentTypeError('not enough args')
if self.max_length is not None and len(arg_list) > self.max_length:
raise ArgumentTypeError('too many args')
if self.element_type:
arg_list = [self.element_type(arg) for arg in arg_list]
return arg_list
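# --- usage sketch (added for illustration; not part of the original module) ---
_demo_list_parser = ArgList()
assert _demo_list_parser('a,b,c') == ['a', 'b', 'c']
assert _demo_list_parser('^:^a,b:c') == ['a,b', 'c']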
class ArgDict(ArgList):
"""Interpret an argument value as a dict.
Intended to be used as the type= for a flag argument. Splits the string on
commas to get a list, and then splits the items on equals to get a set of
key-value pairs to get a dict.
"""
def __init__(self, value_type=None, spec=None, min_length=0, max_length=None):
"""Initialize an ArgDict.
Args:
value_type: (str)->str, A function to apply to each of the dict values.
spec: {str: (str)->str}, A mapping of expected keys to functions.
The functions are applied to the values. If None, an arbitrary
set of keys will be accepted. If not None, it is an error for the
user to supply a key that is not in the spec.
min_length: int, The minimum number of keys in the dict.
max_length: int, The maximum number of keys in the dict.
Returns:
(str)->{str:str}, A function to parse the dict in the argument.
Raises:
ArgumentTypeError: If the list is malformed.
ValueError: If both value_type and spec are provided.
"""
super(ArgDict, self).__init__(min_length=min_length, max_length=max_length)
if spec and value_type:
raise ValueError('cannot have both spec and value_type')
self.value_type = value_type
self.spec = spec
def _ApplySpec(self, key, value):
if key in self.spec:
return self.spec[key](value)
else:
raise ArgumentTypeError(
_GenerateErrorMessage(
'valid keys are {0}'.format(
', '.join(sorted(self.spec.keys()))),
user_input=key))
def __call__(self, arg_value): # pylint:disable=missing-docstring
arg_list = super(ArgDict, self).__call__(arg_value)
arg_dict = {}
for arg in arg_list:
split_arg = arg.split('=', 1) # only use the first =
# TODO(user): These exceptions won't present well to the user.
if len(split_arg) != 2:
raise ArgumentTypeError(
('Bad syntax for dict arg: {0}. Please see `gcloud topic escaping` '
'if you would like information on escaping list or dictionary '
'flag values.').format(repr(arg)))
key, value = split_arg
if not key:
raise ArgumentTypeError('bad key for dict arg: '+repr(arg))
if self.value_type:
value = self.value_type(value)
if self.spec:
value = self._ApplySpec(key, value)
arg_dict[key] = value
return arg_dict
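# --- usage sketch (added for illustration; not part of the original module) ---
_demo_dict_parser = ArgDict()
assert _demo_dict_parser('x=y,a=b') == {'a': 'b', 'x': 'y'}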
# pylint:disable=protected-access
def FloatingListValuesCatcher(
action=argparse._StoreAction, switch_value=None):
"""Create an action for catching floating list values.
Args:
action: argparse.Action, the superclass of the new action.
switch_value: obj, If not none, allow users to specify no value for the
flag. If the flag is given and no value is specified, the switch_value
will be used instead.
Returns:
argparse.Action, an action that will catch list values separated by spaces.
"""
class FloatingListValuesCatcherAction(action):
"""This is to assist with refactoring argument lists.
Provides an error for users who type (or have a script) that specifies a list
with the elements in different arguments, e.g.
$ gcloud sql instances create foo --authorized-networks x y
usage: gcloud sql instances create INSTANCE [optional flags]
ERROR: (gcloud.sql.instances.create) argument --authorized-networks: lists
are separated by commas, try "--authorized-networks=x,y"
To do this, with flags that used to (but no longer) have nargs set to take
multiple values we apply an action designed to catch them by transparently
setting nargs to '+', and then making sure only 1 value is provided.
As a caveat, this means that people still cannot put positional arguments
after the flags. So, this is a temporary mechanism designed to inform users,
and we'll remove it eventually.
"""
# TODO(user): remove this.
_NOLINT = True
def __init__(self, *args, **kwargs):
if 'nargs' in kwargs:
# Make sure nothing weird is happening, first. This action is intended
# only for use with --flags that have the type as ArgList or ArgDict,
# and do not set nargs at all.
raise ValueError(
'trying to catch floating lists for a misspecified flag list')
if switch_value is not None:
kwargs['nargs'] = '*'
else:
kwargs['nargs'] = '+'
super(FloatingListValuesCatcherAction, self).__init__(*args, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
if not values and switch_value is not None:
msg = (
'We noticed that you provided no value for flag [{flag}]. This '
'behavior is deprecated.\nInstead, please provide an empty string '
'as the explicit value (try [{flag} \'\']).').format(
flag=option_string)
log.warn(msg)
super(FloatingListValuesCatcherAction, self).__call__(
parser, namespace, switch_value, option_string=option_string)
return
if len(values) > 1:
class ArgShell(object):
"""Class designed to trick argparse into displaying a nice error."""
def __init__(self, name):
self.option_strings = [name]
suggestions = []
if values and isinstance(values[0], dict):
aggregate_value = {}
for valdict in values:
aggregate_value.update(valdict)
suggestions.extend(
['%s=%s' % (k, v) for k, v in valdict.iteritems()])
if values and isinstance(values[0], list):
aggregate_value = []
suggestions.extend(
[','.join(map(str, vallist)) for vallist in values])
for vallist in values:
aggregate_value.extend(vallist)
extras = suggestions[1:]
msg = (
'We noticed that you are using space-separated lists, which are '
'deprecated. '
'Please transition to using comma-separated lists instead '
'(try "{flag} {values}"). '
'If you intend to use [{extras}] as positional arguments, put the '
'flags at the end.').format(
flag=option_string,
values=','.join(suggestions),
extras=', '.join(extras))
raise argparse.ArgumentError(ArgShell(option_string), msg)
else:
super(FloatingListValuesCatcherAction, self).__call__(
parser, namespace, values[0], option_string=option_string)
return FloatingListValuesCatcherAction
| 32.379032
| 80
| 0.653881
|
import argparse
import datetime
import re
from googlecloudsdk.core import log
__all__ = ['Duration', 'BinarySize']
class Error(Exception):
class ArgumentTypeError(Error, argparse.ArgumentTypeError):
class ArgumentParsingError(Error, argparse.ArgumentError):
def _GenerateErrorMessage(error, user_input=None, error_idx=None):
if user_input is None:
return error
elif not user_input:
return error + '; received empty string'
elif error_idx is None:
return error + '; received: ' + user_input
return ('{error_message} at index {error_idx}: {user_input}'
.format(error_message=error, user_input=user_input,
error_idx=error_idx))
_VALUE_PATTERN = r"""
^ # Beginning of input marker.
(?P<amount>\d+) # Amount.
((?P<unit>[a-zA-Z]+))? # Optional unit.
$ # End of input marker.
"""
_SECOND = 1
_MINUTE = 60 * _SECOND
_HOUR = 60 * _MINUTE
_DAY = 24 * _HOUR
_DURATION_SCALES = {
's': _SECOND,
'm': _MINUTE,
'h': _HOUR,
'd': _DAY,
}
_BINARY_SIZE_SCALES = {
'B': 1,
'KB': 1 << 10,
'MB': 1 << 20,
'GB': 1 << 30,
'TB': 1 << 40,
'PB': 1 << 50,
'KiB': 1 << 10,
'MiB': 1 << 20,
'GiB': 1 << 30,
'TiB': 1 << 40,
'PiB': 1 << 50,
}
def GetMultiCompleter(individual_completer):
def MultiCompleter(prefix, parsed_args, **kwargs):
start = ''
lst = prefix.rsplit(',', 1)
if len(lst) > 1:
start = lst[0] + ','
prefix = lst[1]
matches = individual_completer(prefix, parsed_args, **kwargs)
return [start+match for match in matches]
return MultiCompleter
def _ValueParser(scales, default_unit, lower_bound=None, upper_bound=None,
strict_case=True):
def UnitsByMagnitude():
return [key for key, _
in sorted(scales.iteritems(), key=lambda value: value[1])]
def Parse(value):
match = re.match(_VALUE_PATTERN, value, re.VERBOSE)
if not match:
raise ArgumentTypeError(_GenerateErrorMessage(
'given value must be of the form INTEGER[UNIT] where units '
'can be one of {0}'
.format(', '.join(UnitsByMagnitude())),
user_input=value))
amount = int(match.group('amount'))
unit = match.group('unit')
if strict_case:
unit_case = unit
default_unit_case = default_unit
scales_case = scales
else:
unit_case = unit and unit.upper()
default_unit_case = default_unit.upper()
scales_case = dict([(k.upper(), v) for k, v in scales.items()])
if unit_case is None:
return amount * scales_case[default_unit_case]
elif unit_case in scales_case:
return amount * scales_case[unit_case]
else:
raise ArgumentTypeError(_GenerateErrorMessage(
'unit must be one of {0}'.format(', '.join(UnitsByMagnitude())),
user_input=unit))
if lower_bound is None:
parsed_lower_bound = None
else:
parsed_lower_bound = Parse(lower_bound)
if upper_bound is None:
parsed_upper_bound = None
else:
parsed_upper_bound = Parse(upper_bound)
def ParseWithBoundsChecking(value):
if value is None:
return None
else:
parsed_value = Parse(value)
if parsed_lower_bound is not None and parsed_value < parsed_lower_bound:
raise ArgumentTypeError(_GenerateErrorMessage(
'value must be greater than or equal to {0}'.format(lower_bound),
user_input=value))
elif parsed_upper_bound is not None and parsed_value > parsed_upper_bound:
raise ArgumentTypeError(_GenerateErrorMessage(
'value must be less than or equal to {0}'.format(upper_bound),
user_input=value))
else:
return parsed_value
return ParseWithBoundsChecking
def Duration(lower_bound=None, upper_bound=None):
return _ValueParser(_DURATION_SCALES, default_unit='s',
lower_bound=lower_bound, upper_bound=upper_bound)
def BinarySize(lower_bound=None, upper_bound=None):
return _ValueParser(_BINARY_SIZE_SCALES, default_unit='GB',
lower_bound=lower_bound, upper_bound=upper_bound,
strict_case=False)
_KV_PAIR_DELIMITER = '='
class HostPort(object):
IPV4_OR_HOST_PATTERN = r'^(?P<address>[\w\d\.-]+)?(:|:(?P<port>[\d]+))?$'
IPV6_PATTERN = r'^(\[(?P<address>[\w\d:]+)\])(:|:(?P<port>[\d]+))?$'
def __init__(self, host, port):
self.host = host
self.port = port
@staticmethod
def Parse(s, ipv6_enabled=False):
if not s:
return HostPort(None, None)
match = re.match(HostPort.IPV4_OR_HOST_PATTERN, s, re.UNICODE)
if ipv6_enabled and not match:
match = re.match(HostPort.IPV6_PATTERN, s, re.UNICODE)
if not match:
raise ArgumentTypeError(_GenerateErrorMessage(
'Failed to parse host and port. Expected format \n\n'
' IPv4_ADDRESS_OR_HOSTNAME:PORT\n\n'
'or\n\n'
' [IPv6_ADDRESS]:PORT\n\n'
'(where :PORT is optional).',
user_input=s))
elif not match:
raise ArgumentTypeError(_GenerateErrorMessage(
'Failed to parse host and port. Expected format \n\n'
' IPv4_ADDRESS_OR_HOSTNAME:PORT\n\n'
'(where :PORT is optional).',
user_input=s))
return HostPort(match.group('address'), match.group('port'))
class Day(object):
@staticmethod
def Parse(s):
if not s:
return None
try:
return datetime.datetime.strptime(s, '%Y-%m-%d').date()
except ValueError:
raise ArgumentTypeError(
_GenerateErrorMessage(
"Failed to parse date. Value should be in the form 'YYYY-MM-DD",
user_input=s))
class Datetime(object):
@staticmethod
def Parse(s):
if not s:
return None
accepted_formats = ('%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ')
# TODO(user): Add timezone support.
for date_format in accepted_formats:
try:
return datetime.datetime.strptime(s, date_format)
except ValueError:
pass
raise ArgumentTypeError(
_GenerateErrorMessage(
'Failed to parse date. Value should be in ISO or RFC3339 format',
user_input=s))
def _BoundedType(type_builder, type_description,
lower_bound=None, upper_bound=None, unlimited=False):
def Parse(value):
if unlimited and value == 'unlimited':
return None
try:
v = type_builder(value)
except ValueError:
raise ArgumentTypeError(
_GenerateErrorMessage('Value must be {0}'.format(type_description),
user_input=value))
if lower_bound is not None and v < lower_bound:
raise ArgumentTypeError(
_GenerateErrorMessage(
'Value must be greater than or equal to {0}'.format(lower_bound),
user_input=value))
if upper_bound is not None and upper_bound < v:
raise ArgumentTypeError(
_GenerateErrorMessage(
'Value must be less than or equal to {0}'.format(upper_bound),
user_input=value))
return v
return Parse
def BoundedInt(*args, **kwargs):
return _BoundedType(int, 'an integer', *args, **kwargs)
def BoundedFloat(*args, **kwargs):
return _BoundedType(float, 'a floating point number', *args, **kwargs)
def _TokenizeQuotedList(arg_value, delim=','):
if arg_value:
if not arg_value.endswith(delim):
arg_value += delim
return arg_value.split(delim)[:-1]
return []
class ArgType(object):
class ArgList(ArgType):
DEFAULT_DELIM_CHAR = ','
ALT_DELIM_CHAR = '^'
def __init__(self, element_type=None, min_length=0, max_length=None,
choices=None):
self.element_type = element_type
if choices:
def ChoiceType(raw_value):
if element_type:
typed_value = element_type(raw_value)
else:
typed_value = raw_value
if typed_value not in choices:
raise ArgumentTypeError('{value} must be one of [{choices}]'.format(
value=typed_value, choices=', '.join(
[str(choice) for choice in choices])))
return typed_value
self.element_type = ChoiceType
self.min_length = min_length
self.max_length = max_length
def __call__(self, arg_value): # pylint:disable=missing-docstring
delim = self.DEFAULT_DELIM_CHAR
if (arg_value.startswith(self.ALT_DELIM_CHAR) and
self.ALT_DELIM_CHAR in arg_value[1:]):
delim, arg_value = arg_value[1:].split(self.ALT_DELIM_CHAR, 1)
if not delim:
raise ArgumentTypeError(
'Invalid delimiter. Please see `gcloud topic escaping` for '
'information on escaping list or dictionary flag values.')
arg_list = _TokenizeQuotedList(arg_value, delim=delim)
# TODO(user): These exceptions won't present well to the user.
if len(arg_list) < self.min_length:
raise ArgumentTypeError('not enough args')
if self.max_length is not None and len(arg_list) > self.max_length:
raise ArgumentTypeError('too many args')
if self.element_type:
arg_list = [self.element_type(arg) for arg in arg_list]
return arg_list
class ArgDict(ArgList):
def __init__(self, value_type=None, spec=None, min_length=0, max_length=None):
super(ArgDict, self).__init__(min_length=min_length, max_length=max_length)
if spec and value_type:
raise ValueError('cannot have both spec and value_type')
self.value_type = value_type
self.spec = spec
def _ApplySpec(self, key, value):
if key in self.spec:
return self.spec[key](value)
else:
raise ArgumentTypeError(
_GenerateErrorMessage(
'valid keys are {0}'.format(
', '.join(sorted(self.spec.keys()))),
user_input=key))
def __call__(self, arg_value):
arg_list = super(ArgDict, self).__call__(arg_value)
arg_dict = {}
for arg in arg_list:
split_arg = arg.split('=', 1)
if len(split_arg) != 2:
raise ArgumentTypeError(
('Bad syntax for dict arg: {0}. Please see `gcloud topic escaping` '
'if you would like information on escaping list or dictionary '
'flag values.').format(repr(arg)))
key, value = split_arg
if not key:
raise ArgumentTypeError('bad key for dict arg: '+repr(arg))
if self.value_type:
value = self.value_type(value)
if self.spec:
value = self._ApplySpec(key, value)
arg_dict[key] = value
return arg_dict
# pylint:disable=protected-access
def FloatingListValuesCatcher(
action=argparse._StoreAction, switch_value=None):
class FloatingListValuesCatcherAction(action):
# TODO(user): remove this.
_NOLINT = True
def __init__(self, *args, **kwargs):
if 'nargs' in kwargs:
# Make sure nothing weird is happening, first. This action is intended
# only for use with --flags that have the type as ArgList or ArgDict,
# and do not set nargs at all.
raise ValueError(
'trying to catch floating lists for a misspecified flag list')
if switch_value is not None:
kwargs['nargs'] = '*'
else:
kwargs['nargs'] = '+'
super(FloatingListValuesCatcherAction, self).__init__(*args, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
if not values and switch_value is not None:
msg = (
'We noticed that you provided no value for flag [{flag}]. This '
'behavior is deprecated.\nInstead, please provide an empty string '
'as the explicit value (try [{flag} \'\']).').format(
flag=option_string)
log.warn(msg)
super(FloatingListValuesCatcherAction, self).__call__(
parser, namespace, switch_value, option_string=option_string)
return
if len(values) > 1:
class ArgShell(object):
def __init__(self, name):
self.option_strings = [name]
suggestions = []
if values and isinstance(values[0], dict):
aggregate_value = {}
for valdict in values:
aggregate_value.update(valdict)
suggestions.extend(
['%s=%s' % (k, v) for k, v in valdict.iteritems()])
if values and isinstance(values[0], list):
aggregate_value = []
suggestions.extend(
[','.join(map(str, vallist)) for vallist in values])
for vallist in values:
aggregate_value.extend(vallist)
extras = suggestions[1:]
msg = (
'We noticed that you are using space-separated lists, which are '
'deprecated. '
'Please transition to using comma-separated lists instead '
'(try "{flag} {values}"). '
'If you intend to use [{extras}] as positional arguments, put the '
'flags at the end.').format(
flag=option_string,
values=','.join(suggestions),
extras=', '.join(extras))
raise argparse.ArgumentError(ArgShell(option_string), msg)
else:
super(FloatingListValuesCatcherAction, self).__call__(
parser, namespace, values[0], option_string=option_string)
return FloatingListValuesCatcherAction
| true
| true
|
1c412fe8b1e7524b8e1d7e68275835296f537de6
| 850
|
py
|
Python
|
04-trees-graphs/Queue.py
|
harrisonlingren/ctci-solutions
|
75e162c1df695ffda645837a45d9f16e8363ff1b
|
[
"MIT"
] | null | null | null |
04-trees-graphs/Queue.py
|
harrisonlingren/ctci-solutions
|
75e162c1df695ffda645837a45d9f16e8363ff1b
|
[
"MIT"
] | null | null | null |
04-trees-graphs/Queue.py
|
harrisonlingren/ctci-solutions
|
75e162c1df695ffda645837a45d9f16e8363ff1b
|
[
"MIT"
] | null | null | null |
class Queue(object):
def __init__(self):
self.first = None
self.last = None
self.size = 0
def __len__(self):
return self.size
def dequeue(self):
if self.first is None:
return None
old = self.first.data
self.first = self.first.next
if self.first is None:
self.last = None
self.size -= 1
return old
def enqueue(self, val):
new = Node(val)
if self.last is not None:
self.last.next = new
self.last = new
if self.first is None:
self.first = self.last
self.size += 1
def peek(self):
return self.first.data
def isEmpty(self):
return self.size <= 0
class Node(object):
def __init__(self, val):
self.data = val
self.next = None
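# --- usage sketch (added for illustration; not part of the original file) ---
q = Queue()
q.enqueue('a')
q.enqueue('b')
assert len(q) == 2 and q.peek() == 'a'
assert q.dequeue() == 'a' and q.dequeue() == 'b'
assert q.isEmpty()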
| 21.25
| 36
| 0.522353
|
class Queue(object):
def __init__(self):
self.first = None
self.last = None
self.size = 0
def __len__(self):
return self.size
def dequeue(self):
if self.first is None:
return None
old = self.first.data
self.first = self.first.next
if self.first is None:
self.last = None
self.size -= 1
return old
def enqueue(self, val):
new = Node(val)
if self.last is not None:
self.last.next = new
self.last = new
if self.first is None:
self.first = self.last
self.size += 1
def peek(self):
return self.first.data
def isEmpty(self):
return self.size <= 0
class Node(object):
def __init__(self, val):
self.data = val
self.next = None
| true
| true
|
1c413034a6e1650ac70498f32bb3a99fc861a9fb
| 1,352
|
py
|
Python
|
test.py
|
jfk1408/msbase.py
|
767dd1cb75db3a4d2a414afbff5a72c679a8c808
|
[
"MIT"
] | null | null | null |
test.py
|
jfk1408/msbase.py
|
767dd1cb75db3a4d2a414afbff5a72c679a8c808
|
[
"MIT"
] | null | null | null |
test.py
|
jfk1408/msbase.py
|
767dd1cb75db3a4d2a414afbff5a72c679a8c808
|
[
"MIT"
] | null | null | null |
import msbase.argparse_ as argparse
p = argparse.p()
p.add_argument('integers', type=int, help='just some integers')
args = p.parse_args()
print(args.integers)
from msbase.assert_ import *
assert_eq(1, 1)
assert_le(1, 2, "1 > 2")
from msbase.subprocess_ import *
print(call_std(["ls", "unknown"]))
ret = try_call_std(["./foo"])
assert ret == ("standard\n", "error\n", 0), ret
def task(i):
return i + 1
print(multiprocess(task, [1, 2, 3], n = 2))
print(multiprocess(task, [1, 2, 3], n = 2, debug_mode=True))
from msbase.lab import Step, AbstractLab, to_matrix
config1 = { "A": ["A1", "A2" ], "B": ["B1", "B2"] }
assert len(to_matrix(config1)) == 4
assert len(to_matrix({})) == 1
step1 = Step("mysleep1", ["./mysleep", "1"])
step2 = Step("mysleep2", ["./mysleep", "2"])
class MyLab(AbstractLab):
def digest_output(self, name: str, output, command):
return { "STDOUT length": len(output[0]) }
def digest_column_names(self):
return [ "STDOUT length"]
lab = MyLab("mylab", [step1, step2],
configurations={ "A": ["A1", "A2" ], "B": ["B1", "B2"] })
lab.run()
import os
f = [f for f in os.listdir(".") if f.endswith(".log")][0]
from msbase.utils import load_jsonl
assert_eq(len(load_jsonl(f)), 8)
lab.analyze()
os.remove(f)
os.remove('results.tex')
from msbase.logging import logger
logger.warn("bye!")
| 23.719298
| 69
| 0.636095
|
import msbase.argparse_ as argparse
p = argparse.p()
p.add_argument('integers', type=int, help='just some integers')
args = p.parse_args()
print(args.integers)
from msbase.assert_ import *
assert_eq(1, 1)
assert_le(1, 2, "1 > 2")
from msbase.subprocess_ import *
print(call_std(["ls", "unknown"]))
ret = try_call_std(["./foo"])
assert ret == ("standard\n", "error\n", 0), ret
def task(i):
return i + 1
print(multiprocess(task, [1, 2, 3], n = 2))
print(multiprocess(task, [1, 2, 3], n = 2, debug_mode=True))
from msbase.lab import Step, AbstractLab, to_matrix
config1 = { "A": ["A1", "A2" ], "B": ["B1", "B2"] }
assert len(to_matrix(config1)) == 4
assert len(to_matrix({})) == 1
step1 = Step("mysleep1", ["./mysleep", "1"])
step2 = Step("mysleep2", ["./mysleep", "2"])
class MyLab(AbstractLab):
def digest_output(self, name: str, output, command):
return { "STDOUT length": len(output[0]) }
def digest_column_names(self):
return [ "STDOUT length"]
lab = MyLab("mylab", [step1, step2],
configurations={ "A": ["A1", "A2" ], "B": ["B1", "B2"] })
lab.run()
import os
f = [f for f in os.listdir(".") if f.endswith(".log")][0]
from msbase.utils import load_jsonl
assert_eq(len(load_jsonl(f)), 8)
lab.analyze()
os.remove(f)
os.remove('results.tex')
from msbase.logging import logger
logger.warn("bye!")
| true
| true
|
1c4130a0ae7641bcd89722bccc6d8e09db11decd
| 3,836
|
py
|
Python
|
nvtabular/loader/torch.py
|
miguelusque/NVTabular
|
e58d318a64d8c1607e91c10b9b5d4a8b48bcab69
|
[
"Apache-2.0"
] | null | null | null |
nvtabular/loader/torch.py
|
miguelusque/NVTabular
|
e58d318a64d8c1607e91c10b9b5d4a8b48bcab69
|
[
"Apache-2.0"
] | null | null | null |
nvtabular/loader/torch.py
|
miguelusque/NVTabular
|
e58d318a64d8c1607e91c10b9b5d4a8b48bcab69
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import torch
from torch.utils.dlpack import from_dlpack
from .backend import DataLoader
class IterDL(torch.utils.data.IterableDataset):
def __init__(self, file_paths, batch_size=1, shuffle=False):
self.file_paths = file_paths
self.batch_size = batch_size
self.shuffle = shuffle
def __iter__(self):
for file_path in self.file_paths:
pdf = pd.read_parquet(file_path)
for start in range(0, pdf.shape[0], self.batch_size):
df = pdf[start : start + self.batch_size]
if self.shuffle:
df = df.sample(frac=1).reset_index(drop=True)
yield df
class TorchAsyncItr(torch.utils.data.IterableDataset, DataLoader):
"""This class creates batches of tensor. Each batch size is specified by the user.
The data input requires an NVTabular dataset. Handles spillover to ensure all
batches are the specified size until the final batch.
Parameters
-----------
dataset : NVTabular dataset
cats : [str]
the list of categorical columns in the dataset
conts : [str]
the list of continuous columns in the dataset
labels : [str]
the list of label columns in the dataset
batch_size : int
the size of each batch to supply to the model
shuffle : bool
enable/disable shuffling of dataset
parts_per_chunk : int
number of partitions from the iterator, an NVTabular Dataset, to concatenate into a "chunk"
devices : [int]
list representing all available GPU IDs
"""
def __init__(
self,
dataset,
cats=None,
conts=None,
labels=None,
batch_size=1,
shuffle=False,
seed_fn=None,
parts_per_chunk=1,
device=None,
global_size=None,
global_rank=None,
drop_last=False,
):
DataLoader.__init__(
self,
dataset,
cats,
conts,
labels,
batch_size,
shuffle,
seed_fn=seed_fn,
parts_per_chunk=parts_per_chunk,
device=device,
global_size=global_size,
global_rank=global_rank,
drop_last=drop_last,
)
def __iter__(self):
return DataLoader.__iter__(self)
def _get_device_ctx(self, dev):
return torch.cuda.device("cuda:{}".format(dev))
def _to_tensor(self, gdf, dtype=None):
dl_pack = gdf.to_dlpack()
tensor = from_dlpack(dl_pack)
return tensor.type(dtype)
def _split_fn(self, tensor, idx, axis=0):
return torch.split(tensor, idx, dim=axis)
@property
def _LONG_DTYPE(self):
return torch.long
@property
def _FLOAT32_DTYPE(self):
return torch.float32
def _handle_tensors(self, cats, conts, labels):
if isinstance(conts, torch.Tensor):
conts = conts.clone()
return cats, conts, labels
class DLDataLoader(torch.utils.data.DataLoader):
"""
This class is an extension of the torch dataloader.
It is required to support the FastAI framework.
"""
def __len__(self):
return len(self.dataset)
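# --- usage sketch (added for illustration; not part of the original module) ---
# ``ds`` and the column names below are placeholders; a real call needs an
# NVTabular Dataset and a CUDA-capable device:
#
#   train_data = TorchAsyncItr(ds, cats=["cat_a"], conts=["num_a"],
#                              labels=["label"], batch_size=4096, shuffle=True)
#   for cats, conts, labels in train_data:
#       ...  # each element is already a full batch of torch tensors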
| 29.507692
| 99
| 0.63634
|
import pandas as pd
import torch
from torch.utils.dlpack import from_dlpack
from .backend import DataLoader
class IterDL(torch.utils.data.IterableDataset):
def __init__(self, file_paths, batch_size=1, shuffle=False):
self.file_paths = file_paths
self.batch_size = batch_size
self.shuffle = shuffle
def __iter__(self):
for file_path in self.file_paths:
pdf = pd.read_parquet(file_path)
for start in range(0, pdf.shape[0], self.batch_size):
df = pdf[start : start + self.batch_size]
if self.shuffle:
df = df.sample(frac=1).reset_index(drop=True)
yield df
class TorchAsyncItr(torch.utils.data.IterableDataset, DataLoader):
def __init__(
self,
dataset,
cats=None,
conts=None,
labels=None,
batch_size=1,
shuffle=False,
seed_fn=None,
parts_per_chunk=1,
device=None,
global_size=None,
global_rank=None,
drop_last=False,
):
DataLoader.__init__(
self,
dataset,
cats,
conts,
labels,
batch_size,
shuffle,
seed_fn=seed_fn,
parts_per_chunk=parts_per_chunk,
device=device,
global_size=global_size,
global_rank=global_rank,
drop_last=drop_last,
)
def __iter__(self):
return DataLoader.__iter__(self)
def _get_device_ctx(self, dev):
return torch.cuda.device("cuda:{}".format(dev))
def _to_tensor(self, gdf, dtype=None):
dl_pack = gdf.to_dlpack()
tensor = from_dlpack(dl_pack)
return tensor.type(dtype)
def _split_fn(self, tensor, idx, axis=0):
return torch.split(tensor, idx, dim=axis)
@property
def _LONG_DTYPE(self):
return torch.long
@property
def _FLOAT32_DTYPE(self):
return torch.float32
def _handle_tensors(self, cats, conts, labels):
if isinstance(conts, torch.Tensor):
conts = conts.clone()
return cats, conts, labels
class DLDataLoader(torch.utils.data.DataLoader):
def __len__(self):
return len(self.dataset)
| true
| true
|
1c4130b0703a812ab59230e680f4142947d9957a
| 1,827
|
py
|
Python
|
tests/functional/test_skip_unless_on_aarch64.py
|
bdrung/pytest-skip-markers
|
69b9be27b1969a7b59666afefcb63bf1fdf66b31
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/test_skip_unless_on_aarch64.py
|
bdrung/pytest-skip-markers
|
69b9be27b1969a7b59666afefcb63bf1fdf66b31
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/test_skip_unless_on_aarch64.py
|
bdrung/pytest-skip-markers
|
69b9be27b1969a7b59666afefcb63bf1fdf66b31
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021-2022 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""
Test the ``@pytest.mark.skip_unless_on_aarch64`` marker.
"""
import sys
from unittest import mock
import pytest
pytestmark = [
pytest.mark.skipif(
sys.platform.startswith("win")
and sys.version_info >= (3, 8)
and sys.version_info < (3, 10),
reason="PyTest's capture and pytester.runpytest_inprocess looks broken on Windows and Py(>3.8,<3.10)",
),
]
def test_skipped(pytester):
pytester.makepyfile(
"""
import pytest
@pytest.mark.skip_unless_on_aarch64
def test_one():
assert True
"""
)
with mock.patch("pytestskipmarkers.utils.platform.is_aarch64", return_value=False):
res = pytester.runpytest_inprocess()
res.assert_outcomes(skipped=1)
res.stdout.no_fnmatch_line("*PytestUnknownMarkWarning*")
def test_not_skipped(pytester):
pytester.makepyfile(
"""
import pytest
@pytest.mark.skip_unless_on_aarch64
def test_one():
assert True
"""
)
with mock.patch("pytestskipmarkers.utils.platform.is_aarch64", return_value=True):
res = pytester.runpytest_inprocess()
res.assert_outcomes(passed=1)
res.stdout.no_fnmatch_line("*PytestUnknownMarkWarning*")
def test_skip_reason(pytester):
pytester.makepyfile(
"""
import pytest
@pytest.mark.skip_unless_on_aarch64(reason='Because!')
def test_one():
assert True
"""
)
with mock.patch("pytestskipmarkers.utils.platform.is_aarch64", return_value=False):
res = pytester.runpytest_inprocess("-ra", "-s", "-vv")
res.assert_outcomes(skipped=1)
res.stdout.fnmatch_lines(["SKIPPED * test_skip_reason.py:*: Because!"])
| 26.867647
| 110
| 0.648057
|
import sys
from unittest import mock
import pytest
pytestmark = [
pytest.mark.skipif(
sys.platform.startswith("win")
and sys.version_info >= (3, 8)
and sys.version_info < (3, 10),
reason="PyTest's capture and pytester.runpytest_inprocess looks broken on Windows and Py(>3.8,<3.10)",
),
]
def test_skipped(pytester):
pytester.makepyfile(
"""
import pytest
@pytest.mark.skip_unless_on_aarch64
def test_one():
assert True
"""
)
with mock.patch("pytestskipmarkers.utils.platform.is_aarch64", return_value=False):
res = pytester.runpytest_inprocess()
res.assert_outcomes(skipped=1)
res.stdout.no_fnmatch_line("*PytestUnknownMarkWarning*")
def test_not_skipped(pytester):
pytester.makepyfile(
"""
import pytest
@pytest.mark.skip_unless_on_aarch64
def test_one():
assert True
"""
)
with mock.patch("pytestskipmarkers.utils.platform.is_aarch64", return_value=True):
res = pytester.runpytest_inprocess()
res.assert_outcomes(passed=1)
res.stdout.no_fnmatch_line("*PytestUnknownMarkWarning*")
def test_skip_reason(pytester):
pytester.makepyfile(
"""
import pytest
@pytest.mark.skip_unless_on_aarch64(reason='Because!')
def test_one():
assert True
"""
)
with mock.patch("pytestskipmarkers.utils.platform.is_aarch64", return_value=False):
res = pytester.runpytest_inprocess("-ra", "-s", "-vv")
res.assert_outcomes(skipped=1)
res.stdout.fnmatch_lines(["SKIPPED * test_skip_reason.py:*: Because!"])
| true
| true
|
1c4130e0617182aa9a164573ce43128ca6cc58b9
| 1,656
|
py
|
Python
|
gitfs/utils/path.py
|
whywaita/gitfs
|
cf92acc1fdb0bf93d5998f223d9ef7b285bd74b1
|
[
"Apache-2.0"
] | 921
|
2018-03-27T16:26:14.000Z
|
2022-03-31T12:39:34.000Z
|
gitfs/utils/path.py
|
whywaita/gitfs
|
cf92acc1fdb0bf93d5998f223d9ef7b285bd74b1
|
[
"Apache-2.0"
] | 104
|
2015-01-20T21:31:25.000Z
|
2018-02-21T21:14:21.000Z
|
gitfs/utils/path.py
|
whywaita/gitfs
|
cf92acc1fdb0bf93d5998f223d9ef7b285bd74b1
|
[
"Apache-2.0"
] | 90
|
2015-01-05T12:59:11.000Z
|
2018-03-25T15:16:34.000Z
|
# Copyright 2014-2016 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import deque
def split_path_into_components(path):
"""
Splits a path and returns a list of its constituents.
E.g.: /totally/random/path => ['totally', 'random', 'path']
:param path: the path to be split
:type path: str
:returns: the list which contains the path components
.. note::
This function is by no means a general parser that handles all the possible
constructs. Since we build the paths, we assume the following format. The
path:
* has to start with the `/` character if it is not empty.
* it cannot contain double slashes `//`
* it cannot end with trailing slashes `/`
Examples of correct paths:
* ``
* `/`
* `/a/b`
"""
head, tail = os.path.split(path)
if not tail:
return []
components = deque()
components.appendleft(tail)
path = head
while path and path != "/":
head, tail = os.path.split(path)
components.appendleft(tail)
path = head
return list(components)
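# --- usage sketch (added for illustration; not part of the original module) ---
assert split_path_into_components("/totally/random/path") == ["totally", "random", "path"]
assert split_path_into_components("/") == []
assert split_path_into_components("") == []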
| 28.551724
| 77
| 0.658816
|
import os
from collections import deque
def split_path_into_components(path):
head, tail = os.path.split(path)
if not tail:
return []
components = deque()
components.appendleft(tail)
path = head
while path and path != "/":
head, tail = os.path.split(path)
components.appendleft(tail)
path = head
return list(components)
| true
| true
|
1c4131053e347277fe9bbd48b407e1d6d4d7a94a
| 3,833
|
py
|
Python
|
pyvisdk/mo/host_local_account_manager.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
pyvisdk/mo/host_local_account_manager.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
pyvisdk/mo/host_local_account_manager.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
from pyvisdk.base.managed_object_types import ManagedObjectTypes
from pyvisdk.base.base_entity import BaseEntity
import logging
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
class HostLocalAccountManager(BaseEntity):
'''This managed object type provides an interface through which local accounts on
a host are managed. Note that this managed object applies only to applications
that use a local account database on the host to provide authentication (ESX
Server, for example). POSIX and win32 hosts may impose different restrictions
on the password, ID, and description formats. POSIX host implementation may
restrict the user or group name to be lower case letters and less than 16
characters in total. It may also disallow characters such as ";", "\n", and so
on. In short, all the platform dependent rules and restrictions regarding
naming of users/groups and password apply here. An InvalidArgument fault is
thrown if any of these rules are not obeyed.'''
def __init__(self, core, name=None, ref=None, type=ManagedObjectTypes.HostLocalAccountManager):
super(HostLocalAccountManager, self).__init__(core, name=name, ref=ref, type=type)
def AssignUserToGroup(self, user, group):
'''Assigns a user to a group.
:param user: User ID of the account whose group membership is being assigned.
:param group: Destination group account to which the user is being assigned.
'''
return self.delegate("AssignUserToGroup")(user, group)
def CreateGroup(self, group):
'''Creates a local group account using the parameters defined in the
HostLocalAccountManagerAccountSpecification data object type. For POSIX hosts,
passing the HostLocalAccountManagerPosixAccountSpecification data object type
allows you to control the group ID format of the group account being created.
:param group: Specification of group being created.
'''
return self.delegate("CreateGroup")(group)
def CreateUser(self, user):
'''Creates a local user account using the parameters defined in the
HostLocalAccountManagerAccountSpecification data object type. For POSIX hosts,
passing HostLocalAccountManagerPosixAccountSpecification data object type
allows you to control the format of the user ID of the user account being
created.
:param user: Specification of user being created.
'''
return self.delegate("CreateUser")(user)
def RemoveGroup(self, groupName):
'''Removes a local group account.
:param groupName: Group ID of the group account being removed.
'''
return self.delegate("RemoveGroup")(groupName)
def RemoveUser(self, userName):
'''Removes a local user account.
:param userName: User ID of the user account being removed.
'''
return self.delegate("RemoveUser")(userName)
def UnassignUserFromGroup(self, user, group):
'''Unassigns a user from a group.
:param user: User being unassigned from group.
:param group: Group from which the user is being removed.
'''
return self.delegate("UnassignUserFromGroup")(user, group)
def UpdateUser(self, user):
'''Updates a local user account using the parameters defined in the
HostLocalAccountManagerAccountSpecification data object type.
:param user: Specification of user being updated.
'''
return self.delegate("UpdateUser")(user)
| 38.717172
| 99
| 0.665014
|
from pyvisdk.base.managed_object_types import ManagedObjectTypes
from pyvisdk.base.base_entity import BaseEntity
import logging
| true
| true
|
1c4132b65e5ec999b893c5bd01767981dc243825
| 113
|
py
|
Python
|
tests/grimlock/test_commands.py
|
rabstejnek/grimlock
|
37cc927f2a727c019b113c48ce29dba24750c247
|
[
"MIT"
] | null | null | null |
tests/grimlock/test_commands.py
|
rabstejnek/grimlock
|
37cc927f2a727c019b113c48ce29dba24750c247
|
[
"MIT"
] | null | null | null |
tests/grimlock/test_commands.py
|
rabstejnek/grimlock
|
37cc927f2a727c019b113c48ce29dba24750c247
|
[
"MIT"
] | null | null | null |
from grimlock import commands
def test_hello_world():
assert commands.hello_world(None) == "Hello, World!"
| 18.833333
| 56
| 0.743363
|
from grimlock import commands
def test_hello_world():
assert commands.hello_world(None) == "Hello, World!"
| true
| true
|
1c4132f1f619f17a9ea81acaaf18acfda97370ae
| 2,138
|
py
|
Python
|
tests/test_CLOCer.py
|
Niweera/CLOCer
|
23a136a1190fef2f5d72b64913823504cb1d0f56
|
[
"MIT"
] | 5
|
2022-01-20T18:40:34.000Z
|
2022-01-26T01:37:31.000Z
|
tests/test_CLOCer.py
|
Niweera/CLOCer
|
23a136a1190fef2f5d72b64913823504cb1d0f56
|
[
"MIT"
] | 1
|
2022-02-19T05:12:30.000Z
|
2022-02-19T05:12:30.000Z
|
tests/test_CLOCer.py
|
Niweera/CLOCer
|
23a136a1190fef2f5d72b64913823504cb1d0f56
|
[
"MIT"
] | null | null | null |
import subprocess
from unittest import TestCase
from clocer.CLOCer import CLOCer
import json
from os.path import exists, abspath, join, dirname, realpath
from unittest.mock import patch, Mock
from clocer.Configure import Configure
from clocer.CustomExceptions import CLOCerError, ConfigureError
class TestCLOCer(TestCase):
def setUp(self) -> None:
self.url = "https://github.com/Niweera/cpu-meter"
def test_setup(self):
CLOCer.setup()
def test_setup_fail(self):
actual_error_msg = "Mock ConfigureError occurred in Configure.setup_output()"
with patch.object(Configure, "setup_output", return_value=None) as get_mock:
get_mock.side_effect = ConfigureError(actual_error_msg)
with self.assertRaises(CLOCerError):
CLOCer.setup()
def test_run(self):
url = self.url
repo_name = url.replace("https://github.com/", "").replace("/", "_")
CLOCer.setup()
CLOCer.run(url)
output_path: str = abspath(
join(
dirname(dirname(realpath(__file__))),
"clocer",
"output",
f"{repo_name}.json",
)
)
fixture_path: str = abspath(
join(
dirname(dirname(realpath(__file__))),
"tests",
"fixtures",
f"{repo_name}.json",
)
)
with open(output_path, "r") as test_file:
test_result = json.load(test_file)
test_result.pop("header", None)
with open(fixture_path, "r") as fixture_file:
actual_result = json.load(fixture_file)
actual_result.pop("header", None)
self.assertTrue(exists(output_path))
self.assertDictEqual(test_result, actual_result)
def test_run_fail(self):
mock = Mock()
mock.returncode = Mock(return_value=1)
CLOCer.setup()
with patch.object(subprocess, "run") as get_mock:
get_mock.return_value = mock
with self.assertRaises(CLOCerError):
CLOCer.run(self.url)
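# A hedged, standalone sketch of the mocking pattern used in test_setup_fail
# and test_run_fail above: patch.object swaps an attribute for a Mock, and
# side_effect makes the patched call raise. `Service` here is hypothetical.
from unittest.mock import patch

class Service:
    @staticmethod
    def ping():
        return "pong"

with patch.object(Service, "ping") as mock_ping:
    mock_ping.side_effect = RuntimeError("boom")
    try:
        Service.ping()
    except RuntimeError as exc:
        print(exc)  # -> boom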
| 30.542857
| 85
| 0.596819
|
import subprocess
from unittest import TestCase
from clocer.CLOCer import CLOCer
import json
from os.path import exists, abspath, join, dirname, realpath
from unittest.mock import patch, Mock
from clocer.Configure import Configure
from clocer.CustomExceptions import CLOCerError, ConfigureError
class TestCLOCer(TestCase):
def setUp(self) -> None:
self.url = "https://github.com/Niweera/cpu-meter"
def test_setup(self):
CLOCer.setup()
def test_setup_fail(self):
actual_error_msg = "Mock ConfigureError occurred in Configure.setup_output()"
with patch.object(Configure, "setup_output", return_value=None) as get_mock:
get_mock.side_effect = ConfigureError(actual_error_msg)
with self.assertRaises(CLOCerError):
CLOCer.setup()
def test_run(self):
url = self.url
repo_name = url.replace("https://github.com/", "").replace("/", "_")
CLOCer.setup()
CLOCer.run(url)
output_path: str = abspath(
join(
dirname(dirname(realpath(__file__))),
"clocer",
"output",
f"{repo_name}.json",
)
)
fixture_path: str = abspath(
join(
dirname(dirname(realpath(__file__))),
"tests",
"fixtures",
f"{repo_name}.json",
)
)
with open(output_path, "r") as test_file:
test_result = json.load(test_file)
test_result.pop("header", None)
with open(fixture_path, "r") as fixture_file:
actual_result = json.load(fixture_file)
actual_result.pop("header", None)
self.assertTrue(exists(output_path))
self.assertDictEqual(test_result, actual_result)
def test_run_fail(self):
mock = Mock()
mock.returncode = Mock(return_value=1)
CLOCer.setup()
with patch.object(subprocess, "run") as get_mock:
get_mock.return_value = mock
with self.assertRaises(CLOCerError):
CLOCer.run(self.url)
| true
| true
|
1c41330bf1c92a6991a25f262dc16d768a487ef6
| 610
|
py
|
Python
|
tests/test_draft_combine.py
|
jtpavlock/nba_stats
|
ee63e5e0cca412c95805c99bea51e2b5685324ff
|
[
"BSD-3-Clause"
] | 12
|
2020-01-06T23:46:58.000Z
|
2021-04-28T15:52:18.000Z
|
tests/test_draft_combine.py
|
jtpavlock/nba_stats
|
ee63e5e0cca412c95805c99bea51e2b5685324ff
|
[
"BSD-3-Clause"
] | 4
|
2019-12-22T14:09:30.000Z
|
2021-07-01T00:46:32.000Z
|
tests/test_draft_combine.py
|
jtpavlock/nba_stats
|
ee63e5e0cca412c95805c99bea51e2b5685324ff
|
[
"BSD-3-Clause"
] | 2
|
2020-07-18T09:27:55.000Z
|
2022-03-05T16:44:45.000Z
|
"""Test draft_combine module and included endpoints.
We don't mock any API calls, so these tests also verify that the live
endpoints haven't changed on us.
"""
from nbapy import draft_combine
class TestSummary:
@staticmethod
def test_stats():
stats = draft_combine.Summary().stats()
assert stats is not None
class TestDrillResults:
@staticmethod
def test_stats():
stats = draft_combine.DrillResults().stats()
assert stats is not None
class TestSpotShooting:
@staticmethod
def test_stats():
stats = draft_combine.SpotShooting().stats()
assert stats is not None
| 22.592593
| 73
| 0.688525
|
from nbapy import draft_combine
class TestSummary:
@staticmethod
def test_stats():
stats = draft_combine.Summary().stats()
assert stats is not None
class TestDrillResults:
@staticmethod
def test_stats():
stats = draft_combine.DrillResults().stats()
assert stats is not None
class TestSpotShooting:
@staticmethod
def test_stats():
stats = draft_combine.SpotShooting().stats()
assert stats is not None
| true
| true
|
1c41336f0439ff91a396bffaaf5e24e3dbe7f124
| 383
|
py
|
Python
|
trade/migrations/0004_item_verified.py
|
dc74089/oneshirt
|
437941bf29e41098df3eb3cde2370ea29c897969
|
[
"Apache-2.0"
] | null | null | null |
trade/migrations/0004_item_verified.py
|
dc74089/oneshirt
|
437941bf29e41098df3eb3cde2370ea29c897969
|
[
"Apache-2.0"
] | 6
|
2018-03-06T18:07:08.000Z
|
2018-12-27T22:54:12.000Z
|
trade/migrations/0004_item_verified.py
|
dc74089/oneshirt
|
437941bf29e41098df3eb3cde2370ea29c897969
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.0 on 2018-02-28 16:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('trade', '0003_auto_20180223_1712'),
]
operations = [
migrations.AddField(
model_name='item',
name='verified',
field=models.BooleanField(default=False),
),
]
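# A migration like this is typically produced by `python manage.py
# makemigrations` after adding `verified = models.BooleanField(default=False)`
# to the Item model; `python manage.py migrate` then applies the AddField.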
| 20.157895
| 53
| 0.597911
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('trade', '0003_auto_20180223_1712'),
]
operations = [
migrations.AddField(
model_name='item',
name='verified',
field=models.BooleanField(default=False),
),
]
| true
| true
|
1c41364d26b6a4490d48a78f375930da474b7cd3
| 736
|
py
|
Python
|
Links.py
|
HuzefaUsama25/IntenseResearcher
|
26d25384f62d29dc8b4231a075c94b8667ea1d73
|
[
"MIT"
] | 1
|
2021-08-12T11:00:19.000Z
|
2021-08-12T11:00:19.000Z
|
Links.py
|
HuzefaUsama25/IntenseResearcher
|
26d25384f62d29dc8b4231a075c94b8667ea1d73
|
[
"MIT"
] | null | null | null |
Links.py
|
HuzefaUsama25/IntenseResearcher
|
26d25384f62d29dc8b4231a075c94b8667ea1d73
|
[
"MIT"
] | null | null | null |
import bs4
import requests
import sys
import os
import re
import unicodedata
url = input("Search for: ")
filename = "links"
filename=r"D:\Huzefa\Desktop\The Big Researcher\\" +filename+ ".txt"
url = "https://search.yahoo.com/search?p="+url+"&n=100"
res = requests.get(url)
soup = bs4.BeautifulSoup(res.text, "lxml")
##
file = open(filename , 'wb')
search = soup.select("a.ac-algo.fz-l.ac-21th.lh-24")
for link in search[:100]:
actlink = link.get('href')
f = actlink
file.write(unicodedata.normalize('NFD', re.sub("[\(\[].*?[\)\]]", "", f)).encode('ascii', 'ignore'))
file.write(unicodedata.normalize('NFD', re.sub("[\(\[].*?[\)\]]", "", os.linesep)).encode('ascii', 'ignore'))
file.close()
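# For reference, a quick check of the re.sub pattern used above, which strips
# any parenthesised or bracketed span before the text is ASCII-folded:
import re
print(re.sub(r"[\(\[].*?[\)\]]", "", "Python (lang) [wiki]"))  # -> 'Python  '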
| 28.307692
| 114
| 0.616848
|
import bs4
import requests
import sys
import os
import re
import unicodedata
url = input("Search for: ")
filename = "links"
filename=r"D:\Huzefa\Desktop\The Big Researcher\\" +filename+ ".txt"
url = "https://search.yahoo.com/search?p="+url+"&n=100"
res = requests.get(url)
soup = bs4.BeautifulSoup(res.text, "lxml")
file = open(filename , 'wb')
search = soup.select("a.ac-algo.fz-l.ac-21th.lh-24")
for link in search[:100]:
actlink = link.get('href')
f = actlink
file.write(unicodedata.normalize('NFD', re.sub("[\(\[].*?[\)\]]", "", f)).encode('ascii', 'ignore'))
file.write(unicodedata.normalize('NFD', re.sub("[\(\[].*?[\)\]]", "", os.linesep)).encode('ascii', 'ignore'))
file.close()
| true
| true
|
1c4136e5d43993b9aa877f3775580eb7b794667f
| 6,569
|
py
|
Python
|
mmdet/datasets/utils.py
|
ruiningTang/mmdetection
|
100b0b5e0edddc45af0812b9f1474493c61671ef
|
[
"Apache-2.0"
] | null | null | null |
mmdet/datasets/utils.py
|
ruiningTang/mmdetection
|
100b0b5e0edddc45af0812b9f1474493c61671ef
|
[
"Apache-2.0"
] | null | null | null |
mmdet/datasets/utils.py
|
ruiningTang/mmdetection
|
100b0b5e0edddc45af0812b9f1474493c61671ef
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import warnings
from mmcv.cnn import VGG
from mmcv.runner.hooks import HOOKS, Hook
from mmdet.datasets.builder import PIPELINES
from mmdet.datasets.pipelines import LoadAnnotations, LoadImageFromFile
from mmdet.models.dense_heads import GARPNHead, RPNHead, RankBasedRPNHead
from mmdet.models.roi_heads.mask_heads import FusedSemanticHead
def replace_ImageToTensor(pipelines):
"""Replace the ImageToTensor transform in a data pipeline to
DefaultFormatBundle, which is normally useful in batch inference.
Args:
pipelines (list[dict]): Data pipeline configs.
Returns:
list: The new pipeline list with all ImageToTensor replaced by
DefaultFormatBundle.
Examples:
>>> pipelines = [
... dict(type='LoadImageFromFile'),
... dict(
... type='MultiScaleFlipAug',
... img_scale=(1333, 800),
... flip=False,
... transforms=[
... dict(type='Resize', keep_ratio=True),
... dict(type='RandomFlip'),
... dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]),
... dict(type='Pad', size_divisor=32),
... dict(type='ImageToTensor', keys=['img']),
... dict(type='Collect', keys=['img']),
... ])
... ]
>>> expected_pipelines = [
... dict(type='LoadImageFromFile'),
... dict(
... type='MultiScaleFlipAug',
... img_scale=(1333, 800),
... flip=False,
... transforms=[
... dict(type='Resize', keep_ratio=True),
... dict(type='RandomFlip'),
... dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]),
... dict(type='Pad', size_divisor=32),
... dict(type='DefaultFormatBundle'),
... dict(type='Collect', keys=['img']),
... ])
... ]
>>> assert expected_pipelines == replace_ImageToTensor(pipelines)
"""
pipelines = copy.deepcopy(pipelines)
for i, pipeline in enumerate(pipelines):
if pipeline['type'] == 'MultiScaleFlipAug':
assert 'transforms' in pipeline
pipeline['transforms'] = replace_ImageToTensor(
pipeline['transforms'])
elif pipeline['type'] == 'ImageToTensor':
warnings.warn(
'"ImageToTensor" pipeline is replaced by '
'"DefaultFormatBundle" for batch inference. It is '
'recommended to manually replace it in the test '
'data pipeline in your config file.', UserWarning)
pipelines[i] = {'type': 'DefaultFormatBundle'}
return pipelines
def get_loading_pipeline(pipeline):
"""Only keep loading image and annotations related configuration.
Args:
pipeline (list[dict]): Data pipeline configs.
Returns:
list[dict]: The new pipeline list with only keep
loading image and annotations related configuration.
Examples:
>>> pipelines = [
... dict(type='LoadImageFromFile'),
... dict(type='LoadAnnotations', with_bbox=True),
... dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
... dict(type='RandomFlip', flip_ratio=0.5),
... dict(type='Normalize', **img_norm_cfg),
... dict(type='Pad', size_divisor=32),
... dict(type='DefaultFormatBundle'),
... dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
... ]
>>> expected_pipelines = [
... dict(type='LoadImageFromFile'),
... dict(type='LoadAnnotations', with_bbox=True)
... ]
>>> assert expected_pipelines ==\
... get_loading_pipeline(pipelines)
"""
loading_pipeline_cfg = []
for cfg in pipeline:
obj_cls = PIPELINES.get(cfg['type'])
# TODO:use more elegant way to distinguish loading modules
if obj_cls is not None and obj_cls in (LoadImageFromFile,
LoadAnnotations):
loading_pipeline_cfg.append(cfg)
assert len(loading_pipeline_cfg) == 2, \
'The data pipeline in your config file must include ' \
'loading image and annotations related pipeline.'
return loading_pipeline_cfg
@HOOKS.register_module()
class NumClassCheckHook(Hook):
def _check_head(self, runner):
"""Check whether the `num_classes` in head matches the length of
`CLASSES` in `dataset`.
Args:
runner (obj:`EpochBasedRunner`): Epoch based Runner.
"""
model = runner.model
dataset = runner.data_loader.dataset
if dataset.CLASSES is None:
runner.logger.warning(
f'Please set `CLASSES` '
                f'in the {dataset.__class__.__name__} and '
f'check if it is consistent with the `num_classes` '
f'of head')
else:
assert type(dataset.CLASSES) is not str, \
                (f'`CLASSES` in {dataset.__class__.__name__} '
                f'should be a tuple of str. '
f'Add comma if number of classes is 1 as '
f'CLASSES = ({dataset.CLASSES},)')
for name, module in model.named_modules():
if hasattr(module, 'num_classes') and not isinstance(
module, (RPNHead, VGG, FusedSemanticHead, GARPNHead, RankBasedRPNHead)):
assert module.num_classes == len(dataset.CLASSES), \
(f'The `num_classes` ({module.num_classes}) in '
f'{module.__class__.__name__} of '
                        f'{model.__class__.__name__} does not match '
f'the length of `CLASSES` '
                        f'({len(dataset.CLASSES)}) in '
f'{dataset.__class__.__name__}')
def before_train_epoch(self, runner):
"""Check whether the training dataset is compatible with head.
Args:
runner (obj:`EpochBasedRunner`): Epoch based Runner.
"""
self._check_head(runner)
def before_val_epoch(self, runner):
"""Check whether the dataset in val epoch is compatible with head.
Args:
runner (obj:`EpochBasedRunner`): Epoch based Runner.
"""
self._check_head(runner)
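# A hedged usage sketch of replace_ImageToTensor, matching its docstring
# example above (a UserWarning is emitted when the substitution happens):
test_pipeline = [dict(type='ImageToTensor', keys=['img'])]
assert replace_ImageToTensor(test_pipeline) == [{'type': 'DefaultFormatBundle'}]
# NumClassCheckHook is normally enabled from a config file; the exact fragment
# below is an assumption based on the @HOOKS.register_module() call above:
# custom_hooks = [dict(type='NumClassCheckHook')]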
| 39.812121
| 96
| 0.558228
|
import copy
import warnings
from mmcv.cnn import VGG
from mmcv.runner.hooks import HOOKS, Hook
from mmdet.datasets.builder import PIPELINES
from mmdet.datasets.pipelines import LoadAnnotations, LoadImageFromFile
from mmdet.models.dense_heads import GARPNHead, RPNHead, RankBasedRPNHead
from mmdet.models.roi_heads.mask_heads import FusedSemanticHead
def replace_ImageToTensor(pipelines):
pipelines = copy.deepcopy(pipelines)
for i, pipeline in enumerate(pipelines):
if pipeline['type'] == 'MultiScaleFlipAug':
assert 'transforms' in pipeline
pipeline['transforms'] = replace_ImageToTensor(
pipeline['transforms'])
elif pipeline['type'] == 'ImageToTensor':
warnings.warn(
'"ImageToTensor" pipeline is replaced by '
'"DefaultFormatBundle" for batch inference. It is '
'recommended to manually replace it in the test '
'data pipeline in your config file.', UserWarning)
pipelines[i] = {'type': 'DefaultFormatBundle'}
return pipelines
def get_loading_pipeline(pipeline):
loading_pipeline_cfg = []
for cfg in pipeline:
obj_cls = PIPELINES.get(cfg['type'])
if obj_cls is not None and obj_cls in (LoadImageFromFile,
LoadAnnotations):
loading_pipeline_cfg.append(cfg)
assert len(loading_pipeline_cfg) == 2, \
'The data pipeline in your config file must include ' \
'loading image and annotations related pipeline.'
return loading_pipeline_cfg
@HOOKS.register_module()
class NumClassCheckHook(Hook):
def _check_head(self, runner):
model = runner.model
dataset = runner.data_loader.dataset
if dataset.CLASSES is None:
runner.logger.warning(
f'Please set `CLASSES` '
                f'in the {dataset.__class__.__name__} and '
f'check if it is consistent with the `num_classes` '
f'of head')
else:
assert type(dataset.CLASSES) is not str, \
                (f'`CLASSES` in {dataset.__class__.__name__} '
                f'should be a tuple of str. '
f'Add comma if number of classes is 1 as '
f'CLASSES = ({dataset.CLASSES},)')
for name, module in model.named_modules():
if hasattr(module, 'num_classes') and not isinstance(
module, (RPNHead, VGG, FusedSemanticHead, GARPNHead, RankBasedRPNHead)):
assert module.num_classes == len(dataset.CLASSES), \
(f'The `num_classes` ({module.num_classes}) in '
f'{module.__class__.__name__} of '
                        f'{model.__class__.__name__} does not match '
f'the length of `CLASSES` '
                        f'({len(dataset.CLASSES)}) in '
f'{dataset.__class__.__name__}')
def before_train_epoch(self, runner):
self._check_head(runner)
def before_val_epoch(self, runner):
self._check_head(runner)
| true
| true
|
1c4136fdc0121d826916494d39f576e839dcc318
| 58
|
py
|
Python
|
background_task/models.py
|
mayudong07/django-background-tasks
|
1288d6385ccfd53cee1e9d280aa489b68002277b
|
[
"BSD-3-Clause"
] | null | null | null |
background_task/models.py
|
mayudong07/django-background-tasks
|
1288d6385ccfd53cee1e9d280aa489b68002277b
|
[
"BSD-3-Clause"
] | null | null | null |
background_task/models.py
|
mayudong07/django-background-tasks
|
1288d6385ccfd53cee1e9d280aa489b68002277b
|
[
"BSD-3-Clause"
] | null | null | null |
from .base_models import *
from .models_completed import *
| 29
| 31
| 0.810345
|
from .base_models import *
from .models_completed import *
| true
| true
|
1c413753c52ffb67a379592a8901e302efcba41f
| 8,169
|
py
|
Python
|
aincrad/views.py
|
ankanb240/otis-web
|
45eda65b419705c65c02b15872a137969d53d8e9
|
[
"MIT"
] | null | null | null |
aincrad/views.py
|
ankanb240/otis-web
|
45eda65b419705c65c02b15872a137969d53d8e9
|
[
"MIT"
] | null | null | null |
aincrad/views.py
|
ankanb240/otis-web
|
45eda65b419705c65c02b15872a137969d53d8e9
|
[
"MIT"
] | null | null | null |
from hashlib import sha256
from typing import Any, Dict
from allauth.socialaccount.models import SocialAccount
from arch.models import Hint, Problem
from core.models import Unit
from dashboard.models import ProblemSuggestion, PSet
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.db.models.query_utils import Q
from django.http.request import HttpRequest
from django.http.response import JsonResponse
from django.shortcuts import get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from roster.models import Invoice, Student, StudentRegistration, UnitInquiry
from sql_util.aggregates import SubqueryCount
from unidecode import unidecode
# Create your views here.
def venueq_handler(action: str, request: HttpRequest) -> JsonResponse:
if action == 'grade_problem_set':
# mark problem set as done
pset = get_object_or_404(PSet, pk=request.POST['pk'])
pset.approved = bool(request.POST['approved'])
pset.clubs = request.POST.get('clubs', None)
pset.hours = request.POST.get('hours', None)
pset.save()
if pset.resubmitted is False:
# unlock the unit the student asked for
finished_unit = get_object_or_404(Unit, pk=request.POST['unit__pk'])
student = get_object_or_404(Student, pk=request.POST['student__pk'])
if 'next_unit_to_unlock__pk' not in request.POST:
unlockable_units = student.generate_curriculum_queryset().exclude(
has_pset=True
).exclude(id__in=student.unlocked_units.all())
target = unlockable_units.first()
else:
target = get_object_or_404(Unit, pk=request.POST['next_unit_to_unlock__pk'])
if target is not None:
student.unlocked_units.add(target)
student.unlocked_units.remove(finished_unit)
return JsonResponse({'result': 'success'}, status=200)
elif action == 'approve_inquiries':
for inquiry in UnitInquiry.objects.filter(status="NEW", student__semester__active=True):
inquiry.run_accept()
return JsonResponse({'result': 'success'}, status=200)
elif action == 'mark_suggestion':
suggestion = ProblemSuggestion.objects.get(pk=request.POST['pk'])
suggestion.resolved = True
suggestion.save()
return JsonResponse({'result': 'success'}, status=200)
elif action == 'init':
inquiries = UnitInquiry.objects.filter(
status="NEW", student__semester__active=True
).annotate(
total_inquiry_count=SubqueryCount('student__unitinquiry'),
unlock_inquiry_count=SubqueryCount(
'student__unitinquiry', filter=Q(action_type="UNLOCK")
),
)
data: Dict[str, Any] = {
'_name':
'Root',
'_children':
[
{
'_name':
'Problem sets',
'_children':
list(
PSet.objects.filter(approved=False, student__semester__active=True).values(
'pk',
'approved',
'resubmitted',
'feedback',
'special_notes',
'student__pk',
'student__user__first_name',
'student__user__last_name',
'student__user__email',
'hours',
'clubs',
'eligible',
'unit__group__name',
'unit__code',
'unit__pk',
'next_unit_to_unlock__group__name',
'next_unit_to_unlock__code',
'next_unit_to_unlock__pk',
'upload__content',
)
)
}, {
'_name':
'Inquiries',
'inquiries':
list(
inquiries.values(
'pk',
'unit__group__name',
'unit__code',
'student__user__first_name',
'student__user__last_name',
'explanation',
'created_at',
'unlock_inquiry_count',
'total_inquiry_count',
)
),
}, {
'_name':
'Suggestions',
'_children':
list(
ProblemSuggestion.objects.filter(resolved=False).values(
'pk',
'created_at',
'student__user__first_name',
'student__user__last_name',
'student__user__email',
'source',
'description',
'statement',
'solution',
'comments',
'acknowledge',
'weight',
'unit__group__name',
'unit__code',
)
)
}
],
}
return JsonResponse(data, status=200)
else:
raise Exception("No such command")
def discord_handler(action: str, request: HttpRequest) -> JsonResponse:
assert action == 'register'
# check whether social account exists
uid = int(request.POST['uid'])
queryset = SocialAccount.objects.filter(uid=uid)
if not (n := len(queryset)) == 1:
return JsonResponse({'result': 'nonexistent', 'length': n})
social = queryset.get() # get the social account for this; should never 404
user = social.user
student = Student.objects.filter(user=user, semester__active=True).first()
regform = StudentRegistration.objects.filter(
user=user, container__semester__active=True
).first()
if student is not None:
return JsonResponse(
{
'result': 'success',
'user': social.user.username,
'name': social.user.get_full_name(),
'uid': uid,
'track': student.track,
'gender': regform.gender if regform is not None else '?',
'country': regform.country if regform is not None else '???',
'num_years': Student.objects.filter(user=user).count(),
}
)
elif student is None and regform is not None:
return JsonResponse({'result': 'pending'})
else:
return JsonResponse({'result': 'unregistered'})
def problems_handler(action: str, request: HttpRequest) -> JsonResponse:
puid = request.POST['puid'].upper()
if action == 'get_hints':
problem, _ = Problem.objects.get_or_create(puid=puid)
return JsonResponse(
{'hints': list(Hint.objects.filter(problem=problem).values('keywords', 'id', 'number'))}
)
elif action == 'add_hints':
problem, _ = Problem.objects.get_or_create(puid=puid)
content = request.POST['content']
existing_hint_numbers = set(
Hint.objects.filter(problem=problem).values_list('number', flat=True)
)
if 'number' in request.POST:
number = int(request.POST['number'])
else:
number = 0
while number in existing_hint_numbers:
number += 10
keywords = request.POST.get('keywords', "imported from discord")
hint = Hint.objects.create(
problem=problem, number=number, content=content, keywords=keywords
)
return JsonResponse({'pk': hint.pk, 'number': number})
else:
raise NotImplementedError(action)
def invoice_handler(action: str, request: HttpRequest) -> JsonResponse:
def sanitize(s: str, last: bool = False) -> str:
return unidecode(s).lower().split(' ', maxsplit=1)[-1 if last else 0]
invoices = Invoice.objects.filter(student__semester__active=True)
invoices = invoices.select_related('student__user')
fields = ('adjustment', 'extras', 'total_paid')
data = request.POST.dict()
del data['token']
del data['action']
for inv in invoices:
if inv.student.user is not None:
first_name = sanitize(inv.student.user.first_name)
last_name = sanitize(inv.student.user.last_name, last=True)
for k in fields:
if (x := data.pop(f'{k}.{first_name}.{last_name}', None)) is not None:
assert isinstance(x, str)
setattr(inv, k, float(x))
Invoice.objects.bulk_update(invoices, fields, batch_size=25)
return JsonResponse(data)
@csrf_exempt
@require_POST
def api(request: HttpRequest) -> JsonResponse:
action = request.POST.get('action', None)
if action is None:
raise SuspiciousOperation('You need to provide an action, silly')
if settings.PRODUCTION:
token = request.POST.get('token')
assert token is not None
if not sha256(token.encode('ascii')).hexdigest() == settings.API_TARGET_HASH:
return JsonResponse({'error': "☕"}, status=418)
if action in ('grade_problem_set', 'approve_inquiries', 'mark_suggestion', 'init'):
return venueq_handler(action, request)
elif action in ('register', ):
return discord_handler(action, request)
elif action in ('get_hints', 'add_hints'):
return problems_handler(action, request)
elif action in ('invoice', ):
return invoice_handler(action, request)
else:
return JsonResponse({'error': 'No such command'}, status=400)
# vim: fdm=indent
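# A hedged sketch of a client call against the api() view above; the URL path
# is hypothetical, while 'action', 'token', and 'puid' match the POST fields
# the handlers actually read.
import requests

resp = requests.post(
    'https://otis.example.com/aincrad/api/',  # assumed route for this view
    data={'action': 'get_hints', 'token': 'plaintext-api-token', 'puid': '18SLA7'},
)
print(resp.status_code, resp.json())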
| 32.416667
| 91
| 0.68613
|
from hashlib import sha256
from typing import Any, Dict
from allauth.socialaccount.models import SocialAccount
from arch.models import Hint, Problem
from core.models import Unit
from dashboard.models import ProblemSuggestion, PSet
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.db.models.query_utils import Q
from django.http.request import HttpRequest
from django.http.response import JsonResponse
from django.shortcuts import get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from roster.models import Invoice, Student, StudentRegistration, UnitInquiry
from sql_util.aggregates import SubqueryCount
from unidecode import unidecode
def venueq_handler(action: str, request: HttpRequest) -> JsonResponse:
if action == 'grade_problem_set':
pset = get_object_or_404(PSet, pk=request.POST['pk'])
pset.approved = bool(request.POST['approved'])
pset.clubs = request.POST.get('clubs', None)
pset.hours = request.POST.get('hours', None)
pset.save()
if pset.resubmitted is False:
finished_unit = get_object_or_404(Unit, pk=request.POST['unit__pk'])
student = get_object_or_404(Student, pk=request.POST['student__pk'])
if 'next_unit_to_unlock__pk' not in request.POST:
unlockable_units = student.generate_curriculum_queryset().exclude(
has_pset=True
).exclude(id__in=student.unlocked_units.all())
target = unlockable_units.first()
else:
target = get_object_or_404(Unit, pk=request.POST['next_unit_to_unlock__pk'])
if target is not None:
student.unlocked_units.add(target)
student.unlocked_units.remove(finished_unit)
return JsonResponse({'result': 'success'}, status=200)
elif action == 'approve_inquiries':
for inquiry in UnitInquiry.objects.filter(status="NEW", student__semester__active=True):
inquiry.run_accept()
return JsonResponse({'result': 'success'}, status=200)
elif action == 'mark_suggestion':
suggestion = ProblemSuggestion.objects.get(pk=request.POST['pk'])
suggestion.resolved = True
suggestion.save()
return JsonResponse({'result': 'success'}, status=200)
elif action == 'init':
inquiries = UnitInquiry.objects.filter(
status="NEW", student__semester__active=True
).annotate(
total_inquiry_count=SubqueryCount('student__unitinquiry'),
unlock_inquiry_count=SubqueryCount(
'student__unitinquiry', filter=Q(action_type="UNLOCK")
),
)
data: Dict[str, Any] = {
'_name':
'Root',
'_children':
[
{
'_name':
'Problem sets',
'_children':
list(
PSet.objects.filter(approved=False, student__semester__active=True).values(
'pk',
'approved',
'resubmitted',
'feedback',
'special_notes',
'student__pk',
'student__user__first_name',
'student__user__last_name',
'student__user__email',
'hours',
'clubs',
'eligible',
'unit__group__name',
'unit__code',
'unit__pk',
'next_unit_to_unlock__group__name',
'next_unit_to_unlock__code',
'next_unit_to_unlock__pk',
'upload__content',
)
)
}, {
'_name':
'Inquiries',
'inquiries':
list(
inquiries.values(
'pk',
'unit__group__name',
'unit__code',
'student__user__first_name',
'student__user__last_name',
'explanation',
'created_at',
'unlock_inquiry_count',
'total_inquiry_count',
)
),
}, {
'_name':
'Suggestions',
'_children':
list(
ProblemSuggestion.objects.filter(resolved=False).values(
'pk',
'created_at',
'student__user__first_name',
'student__user__last_name',
'student__user__email',
'source',
'description',
'statement',
'solution',
'comments',
'acknowledge',
'weight',
'unit__group__name',
'unit__code',
)
)
}
],
}
return JsonResponse(data, status=200)
else:
raise Exception("No such command")
def discord_handler(action: str, request: HttpRequest) -> JsonResponse:
assert action == 'register'
uid = int(request.POST['uid'])
queryset = SocialAccount.objects.filter(uid=uid)
if not (n := len(queryset)) == 1:
return JsonResponse({'result': 'nonexistent', 'length': n})
social = queryset.get()
user = social.user
student = Student.objects.filter(user=user, semester__active=True).first()
regform = StudentRegistration.objects.filter(
user=user, container__semester__active=True
).first()
if student is not None:
return JsonResponse(
{
'result': 'success',
'user': social.user.username,
'name': social.user.get_full_name(),
'uid': uid,
'track': student.track,
'gender': regform.gender if regform is not None else '?',
'country': regform.country if regform is not None else '???',
'num_years': Student.objects.filter(user=user).count(),
}
)
elif student is None and regform is not None:
return JsonResponse({'result': 'pending'})
else:
return JsonResponse({'result': 'unregistered'})
def problems_handler(action: str, request: HttpRequest) -> JsonResponse:
puid = request.POST['puid'].upper()
if action == 'get_hints':
problem, _ = Problem.objects.get_or_create(puid=puid)
return JsonResponse(
{'hints': list(Hint.objects.filter(problem=problem).values('keywords', 'id', 'number'))}
)
elif action == 'add_hints':
problem, _ = Problem.objects.get_or_create(puid=puid)
content = request.POST['content']
existing_hint_numbers = set(
Hint.objects.filter(problem=problem).values_list('number', flat=True)
)
if 'number' in request.POST:
number = int(request.POST['number'])
else:
number = 0
while number in existing_hint_numbers:
number += 10
keywords = request.POST.get('keywords', "imported from discord")
hint = Hint.objects.create(
problem=problem, number=number, content=content, keywords=keywords
)
return JsonResponse({'pk': hint.pk, 'number': number})
else:
raise NotImplementedError(action)
def invoice_handler(action: str, request: HttpRequest) -> JsonResponse:
def sanitize(s: str, last: bool = False) -> str:
return unidecode(s).lower().split(' ', maxsplit=1)[-1 if last else 0]
invoices = Invoice.objects.filter(student__semester__active=True)
invoices = invoices.select_related('student__user')
fields = ('adjustment', 'extras', 'total_paid')
data = request.POST.dict()
del data['token']
del data['action']
for inv in invoices:
if inv.student.user is not None:
first_name = sanitize(inv.student.user.first_name)
last_name = sanitize(inv.student.user.last_name, last=True)
for k in fields:
if (x := data.pop(f'{k}.{first_name}.{last_name}', None)) is not None:
assert isinstance(x, str)
setattr(inv, k, float(x))
Invoice.objects.bulk_update(invoices, fields, batch_size=25)
return JsonResponse(data)
@csrf_exempt
@require_POST
def api(request: HttpRequest) -> JsonResponse:
action = request.POST.get('action', None)
if action is None:
raise SuspiciousOperation('You need to provide an action, silly')
if settings.PRODUCTION:
token = request.POST.get('token')
assert token is not None
if not sha256(token.encode('ascii')).hexdigest() == settings.API_TARGET_HASH:
return JsonResponse({'error': "☕"}, status=418)
if action in ('grade_problem_set', 'approve_inquiries', 'mark_suggestion', 'init'):
return venueq_handler(action, request)
elif action in ('register', ):
return discord_handler(action, request)
elif action in ('get_hints', 'add_hints'):
return problems_handler(action, request)
elif action in ('invoice', ):
return invoice_handler(action, request)
else:
return JsonResponse({'error': 'No such command'}, status=400)
| true
| true
|