id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
import webbrowser


def open(keyword):
    """Open a Google Image search for *keyword* in the default browser.

    NOTE(review): this shadows the builtin ``open()``; name kept for
    compatibility with existing callers.
    """
    query = keyword.replace(' ', '+')
    search_url = (
        'https://www.google.co.jp/search?q=' + query + '&source=lnms&tbm=isch'
    )
    webbrowser.open(search_url, autoraise=True)
4919860 | <reponame>bambielli-flex/dagster
import pytest
from dagster import (
DagsterInvariantViolationError,
DagsterResourceFunctionError,
RunConfig,
execute_pipeline,
)
from dagster_examples.toys.many_events import define_many_events_pipeline
from dagster_examples.toys.resources import define_resource_pipeline
from dagster_examples.toys.error_monster import define_error_monster_pipeline
def test_define_repo():
    """Smoke test: the toy repository definition is constructible and truthy."""
    from dagster_examples.toys.repo import define_repo

    repo = define_repo()
    assert repo
def test_many_events_pipeline():
    """The many-events toy pipeline executes successfully with no config."""
    result = execute_pipeline(define_many_events_pipeline())
    assert result.success
def test_resource_pipeline_no_config():
    """Without config, solid 'one' transforms to 2 (default resource setup)."""
    result = execute_pipeline(define_resource_pipeline())
    solid_result = result.result_for_solid('one')
    assert solid_result.transformed_value() == 2
def test_resource_pipeline_with_config():
    """With resource R1 configured to 2, solid 'one' transforms to 3."""
    env = {'resources': {'R1': {'config': 2}}}
    result = execute_pipeline(define_resource_pipeline(), environment_dict=env)
    solid_result = result.result_for_solid('one')
    assert solid_result.transformed_value() == 3
def _error_monster_env(middle_throws=False, middle_wrong_type=False,
                       throw_on_resource_init=False):
    """Build the error-monster environment dict shared by the tests below.

    The original tests repeated this dict five times; only the 'middle' solid
    config and the resource-init flag ever varied, so those are parameters.
    """
    return {
        'solids': {
            'start': {'config': {'throw_in_solid': False, 'return_wrong_type': False}},
            'middle': {
                'config': {
                    'throw_in_solid': middle_throws,
                    'return_wrong_type': middle_wrong_type,
                }
            },
            'end': {'config': {'throw_in_solid': False, 'return_wrong_type': False}},
        },
        'resources': {
            'errorable_resource': {
                'config': {'throw_on_resource_init': throw_on_resource_init}
            }
        },
    }


def test_error_monster_success():
    """Pipeline succeeds in the default mode and in 'errorable_mode'."""
    assert execute_pipeline(
        define_error_monster_pipeline(),
        environment_dict=_error_monster_env(),
    )
    assert execute_pipeline(
        define_error_monster_pipeline(),
        environment_dict=_error_monster_env(),
        run_config=RunConfig(mode='errorable_mode'),
    )


def test_error_monster_wrong_mode():
    """An unknown mode name raises DagsterInvariantViolationError."""
    with pytest.raises(DagsterInvariantViolationError):
        assert execute_pipeline(
            define_error_monster_pipeline(),
            environment_dict=_error_monster_env(),
            run_config=RunConfig(mode='nope'),
        )


def test_error_monster_success_error_on_resource():
    """A resource that throws during init raises DagsterResourceFunctionError."""
    with pytest.raises(DagsterResourceFunctionError):
        execute_pipeline(
            define_error_monster_pipeline(),
            environment_dict=_error_monster_env(throw_on_resource_init=True),
        )


def test_error_monster_type_error():
    """A solid returning the wrong type raises DagsterInvariantViolationError."""
    with pytest.raises(DagsterInvariantViolationError):
        execute_pipeline(
            define_error_monster_pipeline(),
            environment_dict=_error_monster_env(middle_wrong_type=True),
        )
| StarcoderdataPython |
1907156 | <filename>tests/test_helpers.py<gh_stars>1-10
# -*- coding: utf-8 -*-
import pytest
from .context import runtimedocs
@pytest.mark.parametrize('value,expected_type', [
    (1, "<class 'int'>"),
    ('a', "<class 'str'>"),
    (lambda: None, "<class 'function'>"),
    ([], "<class 'list'>"),
    ((1,), "<class 'tuple'>"),
    ({}, "<class 'dict'>"),
])
def test_get_type(value, expected_type):
    """get_type renders the builtin type-repr string for each sample value."""
    actual = runtimedocs.helpers.get_type(value)
    assert actual == expected_type
@pytest.mark.parametrize('value,expected_type', [
    (1, "<class 'int'>"),
    ('a', "<class 'str'>"),
    ([], "<class 'list'>"),
    ((1,), "<class 'tuple'>"),
    ({}, "<class 'dict'>"),
])
def test_default_type_parser(value, expected_type):
    """Every parsed value has 'value'; containers add 'len'; dicts add 'keys'."""
    parsed = runtimedocs.helpers.default_type_parser(value)
    assert 'value' in parsed
    container_types = ("<class 'list'>", "<class 'tuple'>", "<class 'dict'>")
    if expected_type == "<class 'dict'>":
        assert 'keys' in parsed
    if expected_type in container_types:
        assert 'len' in parsed
def test_function_parser():
    """function_parser reports the expected metadata keys for a callable."""
    parsed = runtimedocs.helpers.function_parser(lambda: None)
    for key in ('name', 'signature', 'fullargspec', 'isbuiltin'):
        assert key in parsed
def test_class_parser():
    """class_parser reports function metadata plus the inheritance tree."""
    class MyClass(object):
        pass

    parsed = runtimedocs.helpers.class_parser(MyClass)
    expected_keys = ('name', 'signature', 'fullargspec', 'isbuiltin',
                     'inheritance_tree')
    for key in expected_keys:
        assert key in parsed
def test_caller_name():
    """caller_name(skip=n) walks n frames up the stack and returns the dotted
    name of that frame's function.

    NOTE: the nested function names below are part of the expected values
    (they appear in the returned qualified names), so they must not be renamed.
    """
    def outer():
        def middle():
            # skip=2 -> the function that called middle(), i.e. outer.
            my_direct_caller = runtimedocs.helpers.caller_name(skip=2)
            assert my_direct_caller == 'tests.test_helpers.outer'
            def inner():
                # skip=2 -> middle; skip=3 -> one frame further up, outer.
                my_direct_caller = runtimedocs.helpers.caller_name(skip=2)
                assert my_direct_caller == 'tests.test_helpers.middle'
                my_caller_caller = runtimedocs.helpers.caller_name(skip=3)
                assert my_caller_caller == 'tests.test_helpers.outer'
            inner()
        middle()
    outer()
| StarcoderdataPython |
11330709 | <gh_stars>0
import string
import regex
from collections import deque
from flanker.mime.message.headers import encodedword, parametrized
from flanker.mime.message.headers.wrappers import ContentType, WithParams
from flanker.mime.message.errors import DecodingError
from flanker.utils import is_pure_ascii
# Hard upper bound on a single header line; split() raises DecodingError for
# anything longer.
MAX_LINE_LENGTH = 10000
def normalize(header):
    """Canonicalize a header name, e.g. 'content-TYPE' -> 'Content-Type'."""
    lowered = header.lower()
    return string.capwords(lowered, '-')
def parse_stream(stream):
    """Reads the incoming stream and returns list of tuples"""
    parsed = deque()
    for raw_header in unfold(split(stream)):
        parsed.append(parse_header(raw_header))
    return parsed
def parse_header(header):
    """Accept a raw header (name, colon, folded value) and return the parsed
    (name, value) pair. Non-ascii header names are rejected."""
    name, raw_value = split2(header)
    if not is_pure_ascii(name):
        raise DecodingError("Non-ascii header name")
    unfolded_value = encodedword.unfold(raw_value)
    return name, parse_header_value(name, unfolded_value)
def parse_header_value(name, val):
    """Decode a header value: parametrized (content-*) headers become
    ContentType/WithParams objects, encoded words are decoded, plain ascii
    passes through unchanged."""
    # Non-ascii is only tolerated for non-parametrized headers, where it is
    # decoded to unicode; parametrized headers must be pure ascii.
    if not is_pure_ascii(val):
        if parametrized.is_parametrized(name, val):
            raise DecodingError("Unsupported value in content- header")
        return to_unicode(val)
    else:
        if parametrized.is_parametrized(name, val):
            val, params = parametrized.decode(val)
            if name == 'Content-Type':
                main, sub = parametrized.fix_content_type(val)
                return ContentType(main, sub, params)
            else:
                return WithParams(val, params)
        elif "=?" in val:
            # may be encoded word
            return encodedword.decode(val)
        else:
            return val
def is_empty(line):
    """True for a bare line terminator (the blank line ending the headers)."""
    terminators = ('\r\n', '\r', '\n')
    return line in terminators
# Matches anything that may legally start a header line: the unix "From "
# envelope, "Name:" (printable ascii \041-\176 minus the colon \072), or
# folding whitespace for a continuation line.
RE_HEADER = regex.compile(r'^(From |[\041-\071\073-\176]+:|[\t ])')
def split(fp):
    """Read lines with headers until the start of body"""
    lines = deque()
    for line in fp:
        if len(line) > MAX_LINE_LENGTH:
            raise DecodingError(
                "Line is too long: {}".format(len(line)))
        # A blank line terminates the header section.
        if is_empty(line):
            break
        # tricky case if it's not a header and not an empty line
        # ususally means that user forgot to separate the body and newlines
        # so "unread" this line here, what means to treat it like a body
        # NOTE(review): the seek-back assumes fp.tell() stays in sync while
        # iterating lines -- confirm this holds for the stream types used.
        if not RE_HEADER.match(line):
            fp.seek(fp.tell() - len(line))
            break
        lines.append(line)
    return lines
def unfold(lines):
    """Merge folded continuation lines into their headers and strip the
    trailing line endings."""
    merged = deque()
    for line in lines:
        if line.startswith("From "):
            # Drop the unix "From " envelope line.
            continue
        if line[0] in ' \t':
            # Leading whitespace marks a continuation of the previous header.
            extend(merged, line)
        else:
            merged.append(line)
    result = deque()
    for entry in merged:
        if isinstance(entry, deque):
            result.append("".join(entry).rstrip("\r\n"))
        else:
            result.append(entry.rstrip("\r\n"))
    return result
def extend(headers, line):
    """Attach a folded continuation *line* to the last header in *headers*.

    The last entry becomes a deque of line fragments. A continuation that
    arrives before any header is invalid input and is silently dropped.
    """
    try:
        last = headers.pop()
    except IndexError:
        # Continuation with no preceding header: ignore it.
        return
    if isinstance(last, deque):
        last.append(line)
        headers.append(last)
    else:
        headers.append(deque((last, line)))
def split2(header):
    """Split 'Name: value' at the first colon into (normalized name,
    lstripped value); return (None, None) when there is no colon."""
    name, sep, value = header.partition(":")
    if not sep:
        return (None, None)
    return normalize(name.rstrip()), value.lstrip()
def to_unicode(val):
    """Return *val* as unicode, strictly decoding utf-8 byte strings.

    Raises DecodingError for byte strings that are neither ascii nor utf-8.
    NOTE: uses the Python 2 `unicode` builtin -- this module is Python 2 only.
    """
    if isinstance(val, unicode):
        return val
    else:
        try:
            return unicode(val, 'utf-8', 'strict')
        except UnicodeDecodeError:
            raise DecodingError("Non ascii or utf-8 header value")
| StarcoderdataPython |
3372138 | <gh_stars>0
import copy
from datetime import datetime
import logging
from typing import Iterator, Union, Optional, Tuple
import gridfs
from pymongo import MongoClient, ASCENDING
from bson import ObjectId
from ted_sws import config
from ted_sws.core.model.manifestation import XMLManifestation, RDFManifestation, METSManifestation, Manifestation
from ted_sws.core.model.metadata import NormalisedMetadata
from ted_sws.data_manager.adapters.repository_abc import NoticeRepositoryABC
from ted_sws.core.model.notice import Notice, NoticeStatus
logger = logging.getLogger(__name__)

# Field-name constants for the Mongo notice documents; kept in one place so
# the (de)serialisation code below cannot drift on key spellings.
MONGODB_COLLECTION_ID = "_id"
NOTICE_TED_ID = "ted_id"
NOTICE_STATUS = "status"
NOTICE_CREATED_AT = "created_at"
NOTICE_NORMALISED_METADATA = "normalised_metadata"
NOTICE_PREPROCESSED_XML_MANIFESTATION = "preprocessed_xml_manifestation"
NOTICE_DISTILLED_RDF_MANIFESTATION = "distilled_rdf_manifestation"
NOTICE_RDF_MANIFESTATION = "rdf_manifestation"
NOTICE_METS_MANIFESTATION = "mets_manifestation"
METADATA_PUBLICATION_DATE = "publication_date"
METADATA_DOCUMENT_SENT_DATE = "document_sent_date"
class NoticeRepository(NoticeRepositoryABC):
    """
    This repository is intended for storing Notice objects.
    """
    # Large manifestation payloads live in GridFS; the notice document itself
    # stores only the stringified GridFS ObjectIds in their place.
    _collection_name = "notice_collection"
    _database_name = config.MONGO_DB_AGGREGATES_DATABASE_NAME

    def __init__(self, mongodb_client: MongoClient, database_name: str = _database_name):
        self._database_name = database_name
        self.mongodb_client = mongodb_client
        notice_db = mongodb_client[self._database_name]
        self.file_storage = gridfs.GridFS(notice_db)
        self.collection = notice_db[self._collection_name]
        # Index GridFS file metadata by notice_id for fast per-notice lookup.
        self.file_storage_collection = notice_db["fs.files"]
        self.file_storage_collection.create_index([("notice_id", ASCENDING)])

    def get_file_content_from_grid_fs(self, file_id: str) -> str:
        """
        This method load file_content from GridFS by field_id.
        :param file_id:
        :return:
        """
        return self.file_storage.get(file_id=ObjectId(file_id)).read().decode("utf-8")

    def put_file_content_in_grid_fs(self, notice_id: str, file_content: str) -> ObjectId:
        """
        This method store file_content in GridFS and set notice_id as file metadata.
        :param notice_id:
        :param file_content:
        :return:
        """
        return self.file_storage.put(data=file_content.encode("utf-8"), notice_id=notice_id)

    def delete_files_by_notice_id(self, linked_file_ids: list):
        """
        This method delete all files from GridFS with specific notice_id in metadata.
        :param linked_file_ids:
        :return:
        """
        for linked_file_id in linked_file_ids:
            self.file_storage.delete(file_id=linked_file_id)

    def write_notice_fields_in_grid_fs(self, notice: Notice) -> Tuple[Notice, list, list]:
        """
        This method store large fields in GridFS.
        :param notice:
        :return:
        """
        # Deep-copy so the caller's Notice keeps its in-memory payloads.
        notice = copy.deepcopy(notice)
        # Files already linked to this notice (candidates for cleanup later).
        linked_file_ids = [linked_file._id for linked_file in
                           self.file_storage.find({"notice_id": notice.ted_id})]
        new_linked_file_ids = []

        def write_large_field(large_field: Manifestation):
            # Store the payload in GridFS and replace it in the object with
            # the stringified ObjectId.
            if (large_field is not None) and (large_field.object_data is not None):
                object_id = self.put_file_content_in_grid_fs(notice_id=notice.ted_id,
                                                             file_content=large_field.object_data)
                large_field.object_data = str(object_id)
                new_linked_file_ids.append(object_id)

        write_large_field(notice.xml_manifestation)
        write_large_field(notice.rdf_manifestation)
        write_large_field(notice.mets_manifestation)
        write_large_field(notice.distilled_rdf_manifestation)
        write_large_field(notice.preprocessed_xml_manifestation)
        if notice.rdf_manifestation:
            for validation_report in notice.rdf_manifestation.shacl_validations:
                write_large_field(validation_report)
            for validation_report in notice.rdf_manifestation.sparql_validations:
                write_large_field(validation_report)
        if notice.distilled_rdf_manifestation:
            for validation_report in notice.distilled_rdf_manifestation.shacl_validations:
                write_large_field(validation_report)
            for validation_report in notice.distilled_rdf_manifestation.sparql_validations:
                write_large_field(validation_report)
        return notice, linked_file_ids, new_linked_file_ids

    def load_notice_fields_from_grid_fs(self, notice: Notice) -> Notice:
        """
        This method loads large fields from GridFS.
        :param notice:
        :return:
        """
        def load_large_field(large_field: Manifestation):
            # Inverse of write_large_field: swap the stored ObjectId string
            # back for the actual file content.
            if (large_field is not None) and (large_field.object_data is not None):
                large_field.object_data = self.get_file_content_from_grid_fs(file_id=large_field.object_data)

        load_large_field(large_field=notice.xml_manifestation)
        load_large_field(large_field=notice.rdf_manifestation)
        load_large_field(large_field=notice.mets_manifestation)
        load_large_field(large_field=notice.distilled_rdf_manifestation)
        load_large_field(large_field=notice.preprocessed_xml_manifestation)
        if notice.rdf_manifestation:
            for validation_report in notice.rdf_manifestation.shacl_validations:
                load_large_field(validation_report)
            for validation_report in notice.rdf_manifestation.sparql_validations:
                load_large_field(validation_report)
        if notice.distilled_rdf_manifestation:
            for validation_report in notice.distilled_rdf_manifestation.shacl_validations:
                load_large_field(validation_report)
            for validation_report in notice.distilled_rdf_manifestation.sparql_validations:
                load_large_field(validation_report)
        return notice

    @staticmethod
    def _create_notice_from_repository_result(notice_dict: dict) -> Union[Notice, None]:
        """
        This method allows you to create a Notice from the dictionary extracted from the repository.
        :param notice_dict:
        :return:
        """
        def init_object_from_dict(object_class, key):
            if notice_dict[key]:
                return object_class(**notice_dict[key])
            return None

        def date_field_to_string(date_field: datetime):
            if date_field:
                return date_field.isoformat()
            return None

        if notice_dict:
            # Strip the Mongo _id and convert datetimes back to the ISO
            # strings the model constructors accept.
            del notice_dict[MONGODB_COLLECTION_ID]
            notice_dict[NOTICE_CREATED_AT] = notice_dict[NOTICE_CREATED_AT].isoformat()
            if notice_dict[NOTICE_NORMALISED_METADATA]:
                notice_dict[NOTICE_NORMALISED_METADATA][METADATA_PUBLICATION_DATE] = date_field_to_string(
                    notice_dict[NOTICE_NORMALISED_METADATA][METADATA_PUBLICATION_DATE])
                notice_dict[NOTICE_NORMALISED_METADATA][METADATA_DOCUMENT_SENT_DATE] = date_field_to_string(
                    notice_dict[NOTICE_NORMALISED_METADATA][METADATA_DOCUMENT_SENT_DATE])
            notice = Notice(**notice_dict)
            # Private attributes are restored directly -- presumably to skip
            # the Notice model's state-transition logic; confirm against the
            # model before changing.
            notice._status = NoticeStatus[notice_dict[NOTICE_STATUS]]
            notice._normalised_metadata = init_object_from_dict(NormalisedMetadata, NOTICE_NORMALISED_METADATA)
            notice._preprocessed_xml_manifestation = init_object_from_dict(XMLManifestation,
                                                                           NOTICE_PREPROCESSED_XML_MANIFESTATION)
            notice._distilled_rdf_manifestation = init_object_from_dict(RDFManifestation,
                                                                        NOTICE_DISTILLED_RDF_MANIFESTATION)
            notice._rdf_manifestation = init_object_from_dict(RDFManifestation, NOTICE_RDF_MANIFESTATION)
            notice._mets_manifestation = init_object_from_dict(METSManifestation, NOTICE_METS_MANIFESTATION)
            return notice
        return None

    @staticmethod
    def _create_dict_from_notice(notice: Notice) -> dict:
        """
        This method allows you to create a dictionary that can be stored in a repository based on a Notice.
        :param notice:
        :return:
        """
        notice_dict = notice.dict()
        # The TED id doubles as the Mongo document _id.
        notice_dict[MONGODB_COLLECTION_ID] = notice_dict[NOTICE_TED_ID]
        notice_dict[NOTICE_STATUS] = str(notice_dict[NOTICE_STATUS])
        notice_dict[NOTICE_CREATED_AT] = datetime.fromisoformat(notice_dict[NOTICE_CREATED_AT])
        if notice_dict[NOTICE_NORMALISED_METADATA]:
            if notice_dict[NOTICE_NORMALISED_METADATA][METADATA_PUBLICATION_DATE]:
                notice_dict[NOTICE_NORMALISED_METADATA][METADATA_PUBLICATION_DATE] = datetime.fromisoformat(
                    notice_dict[NOTICE_NORMALISED_METADATA][METADATA_PUBLICATION_DATE])
            if notice_dict[NOTICE_NORMALISED_METADATA][METADATA_DOCUMENT_SENT_DATE]:
                notice_dict[NOTICE_NORMALISED_METADATA][METADATA_DOCUMENT_SENT_DATE] = datetime.fromisoformat(
                    notice_dict[NOTICE_NORMALISED_METADATA][METADATA_DOCUMENT_SENT_DATE])
        return notice_dict

    def _update_notice(self, notice: Notice, upsert: bool = False):
        # Write new payload files first; only delete the previously linked
        # files once the document update succeeds. On failure, roll back the
        # newly written files instead.
        notice, linked_file_ids, new_linked_file_ids = self.write_notice_fields_in_grid_fs(notice=notice)
        notice_dict = NoticeRepository._create_dict_from_notice(notice=notice)
        try:
            self.collection.update_one({MONGODB_COLLECTION_ID: notice_dict[MONGODB_COLLECTION_ID]},
                                       {"$set": notice_dict}, upsert=upsert)
            self.delete_files_by_notice_id(linked_file_ids=linked_file_ids)
        except Exception as exception:
            self.delete_files_by_notice_id(linked_file_ids=new_linked_file_ids)
            raise exception

    def add(self, notice: Notice):
        """
        This method allows you to add notice objects to the repository.
        :param notice:
        :return:
        """
        self._update_notice(notice=notice, upsert=True)

    def update(self, notice: Notice):
        """
        This method allows you to update notice objects to the repository
        :param notice:
        :return:
        """
        # Unlike add(), update() is a no-op when the notice does not exist.
        notice_exist = self.collection.find_one({MONGODB_COLLECTION_ID: notice.ted_id})
        if notice_exist is not None:
            self._update_notice(notice=notice)

    def get(self, reference) -> Optional[Notice]:
        """
        This method allows a notice to be obtained based on an identification reference.
        :param reference:
        :return: Notice
        """
        result_dict = self.collection.find_one({MONGODB_COLLECTION_ID: reference})
        if result_dict is not None:
            notice = NoticeRepository._create_notice_from_repository_result(result_dict)
            notice = self.load_notice_fields_from_grid_fs(notice)
            return notice
        return None

    def get_notice_by_status(self, notice_status: NoticeStatus) -> Iterator[Notice]:
        """
        This method provides all notices based on its status.
        :param notice_status:
        :return:
        """
        for result_dict in self.collection.find({NOTICE_STATUS: str(notice_status)}):
            notice = NoticeRepository._create_notice_from_repository_result(result_dict)
            notice = self.load_notice_fields_from_grid_fs(notice)
            yield notice

    def list(self) -> Iterator[Notice]:
        """
        This method allows all records to be retrieved from the repository.
        :return: list of notices
        """
        for result_dict in self.collection.find():
            notice = NoticeRepository._create_notice_from_repository_result(result_dict)
            notice = self.load_notice_fields_from_grid_fs(notice)
            yield notice
| StarcoderdataPython |
12859652 | from django.contrib import admin
from .models import Image,Areas,Category
# Register your models here.
# Expose each content model in the Django admin site.
for _model in (Image, Areas, Category):
    admin.site.register(_model)
6601703 | #!/usr/bin/env python
__FILENAME__="powershell.py"
__AUTHOR__="<NAME>"
__VERSION__="1.1.1"
__COPYRIGHT__="Copyright (c) 2018-2021 %s" % (__AUTHOR__)
__LICENSE__="MIT License (https://dwettstein.mit-license.org/)"
__LINK__="https://github.com/dwettstein/PythonStuff"
__DESCRIPTION__=(
"This module contains utility functions for executing PowerShell scripts "
"or code."
)
# Changelog:
# - v1.1.1, 2021-12-19, <NAME>: Refactor header part.
# - v1.1.0, 2021-04-20, <NAME>: Add cross-platform bin.
# - v1.0.1, 2020-11-29, <NAME>: Use -Command not -File.
# - v1.0.0, 2018-11-26, <NAME>: Initial module.
import platform
from . import childprocess
# Console diagnostics switches used by execute_script().
LOG = True
DEBUG = False

# Default PowerShell executable per platform.system() value.
POWERSHELL_PATHS = {
    "Windows": "C:\\WINDOWS\\system32\\WindowsPowerShell\\v1.0\\powershell.exe",
    "Linux": "/usr/bin/pwsh",
    "Darwin": "/usr/local/bin/pwsh",  # MacOS
}
def execute_script(script_path: str,
                   script_inputs: list = None,
                   powershell_exe_path: str = None,
                   execution_policy: str = None) -> tuple:
    """
    Execute a PowerShell script.
    See also:
    https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_powershell_exe
    Args:
        script_path: A string with the full path to a script.
        script_inputs: A list containing input parameters for the script
            (be aware of the order), default is None.
        powershell_exe_path: The path to the PowerShell exe, default is None,
            in which case a per-platform default from POWERSHELL_PATHS is used.
        execution_policy: The execution policy for PowerShell, default is None
            (on Windows it then defaults to "RemoteSigned").
    Returns:
        A tuple (_out, _err, _ret) with the script output, error and return
        code. If no output or error is available, empty strings are returned.
    """
    if LOG:
        if DEBUG:
            print("Executing %s with inputs %s" % (
                script_path, str(script_inputs)))
        else:
            print("Executing %s" % (script_path))
    # Set default values if not provided as args.
    if powershell_exe_path is None:
        powershell_exe_path = POWERSHELL_PATHS.get(platform.system())
    assert powershell_exe_path, (
        "Unknown platform, please provide the path to PowerShell as argument."
    )
    if execution_policy is None and platform.system() == "Windows":
        execution_policy = "RemoteSigned"
    # Prepare arguments for subprocess.
    _process_args = [
        powershell_exe_path,
        "-NoLogo",
        "-NoProfile",
        "-NonInteractive",
    ]
    if execution_policy is not None:
        _process_args.extend(["-ExecutionPolicy", execution_policy])
    # Use -Command as -File doesn't work properly with begin, process, end blocks.
    _process_args.extend(["-Command", script_path])
    if script_inputs:
        # Quote inputs containing spaces and escape embedded quotes.
        # BUGFIX: coerce each input to str before the substring test; the
        # original did `" " in i`, which raises TypeError for non-string
        # inputs even though the quoting branch already handled them via
        # str(i).
        _process_args.extend(
            ["\"%s\"" % str(i).replace('"', '`"') if " " in str(i) else str(i)
             for i in script_inputs]
        )
    # Preserve a possible script specific exit code.
    _process_args.extend(["; exit $LASTEXITCODE"])
    try:
        if LOG and DEBUG:
            print(_process_args)
        (_out, _err, _ret) = childprocess.execute(_process_args)
        _out = _out.rstrip("\n")  # Remove empty line at the end.
        _err = _err.rstrip("\n")  # Remove empty line at the end.
    except Exception as ex:
        err_msg = ("Failed to execute PowerShell script with "
                   "args %s. Exception: %s" % (_process_args, str(ex)))
        if LOG:
            print(err_msg)
        _out = ""
        _err = err_msg
    # Any stderr content (including the exception path above) is treated as
    # failure, overriding the process exit code.
    if _err != "":
        _ret = 1
    if LOG and DEBUG:
        print("Script %s ended with exit code %s and result %s" % (
            script_path, _ret, (_err if _err else _out)))
    return (_out, _err, _ret)
| StarcoderdataPython |
3222401 | from guizero import App, TextBox, PushButton, Text, alerts
from tkinter.font import Font
def go():
    # Show an info alert greeting whatever name is currently typed in the
    # textbox.
    alerts.info("hi", "hi " + textbox.value)

# Build the window: a prompt label, a name entry, and a button wired to go().
app = App()
text = Text(app, text="Enter your name")
textbox = TextBox(app)
button = PushButton(app, text="Hi", command=go)
# Enter the GUI event loop (blocks until the window is closed).
app.display()
9789985 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Parse FIO output:
'''
import os
import argparse
import rex
import prettytable
def parse_fio_file(filename, verbose=False):
    '''
    Parse the FIO output and return a
    dict
    '''
    # Patterns use the `rex` placeholder mini-language; reformat_pattern with
    # compile=True expands (kind:<name>) placeholders into named regex groups
    # (measurement placeholders apparently also yield a companion
    # "<name>_unit" group -- see the mobj.group('..._unit') reads below).
    job_pattern = r"job.*rw=(w:<type>).*bs=(w:<blocksize>)-.*" \
        "ioengine=(w:<ioengine>).*iodepth=(d:<iodepth>)"
    job_rex_pattern = rex.reformat_pattern(job_pattern, compile=True)
    job1_pattern = r"job.* pid=(d:<pid>):(any:<timestamp>)"
    job1_rex_pattern = rex.reformat_pattern(job1_pattern, compile=True)
    read_aggr_pattern = r".*READ:.*io=(measurement:<io>),.*" \
        "aggrb=(measurement:<aggrbw>),.*minb=(measurement:<minavgbw>),.*" \
        "maxb=(measurement:<maxavgbw>),.*mint=(measurement:<minruntime>),.*" \
        "maxt=(measurement:<maxruntime>)"
    read_aggr_rex_pattern = rex.reformat_pattern(read_aggr_pattern,
                                                 compile=True)
    write_aggr_pattern = r".*WRITE:.*io=(measurement:<io>),.*" \
        "aggrb=(measurement:<aggrbw>),.*minb=(measurement:<minavgbw>),.*" \
        "maxb=(measurement:<maxavgbw>),.*mint=(measurement:<minruntime>),.*" \
        "maxt=(measurement:<maxruntime>)"
    write_aggr_rex_pattern = rex.reformat_pattern(write_aggr_pattern,
                                                  compile=True)
    cpu_pattern = r".*cpu.*:.*usr=(decimal:<user>)%,.*" \
        "sys=(decimal:<system>)%,.*ctx=(d:<context_switches>),.*" \
        "majf=(d:<majfault>),.*minf=(d:<minfault>)"
    cpu_rex_pattern = rex.reformat_pattern(cpu_pattern, compile=True)
    # NOTE(review): the file handle is never closed; acceptable for a
    # short-lived CLI tool, but a `with` block would be cleaner.
    fhandle = open(filename, 'r')
    data = fhandle.read()
    fio_result = {}
    for line in data.splitlines():
        # Job definition line: rw type, block size, ioengine, iodepth.
        mobj = job_rex_pattern.match(line)
        if mobj:
            if verbose:
                print "job pattern: ", line
                print "match: ", mobj.groups(0)
            fio_result['blocksize'] = mobj.group('blocksize')
            fio_result['jobtype'] = mobj.group('type')
            fio_result['ioengine'] = mobj.group('ioengine')
            fio_result['iodepth'] = mobj.group('iodepth')
        # Job runtime line: pid and timestamp.
        mobj = job1_rex_pattern.match(line)
        if mobj:
            if verbose:
                print "job1 pattern: ", line
                print "match: ", mobj.groups(0)
            fio_result['pid'] = mobj.group('pid')
            fio_result['timestamp'] = mobj.group('timestamp')
        # Aggregate READ statistics line.
        mobj = read_aggr_rex_pattern.match(line)
        if mobj:
            if verbose:
                print "Read aggr: ", line
                print "match: ", mobj.groups(0)
            fio_result['aggr_read'] = {}
            fio_result['aggr_read']['io'] = mobj.group('io')
            fio_result['aggr_read']['io_unit'] = mobj.group('io_unit')
            fio_result['aggr_read']['aggrbw'] = mobj.group('aggrbw')
            fio_result['aggr_read']['aggrbw_unit'] = mobj.group('aggrbw_unit')
            fio_result['aggr_read']['minavgbw'] = mobj.group('minavgbw')
            fio_result['aggr_read']['minavgbw_unit'] = \
                mobj.group('minavgbw_unit')
            fio_result['aggr_read']['maxavgbw'] = mobj.group('maxavgbw')
            fio_result['aggr_read']['maxavgbw_unit'] = \
                mobj.group('maxavgbw_unit')
            fio_result['aggr_read']['minruntime'] = mobj.group('minruntime')
            fio_result['aggr_read']['minruntime_unit'] = \
                mobj.group('minruntime_unit')
            fio_result['aggr_read']['maxruntime'] = mobj.group('maxruntime')
            fio_result['aggr_read']['maxruntime_unit'] = \
                mobj.group('maxruntime_unit')
        # Aggregate WRITE statistics line.
        mobj = write_aggr_rex_pattern.match(line)
        if mobj:
            if verbose:
                print "Write aggr: ", line
                print "match: ", mobj.groups(0)
            fio_result['aggr_write'] = {}
            fio_result['aggr_write']['io'] = mobj.group('io')
            fio_result['aggr_write']['io_unit'] = mobj.group('io_unit')
            fio_result['aggr_write']['aggrbw'] = mobj.group('aggrbw')
            fio_result['aggr_write']['aggrbw_unit'] = mobj.group('aggrbw_unit')
            fio_result['aggr_write']['minavgbw'] = mobj.group('minavgbw')
            fio_result['aggr_write']['minavgbw_unit'] = \
                mobj.group('minavgbw_unit')
            fio_result['aggr_write']['maxavgbw'] = mobj.group('maxavgbw')
            fio_result['aggr_write']['maxavgbw_unit'] = \
                mobj.group('maxavgbw_unit')
            fio_result['aggr_write']['minruntime'] = mobj.group('minruntime')
            fio_result['aggr_write']['minruntime_unit'] = \
                mobj.group('minruntime_unit')
            fio_result['aggr_write']['maxruntime'] = mobj.group('maxruntime')
            fio_result['aggr_write']['maxruntime_unit'] = \
                mobj.group('maxruntime_unit')
        # CPU usage summary line.
        mobj = cpu_rex_pattern.match(line)
        if mobj:
            if verbose:
                print "cpu pattern: ", line
                print "match: ", mobj.groups(0)
            fio_result['cpu_usage'] = {}
            fio_result['cpu_usage']['user'] = mobj.group('user')
            fio_result['cpu_usage']['system'] = mobj.group('system')
            fio_result['cpu_usage']['context_switches'] = \
                mobj.group('context_switches')
            fio_result['cpu_usage']['majfault'] = mobj.group('majfault')
            fio_result['cpu_usage']['minfault'] = mobj.group('minfault')
    return fio_result
def validate_filelist(filelist):
'''
Validate if all files exists in the filelist
Return:
true: if all files exist
false: if any check fails
'''
for filename in filelist:
if not os.path.exists(filename) or \
os.path.isdir(filename):
print "Invalid file [%s]" % filename
return False
return True
def display_fiodata_tabular(fioresults):
'''
Given the fioresults data, print it in
tabular format.
'''
# Setup table.
table_header = ["test", "ioengine", "size",
"Write (IO)", "Write (BW)",
"Read (IO)", "Read (BW)"]
table = prettytable.PrettyTable(table_header)
for result in fioresults:
row = []
row.append(result['jobtype'])
row.append(result['ioengine'])
row.append(result['blocksize'])
try:
iostr = result['aggr_write']['io'] + " " + \
result['aggr_write']['io_unit']
except KeyError:
iostr = "X"
row.append(iostr)
try:
bwstr = result['aggr_write']['aggrbw'] + " " + \
result['aggr_write']['aggrbw_unit']
except KeyError:
bwstr = "X"
row.append(bwstr)
try:
iostr = result['aggr_read']['io'] + " " + \
result['aggr_read']['io_unit']
except KeyError:
iostr = "X"
row.append(iostr)
try:
bwstr = result['aggr_read']['aggrbw'] + " " + \
result['aggr_read']['aggrbw_unit']
except KeyError:
bwstr = "X"
row.append(bwstr)
table.add_row(row)
print table
def parse_fio_output_files(namespace):
    '''
    Parse every FIO output file named on the command line and print a table.
    '''
    if not validate_filelist(namespace.output):
        return None
    parsed = [parse_fio_file(fname, verbose=namespace.verbose)
              for fname in namespace.output]
    display_fiodata_tabular(parsed)
def parse_arguments():
    '''
    Parse cmdline arguments
    '''
    parser = argparse.ArgumentParser(
        prog="fioparse.py",
        description="FIO Parser")
    # -o accepts any number of paths (nargs='*') and is mandatory.
    parser.add_argument("-o", "--output",
                        required=True,
                        nargs='*',
                        help="FIO output files to parse")
    parser.add_argument("-v", "--verbose",
                        required=False,
                        action="store_true",
                        help="Enable Verbose")
    namespace = parser.parse_args()
    return namespace
def main():
    """CLI entry point: parse arguments, then parse and display FIO files."""
    namespace = parse_arguments()
    parse_fio_output_files(namespace)

if __name__ == '__main__':
    main()
| StarcoderdataPython |
9660843 | <filename>json2tree/__main__.py
import argparse
import logging
import os
import sys
from json2tree.theme_1 import html as html_1
from json2tree.theme_2 import html as html_2
import json
def readJSON(file_path, theme):
    """Load JSON from *file_path* and render it as an HTML tree string.

    theme '2' selects the second theme; theme '1' and any unknown value both
    fall back to theme 1 (same fallback as the original if/elif/else chain).
    BUGFIX: the original never closed the file handle; a context manager is
    used instead.
    """
    with open(file_path) as f:
        json_data = json.load(f)
    if theme == '2':
        return html_2.create_html_report(json_data)
    return html_1.create_html_report(json_data)
def create_output_file(output_file_path, html_string):
    """Write *html_string* to *output_file_path*, overwriting any existing
    file.

    BUGFIX: the original also called f.close() inside the `with` block, which
    is redundant -- the context manager already closes the file.
    """
    with open(output_file_path, 'w') as f:
        f.write(html_string)
def run(args):
    """Validate the CLI arguments and generate the HTML output file.

    Errors are reported on stderr and the function returns without raising.
    """
    if not args.json:
        sys.stderr.write("Input file not specified")
        return
    if not os.path.exists(args.json):
        # BUGFIX: the original silently did nothing when the input path did
        # not exist; report it explicitly.
        sys.stderr.write("Input file does not exist")
        return
    if args.output_file is None:
        sys.stderr.write("Output file not specified")
        # BUGFIX: the original fell through after this message and crashed in
        # create_output_file(None, ...).
        return
    html_string = readJSON(args.json, args.theme)
    create_output_file(args.output_file, html_string)
def main():
    """CLI entry point for json2tree: build the argument parser, then
    dispatch to run() (Python 3 only)."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter,
        prog='json2tree',
        description='''
json2tree helps you to create an html tree view for your json file.
For comprehensive an intiutive analysis.
Learn more at https://github.com/abhaykatheria/json2tree''')
    # NOTE(review): the two adjacent string literals below concatenate with no
    # separator, so the help text reads "...filegive the path..."; same for
    # "...visitofficial..." below. Left unchanged here (runtime strings).
    parser.add_argument('-j', '--json',
                        help="Input JSON file"
                        "give the path to the JSON file")
    parser.add_argument('-t', '--theme',
                        help="Select the theme to use. To know about theme visit"
                        "official repository")
    parser.add_argument('-o', '--output-file',
                        help="give the path of the ouput file")
    py_ver = sys.version.replace('\n', '').split('[')[0]
    parser.add_argument('-v', '--version', action='version',
                        version="{ver_str}\n python version = {py_v}".format(
                            ver_str="0.1.0", py_v=py_ver))
    # parse_known_args tolerates unrecognised extras; `unknown` is discarded.
    args, unknown = parser.parse_known_args()
    if sys.version_info < (3, 0):
        sys.stderr.write("Errrrrrrr.....Please run on Python 3.7+")
    else:
        run(args)
324538 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This module is also sponsored by E.T.A.I. (www.etai.fr)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.vmware import connect_to_api, gather_vm_facts
# Optional dependency guard: the module must stay importable without pyVmomi
# so the code below can report the missing requirement cleanly (see
# PyVmomiHelper.__init__, which calls module.fail_json when this is False).
HAS_PYVMOMI = False
try:
    from pyVmomi import vim
    HAS_PYVMOMI = True
except ImportError:
    pass
# Standard module metadata consumed by Ansible tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

# YAML documentation rendered by ansible-doc.
DOCUMENTATION = '''
---
module: vmware_guest_tools_wait
short_description: Wait for VMware tools to become available and return facts
description:
- Wait for VMware tools to become available on the VM and return facts
version_added: 2.4
author:
- <NAME> <<EMAIL>>
notes:
- Tested on vSphere 6.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
name:
description:
- Name of the VM to work with
required: True
name_match:
description:
- If multiple VMs matching the name, use the first or last found
default: 'first'
choices: ['first', 'last']
uuid:
description:
- UUID of the instance to manage if known, this is VMware's unique identifier.
- This is required if name is not supplied.
folder:
description:
- Destination folder, absolute path to find an existing guest.
- This is required if name is supplied.
datacenter:
description:
- Destination datacenter for the deploy operation
required: True
extends_documentation_fragment: vmware.documentation
'''

# Usage example shown by ansible-doc.
EXAMPLES = '''
- name: Wait for VMware tools to become available
vmware_guest_tools_wait:
hostname: 192.168.1.209
username: <EMAIL>
password: <PASSWORD>
validate_certs: no
uuid: 421e4592-c069-924d-ce20-7e7533fab926
delegate_to: localhost
register: facts
'''

# Description of the values this module returns.
RETURN = """
instance:
description: metadata about the virtual machine
returned: always
type: dict
sample: None
"""
class PyVmomiHelper(object):
    """Thin wrapper around the vSphere API: locate a VM and wait for
    VMware tools to come up."""

    def __init__(self, module):
        # Fail early if the optional pyVmomi dependency is not installed.
        if not HAS_PYVMOMI:
            module.fail_json(msg='pyvmomi module required')
        self.module = module
        self.params = module.params
        self.content = connect_to_api(self.module)

    def getvm(self, name=None, uuid=None, folder=None):
        """Find a VM by instance UUID, or by name within a folder.

        Returns the pyVmomi VirtualMachine object, or None if not found.
        UUID lookup takes precedence over folder/name lookup.
        """
        si = self.content.searchIndex
        vm = None
        if uuid:
            vm = si.FindByUuid(instanceUuid=False, uuid=uuid, vmSearch=True)
        elif folder:
            # Build the absolute folder path to pass into the search method
            if not self.params['folder'].startswith('/'):
                self.module.fail_json(msg="Folder %(folder)s needs to be an absolute path, starting with '/'." % self.params)
            searchpath = '%(datacenter)s%(folder)s' % self.params
            # get all objects for this path ...
            f_obj = self.content.searchIndex.FindByInventoryPath(searchpath)
            if f_obj:
                if isinstance(f_obj, vim.Datacenter):
                    # A datacenter matched: descend into its VM folder.
                    f_obj = f_obj.vmFolder
                for c_obj in f_obj.childEntity:
                    if not isinstance(c_obj, vim.VirtualMachine):
                        continue
                    if c_obj.name == name:
                        vm = c_obj
                        # name_match == 'last' keeps scanning so the last
                        # matching VM wins.
                        if self.params['name_match'] == 'first':
                            break
        return vm

    def gather_facts(self, vm):
        """Collect the standard Ansible fact dict for *vm*."""
        return gather_vm_facts(self.content, vm)

    def wait_for_tools(self, vm, poll=100, sleep=5):
        """Poll until the guest reports 'guestToolsRunning'.

        Polls up to *poll* times, sleeping *sleep* seconds between attempts
        (roughly poll * sleep seconds total).  Returns a result dict suitable
        for module.exit_json / module.fail_json.
        """
        tools_running = False
        vm_facts = {}
        poll_num = 0
        # Re-fetch the VM by UUID each iteration so fresh state is read
        # instead of a possibly stale object.
        vm_uuid = vm.config.uuid
        while not tools_running and poll_num <= poll:
            newvm = self.getvm(uuid=vm_uuid)
            vm_facts = self.gather_facts(newvm)
            if vm_facts['guest_tools_status'] == 'guestToolsRunning':
                tools_running = True
            else:
                time.sleep(sleep)
                poll_num += 1
        if not tools_running:
            return {'failed': True, 'msg': 'VMware tools either not present or not running after {0} seconds'.format((poll * sleep))}
        # 'changed' is reported when at least one wait cycle was needed.
        changed = False
        if poll_num > 0:
            changed = True
        return {'changed': changed, 'failed': False, 'instance': vm_facts}
def get_obj(content, vimtype, name):
    """
    Return an object by name, if name is None the
    first found object is returned
    """
    view = content.viewManager.CreateContainerView(
        content.rootFolder, vimtype, True)
    match = None
    for candidate in view.view:
        # An empty/None name means "take the first object found".
        if not name or candidate.name == name:
            match = candidate
            break
    view.Destroy()
    return match
def main():
    """Ansible module entry point: locate the VM, then wait for its tools."""
    module = AnsibleModule(
        argument_spec=dict(
            hostname=dict(
                type='str',
                default=os.environ.get('VMWARE_HOST')
            ),
            username=dict(
                type='str',
                default=os.environ.get('VMWARE_USER')
            ),
            password=dict(
                type='str', no_log=True,
                default=os.environ.get('VMWARE_PASSWORD')
            ),
            validate_certs=dict(required=False, type='bool', default=True),
            name=dict(required=True, type='str'),
            name_match=dict(required=False, type='str', default='first'),
            uuid=dict(required=False, type='str'),
            folder=dict(required=False, type='str', default='/vm'),
            datacenter=dict(required=True, type='str'),
        ),
    )
    # Prepend /vm if it was missing from the folder path, also strip trailing slashes
    if not module.params['folder'].startswith('/vm') and module.params['folder'].startswith('/'):
        module.params['folder'] = '/vm%(folder)s' % module.params
    module.params['folder'] = module.params['folder'].rstrip('/')
    pyv = PyVmomiHelper(module)
    # Check if the VM exists before continuing
    vm = pyv.getvm(name=module.params['name'],
                   folder=module.params['folder'],
                   uuid=module.params['uuid'])
    # VM already exists
    if vm:
        try:
            result = pyv.wait_for_tools(vm)
            if result['failed']:
                module.fail_json(**result)
            else:
                module.exit_json(**result)
        except Exception:
            # get_exception() keeps old-Python compatibility for reporting.
            e = get_exception()
            module.fail_json(msg="Waiting for tools failed with exception: %s" % e)
    else:
        module.fail_json(msg="Unable to wait for tools for non-existing VM %(name)s" % module.params)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
from aiohttp import ClientSession
import pytest # noqa: F401
import pytest_asyncio.plugin # noqa: F401
import aiohttp_github_helpers as h
# Public GitHub repository used as a live fixture by the tests below.
TEST_OWNER = "metwork-framework"
TEST_REPO = "testrepo"
@pytest.mark.asyncio
async def test_github_get_labels_on_issue():
    """Live GitHub API: issue #38 of the fixture repo has exactly these two labels."""
    async with ClientSession() as client_session:
        labels = await h.github_get_labels_on_issue(client_session, TEST_OWNER,
                                                    TEST_REPO, 38)
        assert len(labels) == 2
        assert sorted(labels)[0] == 'Priority: Low'
        assert sorted(labels)[1] == 'Status: Closed'
@pytest.mark.asyncio
async def test_github_get_pr_commit_messages_list():
    """Live GitHub API: PR #58 carries a single known commit message."""
    async with ClientSession() as client_session:
        messages = await h.github_get_pr_commit_messages_list(client_session,
                                                              TEST_OWNER,
                                                              TEST_REPO,
                                                              58)
        assert len(messages) == 1
        assert messages[0] == 'Update README.md'
@pytest.mark.asyncio
async def test_github_get_statuses():
    """Live GitHub API: the combined status of this known commit is 'failure'."""
    ref = "129ae457d5cd404ec76ab51ae70dbc137b4aae6d"
    async with ClientSession() as client_session:
        status = await h.github_get_status(client_session, TEST_OWNER,
                                           TEST_REPO, ref)
        assert status == 'failure'
@pytest.mark.asyncio
async def test_github_get_open_prs_by_sha():
    """Live GitHub API: exactly one PR (any state) references this commit sha."""
    sha = "129ae457d5cd404ec76ab51ae70dbc137b4aae6d"
    async with ClientSession() as client_session:
        prs = await h.github_get_open_prs_by_sha(client_session, TEST_OWNER,
                                                 TEST_REPO, sha,
                                                 state='all')
        assert len(prs) == 1
        assert prs[0] == 61
@pytest.mark.asyncio
async def test_github_get_org_repos_by_topic():
    """Live GitHub API: filtering org repositories by included/excluded topics."""
    async with ClientSession() as client_session:
        # No topic filter: all org repos are returned.
        repos = await h.github_get_org_repos_by_topic(client_session,
                                                      TEST_OWNER)
        assert len(repos) > 5
        repos = await h.github_get_org_repos_by_topic(client_session,
                                                      TEST_OWNER,
                                                      ["metwork"])
        assert len(repos) > 0
        repos = await h.github_get_org_repos_by_topic(client_session,
                                                      TEST_OWNER,
                                                      ["not_found"])
        assert len(repos) == 0
        # Same topic both included and excluded: exclusion wins.
        repos = await h.github_get_org_repos_by_topic(client_session,
                                                      TEST_OWNER,
                                                      ["metwork"],
                                                      ["metwork"])
        assert len(repos) == 0
@pytest.mark.asyncio
async def test_github_get_latest_commit():
    """Live GitHub API: latest master commit has a 40-char sha and positive age."""
    async with ClientSession() as client_session:
        (sha, age) = await h.github_get_latest_commit(client_session,
                                                      TEST_OWNER, TEST_REPO,
                                                      "master")
        assert len(sha) == 40
        assert age > 1
@pytest.mark.asyncio
async def test_github_get_repo_topics():
    """Live GitHub API: the fixture repo is tagged with the 'testrepo' topic."""
    async with ClientSession() as client_session:
        topics = await h.github_get_repo_topics(client_session,
                                                TEST_OWNER, TEST_REPO)
        assert len(topics) > 1
        assert "testrepo" in topics
@pytest.mark.asyncio
async def test_github_get_pr_reviews():
    """Live GitHub API: PR #81 has a single approved review from metworkbot."""
    async with ClientSession() as client_session:
        reviews = await h.github_get_pr_reviews(client_session,
                                                TEST_OWNER, TEST_REPO, 81)
        assert len(reviews) == 1
        assert reviews[0]['sha'] == '04c830ffccc17bb90119cd3a89a151faba63ec97'
        assert reviews[0]['user_login'] == 'metworkbot'
        assert reviews[0]['state'] == 'APPROVED'
| StarcoderdataPython |
# Read T test cases.  Each line holds a starting value followed by operator
# tokens: '@' triples the value, '%' adds 5, '#' subtracts 7.  The final
# value is printed with two decimal places.
for _ in range(int(input())):
    tokens = input().split()
    value = float(tokens[0])
    for op in tokens[1:]:
        if op == '@':
            value *= 3
        elif op == '%':
            value += 5
        elif op == '#':
            value -= 7
    print("%.2f" % value)
| StarcoderdataPython |
import pytest
from app.html.block_builder import BlockBuilder, HeadingBuilder, QuoteBuilder, ListBuilder, ListItemBuilder, \
CodeBlockBuilder, HorizontalRuleBuilder
from app.converter.converter import Converter
from app.element.block import CodeBlock
from app.markdown.block_parser import BlockParser, HeadingParser, QuoteParser, ListParser, HorizontalRuleParser, \
CodeBlockParser
from app.markdown.inline_parser import InlineParser
from app.markdown.parser import MarkdownParser
from app.settings import setting
from tests.factory.block_factory import ListItemFactory
# Frequently used settings: the newline code and indent unit shared by the
# expected-output fixtures below.
LINE_BREAK = setting['newline_code']
INDENT = setting['indent']
class TestBlockBuilder:
    """ Verify that an HTML string is produced from a Block element. """

    # Assembling the HTML string
    @pytest.mark.parametrize(
        ('block_text', 'child_text', 'expected'),
        [
            (
                'plain text',
                'plain text',
                f'<p class="{setting["class_name"]["p"]}">{LINE_BREAK}{INDENT}plain text{LINE_BREAK}</p>'
            ),
            (
                '# 概要',
                '概要',
                (
                    f'<h1 class="{setting["class_name"]["h1"]}">{LINE_BREAK}'
                    f'{INDENT}概要{LINE_BREAK}'
                    f'</h1>'
                )
            ),
            (
                '> と言いました',
                'と言いました',
                (
                    f'<blockquote class="{setting["class_name"]["blockquote"]}">{LINE_BREAK}'
                    f'と言いました'
                    f'</blockquote>'
                )
            )
        ],
        ids=['plain', 'heading', 'quote'])
    def test_build(self, block_text: str, child_text: str, expected: str):
        # GIVEN
        sut = BlockBuilder()
        block = BlockParser().parse(block_text, InlineParser().parse(child_text))
        # WHEN
        actual = sut.build(block, child_text)
        # THEN
        assert actual == expected
class TestHeadingBuilder:
    """ Verify that a heading HTML string is assembled from a HeadingBlock element. """

    # Target detection
    @pytest.mark.parametrize(
        ('text', 'expected'),
        [
            (
                '# this is a heading',
                True
            ),
            (
                'plain text',
                False
            ),
        ],
        ids=['target', 'not target'])
    def test_target(self, text: str, expected: bool):
        # GIVEN
        sut = HeadingBuilder()
        parser = BlockParser()
        child_text = parser.extract_inline_text(text)
        block = parser.parse(text, InlineParser().parse(child_text))
        # WHEN
        actual = sut.is_target(block)
        # THEN
        assert actual == expected

    # Assembling the HTML string
    @pytest.mark.parametrize(
        ('text', 'expected'),
        [
            (
                '# first heading',
                (
                    f'<h1 class="{setting["class_name"]["h1"]}">{LINE_BREAK}'
                    f'{INDENT}first heading{LINE_BREAK}'
                    f'</h1>'
                )
            ),
            (
                '#### 補足: これは補足です',
                (
                    f'<h4 class="{setting["class_name"]["h4"]}">{LINE_BREAK}'
                    f'{INDENT}補足: これは補足です{LINE_BREAK}'
                    f'</h4>'
                )
            )
        ],
        ids=['first', '4th'])
    def test_build(self, text: str, expected: str):
        # GIVEN
        sut = HeadingBuilder()
        parser = HeadingParser()
        child_text = parser.extract_text(text)
        block = HeadingParser().parse(text, InlineParser().parse(child_text))
        # WHEN
        actual = sut.build(block, child_text)
        # THEN
        assert actual == expected
class TestQuoteBuilder:
    """ Verify assembly of the blockquote tag string. """

    @pytest.mark.parametrize(
        ('text', 'expected'),
        [
            (
                '> これは引用です',
                True
            ),
            (
                '[参考](url)',
                False
            )
        ],
        ids=['target', 'not target']
    )
    def test_is_target(self, text: str, expected: bool):
        # GIVEN
        sut = QuoteBuilder()
        parser = BlockParser()
        child_text = parser.extract_inline_text(text)
        block = parser.parse(text, InlineParser().parse(child_text))
        # WHEN
        actual = sut.is_target(block)
        # THEN
        assert actual == expected

    @pytest.mark.parametrize(
        ('text', 'expected'),
        [
            (
                '> それが問題です',
                (
                    f'<blockquote class="{setting["class_name"]["blockquote"]}">{LINE_BREAK}'
                    f'それが問題です'
                    f'</blockquote>'
                )
            )
        ]
    )
    def test_build(self, text: str, expected: str):
        # GIVEN
        sut = QuoteBuilder()
        parser = QuoteParser()
        child_text = parser.extract_text(text)
        block = parser.parse(text, InlineParser().parse(child_text))
        # WHEN
        actual = sut.build(block, child_text)
        # THEN
        assert actual == expected
class TestListBuilder:
    """ Assembly of the ul tag string element. """

    # Target detection
    @pytest.mark.parametrize(
        ('text', 'expected'),
        [
            (
                '* task1',
                True
            ),
            ('やること',
             False
             ),
        ],
        ids=['target', 'not target']
    )
    def test_is_target(self, text: str, expected: bool):
        sut = ListBuilder()
        parser = BlockParser()
        child_text = parser.extract_inline_text(text)
        block = parser.parse(text, InlineParser().parse(child_text))
        # WHEN
        actual = sut.is_target(block)
        # THEN
        assert actual == expected

    # Build result
    # Newlines/indentation of the element itself are the child builder's
    # responsibility: the child li spans multiple lines, and applying the
    # newline/indent to only one child line would be harder to handle.
    @pytest.mark.parametrize(
        ('text', 'expected'),
        [
            (
                '- no.1',
                (
                    f'<ul class="{setting["class_name"]["ul"]}">{LINE_BREAK}'
                    f'no.1'
                    f'</ul>'
                )
            )
        ]
    )
    def test_build(self, text: str, expected: str):
        # GIVEN
        sut = ListBuilder()
        parser = ListParser()
        child_text = parser.extract_text(text)
        block = parser.parse(text, InlineParser().parse(child_text))
        # WHEN
        actual = sut.build(block, child_text)
        # THEN
        assert actual == expected
class TestListItemBuilder:
    """ Assembly of the li tag string element. """

    # Build target
    @pytest.mark.parametrize(
        'text',
        ['最初の要素']
    )
    def test_is_target_list_item(self, text: str):
        sut = ListItemBuilder()
        block = ListItemFactory().create_single_list_item(text)
        # WHEN
        actual = sut.is_target(block)
        # THEN
        assert actual is True

    # Not a build target
    @pytest.mark.parametrize(
        'text',
        [
            '* 1st element'
        ]
    )
    def test_is_target_not_list_item(self, text: str):
        sut = ListItemBuilder()
        parser = BlockParser()
        child_text = parser.extract_inline_text(text)
        block = parser.parse(text, InlineParser().parse(child_text))
        # WHEN
        actual = sut.is_target(block)
        # THEN
        assert actual is False

    # Build result
    # li elements are assumed to be children of a ul, so the expected output
    # includes one level of indentation.
    @pytest.mark.parametrize(
        ('text', 'expected'),
        [
            (
                'やりたいことその1',
                (
                    f'{INDENT}<li class="{setting["class_name"]["li"]}">{LINE_BREAK}'
                    f'{INDENT}{INDENT}やりたいことその1{LINE_BREAK}'
                    f'{INDENT}</li>'
                )
            )
        ],
        ids=['list item']
    )
    def test_build(self, text: str, expected: str):
        # GIVEN
        sut = ListItemBuilder()
        block = ListItemFactory().create_single_list_item(text)
        # WHEN
        actual = sut.build(block, text)
        # THEN
        assert actual == expected
class TestCodeBlockBuilder:
    """ Can the pre/code tag strings be assembled? """

    # Build target
    @pytest.mark.parametrize(
        'text',
        [
            '```Java',
            '```',
        ]
    )
    def test_is_target_target(self, text: str):
        # GIVEN
        sut = CodeBlockBuilder()
        block = CodeBlockParser().parse(text, InlineParser().parse(''))
        # WHEN
        actual = sut.is_target(block)
        # THEN
        assert actual is True

    # Not a build target
    @pytest.mark.parametrize(
        'text',
        [
            '## 概要',
            '// コメントだけれどコードではない',
        ]
    )
    def test_is_target_not_target(self, text: str):
        # GIVEN
        sut = CodeBlockBuilder()
        parser = BlockParser()
        child_text = parser.extract_inline_text(text)
        block = parser.parse(text, InlineParser().parse(child_text))
        # WHEN
        actual = sut.is_target(block)
        # THEN
        assert actual is False

    # Assembly of the pre/code tag string
    # Newlines/indentation of the element itself are the converter's
    # responsibility: children span multiple lines, and applying the
    # newline/indent to only one child line would be harder to handle.
    @pytest.mark.parametrize(
        ('lines', 'child_text', 'expected'),
        [
            (
                [
                    '```Java',
                    'List<String> list;',
                    '```',
                ],
                'List<String> list;',
                (
                    f'<pre>{LINE_BREAK}'
                    f'{INDENT}<code class="language-java hljs">{LINE_BREAK}'
                    f'List<String> list;'
                    f'{INDENT}</code>{LINE_BREAK}'
                    f'</pre>'
                )
            ),
            (
                [
                    '```',
                    '## [参考](url)',
                    '```'
                ],
                '## [参考](url)',
                (
                    f'<pre>{LINE_BREAK}'
                    f'{INDENT}<code class="language- hljs">{LINE_BREAK}'
                    f'## [参考](url)'
                    f'{INDENT}</code>{LINE_BREAK}'
                    f'</pre>'
                )
            ),
        ],
        ids=['code', 'markdown text']
    )
    def test_build(self, lines: list[str], child_text: str, expected: str):
        # GIVEN
        sut = CodeBlockBuilder()
        code_block = Converter().convert(MarkdownParser().parse(lines)).content[0]
        if not isinstance(code_block, CodeBlock):
            assert False
        # WHEN
        actual = sut.build(code_block, child_text)
        # THEN
        assert actual == expected
class TestHorizontalRuleBuilder:
    """ Verify that an hr tag HTML string is produced from a HorizontalRuleBlock element. """

    # Target detection
    @pytest.mark.parametrize(
        ('text', 'expected'), [
            ('---', True),
            ('--', False),
        ],
        ids=['target', 'not target'])
    def test_target(self, text: str, expected: bool):
        # GIVEN
        sut = HorizontalRuleBuilder()
        block = BlockParser().parse(text, InlineParser().parse(text))
        # WHEN
        actual = sut.is_target(block)
        # THEN
        assert actual == expected

    # Assembling the HTML
    @pytest.mark.parametrize(
        ('text', 'expected'),
        [
            (
                '---',
                f'<hr class="{setting["class_name"]["hr"]}">'
            ),
        ]
    )
    def test_build(self, text: str, expected: str):
        # GIVEN
        sut = HorizontalRuleBuilder()
        block = HorizontalRuleParser().parse(text, InlineParser().parse(''))
        # WHEN
        actual = sut.build(block, '')
        # THEN
        assert actual == expected
| StarcoderdataPython |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ServerArgs', 'Server']
@pulumi.input_type
class ServerArgs:
def __init__(__self__, *,
certificate: Optional[pulumi.Input[str]] = None,
domain: Optional[pulumi.Input[str]] = None,
endpoint_details: Optional[pulumi.Input['ServerEndpointDetailsArgs']] = None,
endpoint_type: Optional[pulumi.Input[str]] = None,
identity_provider_details: Optional[pulumi.Input['ServerIdentityProviderDetailsArgs']] = None,
identity_provider_type: Optional[pulumi.Input[str]] = None,
logging_role: Optional[pulumi.Input[str]] = None,
post_authentication_login_banner: Optional[pulumi.Input[str]] = None,
pre_authentication_login_banner: Optional[pulumi.Input[str]] = None,
protocol_details: Optional[pulumi.Input['ServerProtocolDetailsArgs']] = None,
protocols: Optional[pulumi.Input[Sequence[pulumi.Input['ServerProtocolArgs']]]] = None,
security_policy_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input['ServerTagArgs']]]] = None,
workflow_details: Optional[pulumi.Input['ServerWorkflowDetailsArgs']] = None):
"""
The set of arguments for constructing a Server resource.
"""
if certificate is not None:
pulumi.set(__self__, "certificate", certificate)
if domain is not None:
pulumi.set(__self__, "domain", domain)
if endpoint_details is not None:
pulumi.set(__self__, "endpoint_details", endpoint_details)
if endpoint_type is not None:
pulumi.set(__self__, "endpoint_type", endpoint_type)
if identity_provider_details is not None:
pulumi.set(__self__, "identity_provider_details", identity_provider_details)
if identity_provider_type is not None:
pulumi.set(__self__, "identity_provider_type", identity_provider_type)
if logging_role is not None:
pulumi.set(__self__, "logging_role", logging_role)
if post_authentication_login_banner is not None:
pulumi.set(__self__, "post_authentication_login_banner", post_authentication_login_banner)
if pre_authentication_login_banner is not None:
pulumi.set(__self__, "pre_authentication_login_banner", pre_authentication_login_banner)
if protocol_details is not None:
pulumi.set(__self__, "protocol_details", protocol_details)
if protocols is not None:
pulumi.set(__self__, "protocols", protocols)
if security_policy_name is not None:
pulumi.set(__self__, "security_policy_name", security_policy_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if workflow_details is not None:
pulumi.set(__self__, "workflow_details", workflow_details)
@property
@pulumi.getter
def certificate(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "certificate")
@certificate.setter
def certificate(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "certificate", value)
@property
@pulumi.getter
def domain(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "domain")
@domain.setter
def domain(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "domain", value)
@property
@pulumi.getter(name="endpointDetails")
def endpoint_details(self) -> Optional[pulumi.Input['ServerEndpointDetailsArgs']]:
return pulumi.get(self, "endpoint_details")
@endpoint_details.setter
def endpoint_details(self, value: Optional[pulumi.Input['ServerEndpointDetailsArgs']]):
pulumi.set(self, "endpoint_details", value)
@property
@pulumi.getter(name="endpointType")
def endpoint_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "endpoint_type")
@endpoint_type.setter
def endpoint_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "endpoint_type", value)
@property
@pulumi.getter(name="identityProviderDetails")
def identity_provider_details(self) -> Optional[pulumi.Input['ServerIdentityProviderDetailsArgs']]:
return pulumi.get(self, "identity_provider_details")
@identity_provider_details.setter
def identity_provider_details(self, value: Optional[pulumi.Input['ServerIdentityProviderDetailsArgs']]):
pulumi.set(self, "identity_provider_details", value)
@property
@pulumi.getter(name="identityProviderType")
def identity_provider_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "identity_provider_type")
@identity_provider_type.setter
def identity_provider_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "identity_provider_type", value)
@property
@pulumi.getter(name="loggingRole")
def logging_role(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "logging_role")
@logging_role.setter
def logging_role(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "logging_role", value)
@property
@pulumi.getter(name="postAuthenticationLoginBanner")
def post_authentication_login_banner(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "post_authentication_login_banner")
@post_authentication_login_banner.setter
def post_authentication_login_banner(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "post_authentication_login_banner", value)
@property
@pulumi.getter(name="preAuthenticationLoginBanner")
def pre_authentication_login_banner(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "pre_authentication_login_banner")
@pre_authentication_login_banner.setter
def pre_authentication_login_banner(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pre_authentication_login_banner", value)
@property
@pulumi.getter(name="protocolDetails")
def protocol_details(self) -> Optional[pulumi.Input['ServerProtocolDetailsArgs']]:
return pulumi.get(self, "protocol_details")
@protocol_details.setter
def protocol_details(self, value: Optional[pulumi.Input['ServerProtocolDetailsArgs']]):
pulumi.set(self, "protocol_details", value)
@property
@pulumi.getter
def protocols(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServerProtocolArgs']]]]:
return pulumi.get(self, "protocols")
@protocols.setter
def protocols(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServerProtocolArgs']]]]):
pulumi.set(self, "protocols", value)
@property
@pulumi.getter(name="securityPolicyName")
def security_policy_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "security_policy_name")
@security_policy_name.setter
def security_policy_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "security_policy_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServerTagArgs']]]]:
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServerTagArgs']]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="workflowDetails")
def workflow_details(self) -> Optional[pulumi.Input['ServerWorkflowDetailsArgs']]:
return pulumi.get(self, "workflow_details")
@workflow_details.setter
def workflow_details(self, value: Optional[pulumi.Input['ServerWorkflowDetailsArgs']]):
pulumi.set(self, "workflow_details", value)
warnings.warn("""Server is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""", DeprecationWarning)
class Server(pulumi.CustomResource):
warnings.warn("""Server is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""", DeprecationWarning)
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
certificate: Optional[pulumi.Input[str]] = None,
domain: Optional[pulumi.Input[str]] = None,
endpoint_details: Optional[pulumi.Input[pulumi.InputType['ServerEndpointDetailsArgs']]] = None,
endpoint_type: Optional[pulumi.Input[str]] = None,
identity_provider_details: Optional[pulumi.Input[pulumi.InputType['ServerIdentityProviderDetailsArgs']]] = None,
identity_provider_type: Optional[pulumi.Input[str]] = None,
logging_role: Optional[pulumi.Input[str]] = None,
post_authentication_login_banner: Optional[pulumi.Input[str]] = None,
pre_authentication_login_banner: Optional[pulumi.Input[str]] = None,
protocol_details: Optional[pulumi.Input[pulumi.InputType['ServerProtocolDetailsArgs']]] = None,
protocols: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServerProtocolArgs']]]]] = None,
security_policy_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServerTagArgs']]]]] = None,
workflow_details: Optional[pulumi.Input[pulumi.InputType['ServerWorkflowDetailsArgs']]] = None,
__props__=None):
"""
Resource Type definition for AWS::Transfer::Server
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[ServerArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Resource Type definition for AWS::Transfer::Server
:param str resource_name: The name of the resource.
:param ServerArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ServerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
certificate: Optional[pulumi.Input[str]] = None,
domain: Optional[pulumi.Input[str]] = None,
endpoint_details: Optional[pulumi.Input[pulumi.InputType['ServerEndpointDetailsArgs']]] = None,
endpoint_type: Optional[pulumi.Input[str]] = None,
identity_provider_details: Optional[pulumi.Input[pulumi.InputType['ServerIdentityProviderDetailsArgs']]] = None,
identity_provider_type: Optional[pulumi.Input[str]] = None,
logging_role: Optional[pulumi.Input[str]] = None,
post_authentication_login_banner: Optional[pulumi.Input[str]] = None,
pre_authentication_login_banner: Optional[pulumi.Input[str]] = None,
protocol_details: Optional[pulumi.Input[pulumi.InputType['ServerProtocolDetailsArgs']]] = None,
protocols: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServerProtocolArgs']]]]] = None,
security_policy_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServerTagArgs']]]]] = None,
workflow_details: Optional[pulumi.Input[pulumi.InputType['ServerWorkflowDetailsArgs']]] = None,
__props__=None):
pulumi.log.warn("""Server is deprecated: Server is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""")
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ServerArgs.__new__(ServerArgs)
__props__.__dict__["certificate"] = certificate
__props__.__dict__["domain"] = domain
__props__.__dict__["endpoint_details"] = endpoint_details
__props__.__dict__["endpoint_type"] = endpoint_type
__props__.__dict__["identity_provider_details"] = identity_provider_details
__props__.__dict__["identity_provider_type"] = identity_provider_type
__props__.__dict__["logging_role"] = logging_role
__props__.__dict__["post_authentication_login_banner"] = post_authentication_login_banner
__props__.__dict__["pre_authentication_login_banner"] = pre_authentication_login_banner
__props__.__dict__["protocol_details"] = protocol_details
__props__.__dict__["protocols"] = protocols
__props__.__dict__["security_policy_name"] = security_policy_name
__props__.__dict__["tags"] = tags
__props__.__dict__["workflow_details"] = workflow_details
__props__.__dict__["arn"] = None
__props__.__dict__["server_id"] = None
super(Server, __self__).__init__(
'aws-native:transfer:Server',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Server':
"""
Get an existing Server resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ServerArgs.__new__(ServerArgs)
__props__.__dict__["arn"] = None
__props__.__dict__["certificate"] = None
__props__.__dict__["domain"] = None
__props__.__dict__["endpoint_details"] = None
__props__.__dict__["endpoint_type"] = None
__props__.__dict__["identity_provider_details"] = None
__props__.__dict__["identity_provider_type"] = None
__props__.__dict__["logging_role"] = None
__props__.__dict__["post_authentication_login_banner"] = None
__props__.__dict__["pre_authentication_login_banner"] = None
__props__.__dict__["protocol_details"] = None
__props__.__dict__["protocols"] = None
__props__.__dict__["security_policy_name"] = None
__props__.__dict__["server_id"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["workflow_details"] = None
return Server(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
return pulumi.get(self, "arn")
@property
@pulumi.getter
def certificate(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "certificate")
@property
@pulumi.getter
def domain(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "domain")
@property
@pulumi.getter(name="endpointDetails")
def endpoint_details(self) -> pulumi.Output[Optional['outputs.ServerEndpointDetails']]:
return pulumi.get(self, "endpoint_details")
@property
@pulumi.getter(name="endpointType")
def endpoint_type(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "endpoint_type")
@property
@pulumi.getter(name="identityProviderDetails")
def identity_provider_details(self) -> pulumi.Output[Optional['outputs.ServerIdentityProviderDetails']]:
return pulumi.get(self, "identity_provider_details")
@property
@pulumi.getter(name="identityProviderType")
def identity_provider_type(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "identity_provider_type")
@property
@pulumi.getter(name="loggingRole")
def logging_role(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "logging_role")
@property
@pulumi.getter(name="postAuthenticationLoginBanner")
def post_authentication_login_banner(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "post_authentication_login_banner")
@property
@pulumi.getter(name="preAuthenticationLoginBanner")
def pre_authentication_login_banner(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "pre_authentication_login_banner")
@property
@pulumi.getter(name="protocolDetails")
def protocol_details(self) -> pulumi.Output[Optional['outputs.ServerProtocolDetails']]:
return pulumi.get(self, "protocol_details")
@property
@pulumi.getter
def protocols(self) -> pulumi.Output[Optional[Sequence['outputs.ServerProtocol']]]:
return pulumi.get(self, "protocols")
@property
@pulumi.getter(name="securityPolicyName")
def security_policy_name(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "security_policy_name")
@property
@pulumi.getter(name="serverId")
def server_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "server_id")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Sequence['outputs.ServerTag']]]:
        """Return the ``tags`` output property of this resource (generated getter)."""
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter(name="workflowDetails")
    def workflow_details(self) -> pulumi.Output[Optional['outputs.ServerWorkflowDetails']]:
        """Return the ``workflowDetails`` output property of this resource (generated getter)."""
        return pulumi.get(self, "workflow_details")
| StarcoderdataPython |
5037060 | <filename>src/feat/utils.py
"""
Utility functions for MEGAN featurization
"""
from typing import Tuple
from rdkit import Chem
from rdkit.Chem.rdchem import Mol
import numpy as np
from src.feat.graph_features import try_get_atom_feature
from src.feat import ATOM_EDIT_TUPLE_KEYS
def fix_incomplete_mappings(sub_mol: Mol, prod_mol: Mol) -> Tuple[Mol, Mol]:
    """Give every unmapped atom in both molecules a fresh, unique atom-map number.

    The new numbers start above the highest map number already present in
    either molecule, so existing mappings are never disturbed.  Both input
    molecules are modified in place and returned.
    """
    # highest map number over both molecules in one pass
    highest = max(a.GetAtomMapNum() for mol in (sub_mol, prod_mol)
                  for a in mol.GetAtoms())
    for mol in (sub_mol, prod_mol):
        for atom in mol.GetAtoms():
            current = atom.GetAtomMapNum()
            # map numbers < 1 (or unset) count as "no mapping"
            if current is None or current < 1:
                highest += 1
                atom.SetAtomMapNum(highest)
    return sub_mol, prod_mol
def add_map_numbers(mol: Mol) -> Mol:
    """Assign a random permutation of the map numbers 1..N to the atoms of *mol*.

    The molecule is first round-tripped through SMILES, which makes the atom
    order canonical before the shuffled numbers are applied.
    """
    # converting to smiles to mol and again to smiles makes atom order canonical
    mol = Chem.MolFromSmiles(Chem.MolToSmiles(mol))
    shuffled = np.arange(mol.GetNumAtoms()) + 1
    np.random.shuffle(shuffled)
    for atom, number in zip(mol.GetAtoms(), shuffled):
        atom.SetAtomMapNum(int(number))
    return mol
def reac_to_canonical(sub_mol, prod_mol):
    """Canonicalize atom order of a reaction pair and renumber atom maps consistently.

    Converting to SMILES, back to mol, makes atom order canonical.
    Product atoms are renumbered 1..N in list order; substrate atoms reuse the
    product's new numbers where their old map number matches, and atoms that
    only occur in the substrate get fresh numbers above the product's maximum.
    """
    sub_mol = Chem.MolFromSmiles(Chem.MolToSmiles(sub_mol))
    prod_mol = Chem.MolFromSmiles(Chem.MolToSmiles(prod_mol))
    # in RdKit chirality can be marked different depending on order of atoms in molecule list
    # here we remap atoms so the map order is consistent with atom list order
    map2map = {}  # old product map number -> new (positional) map number
    for i, a in enumerate(prod_mol.GetAtoms()):
        map2map[a.GetAtomMapNum()] = i + 1
        a.SetAtomMapNum(i + 1)
    max_map = max(map2map.values())
    for i, a in enumerate(sub_mol.GetAtoms()):
        m = a.GetAtomMapNum()
        if m in map2map:
            # atom also present in the product: reuse its new number
            a.SetAtomMapNum(map2map[m])
        else:
            # substrate-only atom: give it a fresh number
            max_map += 1
            a.SetAtomMapNum(max_map)
    return sub_mol, prod_mol
def get_bond_tuple(bond) -> Tuple[int, int, int, int]:
    """Return a canonical ``(map1, map2, bond_type, stereo)`` tuple for *bond*.

    The two atom-map numbers are ordered so the smaller one comes first,
    making the tuple independent of bond direction.
    """
    first = bond.GetBeginAtom().GetAtomMapNum()
    second = bond.GetEndAtom().GetAtomMapNum()
    if first > second:
        first, second = second, first
    return first, second, int(bond.GetBondType()), int(bond.GetStereo())
def atom_to_edit_tuple(atom) -> Tuple:
    """Return the edit-relevant features of *atom* as a tuple, one per key in ATOM_EDIT_TUPLE_KEYS."""
    return tuple(try_get_atom_feature(atom, key) for key in ATOM_EDIT_TUPLE_KEYS)
# rdkit has a problem with implicit hs. By default there are only explicit hs.
# This is a hack to fix this error
def fix_explicit_hs(mol: Mol) -> Mol:
    """Normalize hydrogen handling on *mol*.

    rdkit has a problem with implicit Hs (by default there are only explicit
    Hs); this works around that by allowing implicit Hs on every atom, adding
    the explicit Hs, removing them again, and re-sanitizing the molecule.
    """
    for a in mol.GetAtoms():
        a.SetNoImplicit(False)
    mol = Chem.AddHs(mol, explicitOnly=True)
    mol = Chem.RemoveHs(mol)
    Chem.SanitizeMol(mol)
    return mol
def get_atom_ind(mol: Mol, atom_map: int) -> int:
    """Return the positional index of the atom in *mol* carrying map number *atom_map*.

    Raises:
        ValueError: if no atom carries the requested map number.
    """
    for index, atom in enumerate(mol.GetAtoms()):
        if atom.GetAtomMapNum() == atom_map:
            return index
    raise ValueError(f'No atom with map number: {atom_map}')
| StarcoderdataPython |
1779346 | import datetime
from lib.model.database.Database import Database
from sqlalchemy import Column, Integer, String, Date, DateTime, ForeignKey
Base = Database.get_declarative_base()
class AntiEmulator(Base):
    """ORM model recording one anti-emulator detection event.

    Stores the queried property, the device's real value, and the value that
    was returned to the application instead, with a UTC timestamp and a link
    to the owning ``application`` row.
    """
    __tablename__ = 'anti_emulator'
    id = Column(Integer, primary_key=True)
    # name of the emulator-detectable property that was queried
    property = Column(String)
    # value actually present on the (emulated) device
    real_value = Column(String)
    # value handed back to the application in its place
    return_value = Column(String)
    date = Column(DateTime, default=datetime.datetime.utcnow)
    application_id = Column(Integer, ForeignKey('application.id'))

    def __init__(self, property, real_value, return_value):
        # `date` and `application_id` are filled in by SQLAlchemy defaults /
        # relationship assignment, not by the constructor
        self.property = property
        self.real_value = real_value
        self.return_value = return_value

    def __repr__(self):
        return f'<AntiEmulator(id={self.id},property="{self.property}",real_value="{self.real_value}",return_value="{self.return_value}",date="{self.date}")>'
| StarcoderdataPython |
8013373 | <filename>anima/ui/ui_compiled/choose_from_list_dialog_UI_pyqt4.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_files\choose_from_list_dialog.ui'
#
# Created: Thu May 04 11:01:53 2017
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # QString.fromUtf8 is not available in this PyQt build; strings are
    # already unicode, so fall back to the identity function.
    _fromUtf8 = lambda s: s
class Ui_Dialog(object):
    """pyuic4-generated widget layout for the choose-from-list dialog.

    Two list views side by side with ``>>`` / ``<<`` transfer buttons between
    them and an OK/Cancel button box below.  Per the header warning in this
    file, do not hand-edit: regenerate from the .ui file instead.
    """
    def setupUi(self, Dialog):
        """Build and lay out all child widgets on *Dialog*."""
        Dialog.setObjectName(_fromUtf8("Dialog"))
        Dialog.resize(478, 280)
        self.verticalLayout = QtGui.QVBoxLayout(Dialog)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        # left-hand list
        self.listView = QtGui.QListView(Dialog)
        self.listView.setObjectName(_fromUtf8("listView"))
        self.horizontalLayout.addWidget(self.listView)
        # middle column: the two transfer buttons with stretch above and below
        self.verticalLayout_2 = QtGui.QVBoxLayout()
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.verticalLayout_2.addItem(spacerItem)
        self.pushButton_2 = QtGui.QPushButton(Dialog)
        self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
        self.verticalLayout_2.addWidget(self.pushButton_2)
        self.pushButton = QtGui.QPushButton(Dialog)
        self.pushButton.setObjectName(_fromUtf8("pushButton"))
        self.verticalLayout_2.addWidget(self.pushButton)
        spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.verticalLayout_2.addItem(spacerItem1)
        self.horizontalLayout.addLayout(self.verticalLayout_2)
        # right-hand list
        self.listView_2 = QtGui.QListView(Dialog)
        self.listView_2.setObjectName(_fromUtf8("listView_2"))
        self.horizontalLayout.addWidget(self.listView_2)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.buttonBox = QtGui.QDialogButtonBox(Dialog)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        self.verticalLayout.addWidget(self.buttonBox)
        self.retranslateUi(Dialog)
        # wire the standard accept/reject signals and any auto-connected slots
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Dialog.accept)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Apply (translatable) captions to the widgets."""
        Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "Dialog", None, QtGui.QApplication.UnicodeUTF8))
        self.pushButton_2.setText(QtGui.QApplication.translate("Dialog", ">>", None, QtGui.QApplication.UnicodeUTF8))
        self.pushButton.setText(QtGui.QApplication.translate("Dialog", "<<", None, QtGui.QApplication.UnicodeUTF8))
| StarcoderdataPython |
8162838 |
from flask import Flask, request
import pandas as pd
import numpy as np
import os, sys, json, pickle
app = Flask(__name__)

"""Load Model and Scalar files"""
model_file_path = os.path.join(os.path.pardir, os.path.pardir, 'models', 'lr_model.pkl')
scalar_file_path = os.path.join(os.path.pardir, os.path.pardir, 'models', 'lr_scalar.pkl')
# Use context managers so the pickle file handles are closed after loading;
# the original left both files open for the lifetime of the process.
# NOTE: pickle.load executes arbitrary code from the file -- only load
# model files from a trusted location.
with open(model_file_path, "rb") as mdl_fl_pkl:
    model = pickle.load(mdl_fl_pkl)
with open(scalar_file_path, "rb") as scl_fl_pkl:
    scalar = pickle.load(scl_fl_pkl)

# Feature columns the model was trained on, in training order; incoming
# JSON payloads must provide all of these.
columns = [u'Age', u'Fare', u'FamilySize', u'IsMother', u'IsMale',
           u'Deck_A', u'Deck_B', u'Deck_C', u'Deck_D', u'Deck_E',
           u'Deck_F', u'Deck_G', u'Deck_Z', u'Pclass_1', u'Pclass_2',
           u'Pclass_3', u'Title_Lady', u'Title_Master', u'Title_Miss',
           u'Title_Mr', u'Title_Mrs', u'Title_Officer', u'Title_Sir',
           u'Fare_Bin_very_low', u'Fare_Bin_low', u'Fare_Bin_medium',
           u'Fare_Bin_high', u'Embarked_C', u'Embarked_Q', u'Embarked_S',
           u'Agestate_Adults', u'Agestate_Child']
@app.route('/api', methods=['POST'])
def make_predictions():
    """Score a JSON batch of passenger records and return predictions as JSON.

    The POST body is a JSON table containing at least ``PassengerId``,
    ``Survived`` and every feature in ``columns``.  The response maps each
    PassengerId to the model's prediction alongside the supplied actual.
    """
    data = json.dumps(request.get_json(force=True))
    df = pd.read_json(data)
    passenger_ids = df.PassengerId.ravel()
    survivals = df.Survived.ravel()
    # .values replaces DataFrame.as_matrix(), which was removed in pandas 1.0
    X = df[columns].values.astype('float')
    Xsc = scalar.transform(X)
    p = model.predict(Xsc)
    df_response = pd.DataFrame({"PassengerId": passenger_ids, "Predicted": p, "Actuals": survivals})
    return df_response.to_json()
    # bugfix: the original had two unreachable statements here (a dead
    # "Hello to API World" response left over from a tutorial); removed.
if __name__ == "__main__":
app.run(port=8293, debug=True) | StarcoderdataPython |
8191623 | <reponame>albertvisser/filefindr
"""Gui-toolkit onafhankelijke code t.b.v. Afrift applicaties
opgezet als classes die de toolkit-dependent code aanroepen als methodes op een attribuut ervan
ze worden geïmporteerd via een aparte module die bepaalt welke toolkit er gebruikt wordt
"""
import os
import collections
import subprocess
import json
import logging
import pathlib
from .gui import SelectNamesGui, ResultsGui, MainFrameGui
from .findr_files import Finder, format_result
# Per-user settings directory; created on first import.
BASE = pathlib.Path.home() / '.afrift'
if not BASE.exists():
    BASE.mkdir()
HERE = pathlib.Path(__file__).parent  # os.path.dirname(__file__)
LOGFILE = HERE.parent / 'logs' / 'afrift.log'
# Debug logging is opt-in via the DEBUG environment variable (any value but "0").
WANT_LOGGING = 'DEBUG' in os.environ and os.environ["DEBUG"] != "0"
if WANT_LOGGING:
    # make sure the log file (and its directory) exist before configuring logging
    if not LOGFILE.parent.exists():
        LOGFILE.parent.mkdir()
    if not LOGFILE.exists():
        LOGFILE.touch()
    logging.basicConfig(filename=str(LOGFILE), level=logging.DEBUG,
                        format='%(asctime)s %(message)s')
# template for the "all files share this directory" message (Dutch UI text)
common_path_txt = 'De bestanden staan allemaal in of onder de directory "{}"'
iconame = str(HERE / "find.ico")  # os.path.join(HERE, "find.ico")
def log(message):
    """Write *message* to the module log, but only when debug logging is enabled."""
    if not WANT_LOGGING:
        return
    logging.info(message)
def get_iniloc(path=None):
    """determine location & filenames for stored settings

    if given, input should be an absolute path

    Returns a 3-tuple ``(iniloc, mrufile, optsfile)``: the per-directory
    settings directory under BASE, the mru-items json path and the options
    json path.  The directory name is derived from *path* with the home
    directory abbreviated to ``~`` and path separators replaced by ``_``.
    """
    path = pathlib.Path(path) if path else pathlib.Path.cwd()
    if path == pathlib.Path.home():
        # the home dir itself: drop the leading separator only
        here = str(path)[1:]
    else:
        try:
            # inside the home dir: abbreviate to '~<relative part>'
            here = '~' + str(path.relative_to(pathlib.Path.home()))
        except ValueError:
            # outside the home dir: absolute path without the leading separator
            here = str(path.resolve())[1:]
    iniloc = BASE / here.replace('/', '_')
    mrufile = iniloc / 'mru_items.json'
    optsfile = iniloc / 'options.json'
    return iniloc, mrufile, optsfile
class SelectNames():
    """Intermediate dialog for choosing files (or directories) to process.

    This class contains the methods that are independent of the chosen GUI
    toolkit; the actual screen is delegated to SelectNamesGui.
    """
    def __init__(self, parent, files=True):
        # files=True: select files to skip; files=False: select directories
        self.do_files = files
        self.parent = parent
        self.title = self.parent.title + " - file list"
        self.iconame = iconame
        if files:
            text = "Selecteer de bestanden die je *niet* wilt verwerken"
            # map display string -> original path object
            self.names = {str(x): x for x in self.parent.names}
        else:
            text = "Selecteer de directories die je *niet* wilt verwerken"
        # NOTE(review): when files=False, self.names is not set here, yet
        # show() always returns it -- confirm SelectNamesGui assigns it.
        self.gui = SelectNamesGui(parent, self)
        captions = {'heading': text, 'sel_all': 'Select/Unselect All', 'invert': 'Invert selection',
                    'exit': "&Terug", 'execute': "&Klaar"}
        self.gui.setup_screen(captions)

    def show(self):
        """show the dialog screen

        Returns a tuple (dialog result, remaining names mapping).
        """
        return self.gui.go(), self.names
class Results():
    """Show search results on screen.

    This class contains the methods that are independent of the chosen GUI
    toolkit; screen handling is delegated to ResultsGui.

    The only behavioral change versus the original is the removal of a
    leftover debug ``print`` in ``zoek_anders``.
    """
    def __init__(self, parent, common_path=''):
        self.parent = parent
        self.common = common_path
        self.show_context = self.parent.p["context"]
        self.results = []
        # column title: just the line number in single-file mode
        self.titel = 'Regel' if self.parent.apptype == "single" else 'File/Regel'
        self.iconame = iconame
        self.gui = ResultsGui(parent, self)
        # replace-in-single-file mode shows only a summary label
        self.label_only = self.parent.p['vervang'] and self.parent.apptype == 'single'
        if self.label_only:
            aantal = self.parent.zoekvervang.rpt[1].split(None, 1)[1]
            label_txt = self.parent.zoekvervang.rpt[0]
            label_txt = label_txt.replace('vervangen', aantal + ' vervangen')
        else:
            label_txt = "{} ({} items)".format(self.parent.zoekvervang.rpt[0],
                                               len(self.parent.zoekvervang.rpt) - 1)
        if self.parent.apptype == "multi":
            label_txt += '\n' + common_path_txt.format(self.common.rstrip(os.sep))
        captions = {'heading': label_txt, 'ctxt': 'Context', 'txt': 'Tekst', 'hlp': 'Help',
                    'rslt': '&Goto Result', 'exit': "&Klaar", 'rpt': "&Repeat Search",
                    'cpy': "Copy to &File", 'clp': "Copy to &Clipboard",
                    'alt': '&Zoek anders', 'sel': 'Vervang in &Selectie', 'all': 'Vervang &Alles',
                    'fmt': 'Formatteer output:',
                    'pth': "toon directorypad", 'dlm': "comma-delimited", 'sum': "summarized"}
        self.build_list()
        self.gui.setup_screen(captions)

    def build_list(self):
        """construct list of results

        Converts the raw report lines into (location[, context], text) tuples,
        stripping the common path prefix and, in single-file mode, reducing the
        location to just the line number.
        """
        for ix, line in enumerate(self.parent.zoekvervang.rpt):
            if ix == 0:
                kop = line
            elif line != "":
                where, what = line.split(": ", 1)
                if self.parent.apptype == "single":
                    if "r. " in where:
                        where = where.split("r. ", 1)[1]
                    else:
                        where = ""
                if self.common and self.common != '/':
                    where = where.replace(str(self.common), "")
                if self.show_context:
                    # location looks like "file r. N (context)"
                    where, rest = where.rsplit(' (', 1)
                    context = rest.split(')')[0]
                    self.results.append((where, context, what))
                else:
                    self.results.append((where, what))
        self.results.insert(0, kop)

    def show(self):
        """show the dialog screen
        """
        self.gui.go()

    def get_results(self):
        """format output

        Builds the exportable text according to the output options chosen in
        the GUI (show full paths, comma-delimited, summarized/context).
        """
        toonpad = self.gui.get_pth()
        comma = self.gui.get_csv()
        context = self.gui.get_sum()
        text = ["{}".format(self.results[0])]
        if self.parent.apptype == "multi" and not toonpad:
            text.append(common_path_txt.format(self.common))
        text.append("")
        if comma:
            import io
            import csv
            textbuf = io.StringIO()
            writer = csv.writer(textbuf, dialect='unix')
            header = [('Path/file' if toonpad else 'File'), 'Line', 'Context', 'Result']
        for item in self.results[1:]:
            result = list(item)
            if self.parent.apptype == 'single':
                result[0] = ' r. ' + result[0]
            if toonpad and (self.parent.apptype == 'multi' or comma):
                result[0] = self.common + result[0]
            if comma:
                loc, line = result[0].rsplit(' r. ', 1)
                result[:1] = [loc, line]
                # drop the Context column from the header when the results
                # don't carry a context field
                if header and len(header) > len(result):
                    header[2:] = header[3:]
                if self.parent.apptype == 'single' and not toonpad:
                    result = result[1:]
                    if header:
                        header = header[1:]
                # write the header only once, before the first data row
                if header:
                    writer.writerow(header)
                    header = None
                writer.writerow(result)
            else:
                text.append(" ".join(result).strip())
        if comma:
            text += textbuf.getvalue().split("\n")
            textbuf.close()
        if context:
            context = 'py' if self.show_context else None
            if self.parent.apptype == 'single':
                text = ['{} {}'.format(self.parent.p['filelist'][0], x) if x else '' for x in text]
            text = format_result(text, context)
            if self.parent.apptype == 'single' and not toonpad:
                text = [x.replace(str(self.parent.p['filelist'][0]), '', 1).strip() for x in text]
        return text

    def refresh(self, *args, **kwargs):
        """repeat search and show new results

        Optional kwargs ``replace_text`` / ``replace_count`` are used to
        prepend a replacement summary to the header label.
        """
        self.results = []
        self.gui.clear_contents()
        self.parent.zoekvervang.rpt = ["".join(self.parent.zoekvervang.specs)]
        self.parent.gui.set_waitcursor(True)
        self.parent.zoekvervang.go()
        self.parent.gui.set_waitcursor(False)
        if len(self.parent.zoekvervang.rpt) == 1:
            self.gui.breekaf("Niks gevonden", done=False)
            return
        elif len(self.parent.zoekvervang.rpt) == 2 and self.parent.zoekvervang.p['wijzig']:
            count_txt = self.parent.zoekvervang.rpt.pop().split(': ')[-1]
        else:
            count_txt = '{} items'.format(len(self.parent.zoekvervang.rpt) - 1)
        label_txt = ''
        replcount = kwargs.get('replace_count', '')
        if replcount:
            srch = self.parent.zoekvervang.p['zoek']
            repl = kwargs.get('replace_text', '')
            label_txt = '`{}` with `{}` replaced {} in lines\n'.format(srch, repl, replcount)
        label_txt += "{} ({})".format(self.parent.zoekvervang.rpt[0], count_txt)
        if self.parent.apptype == "multi":
            label_txt += '\n' + common_path_txt.format(self.common)
        self.gui.set_header(label_txt)
        self.build_list()
        self.gui.populate_list()

    def check_option_combinations_ok(self):
        """rule out nonsensical output-option combinations
        """
        title, msg = (self.parent.title,
                      "Summarize to comma delimited is not a sensible option, request denied")
        if self.gui.get_sum() and self.gui.get_csv():
            self.gui.meld(title, msg)
            return False
        return True

    def kopie(self, *args):
        """callback for button 'Copy to file'
        """
        if not self.check_option_combinations_ok():
            return
        # build a filesystem-safe filename from the search string
        f_nam = self.parent.p["zoek"]
        for char in '/\\?%*:|"><.':
            if char in f_nam:
                f_nam = f_nam.replace(char, "~")
        if self.gui.get_csv():
            ext = '.csv'
        else:
            ext = '.txt'
        # yields "files-containing-<zoek><ext>"
        f_nam = f_nam.join(("files-containing-", ext))
        savename = self.gui.get_savefile(f_nam, ext)
        if savename:
            self.gui.remember_settings()
            with open(savename, "w") as f_out:
                for line in self.get_results():
                    f_out.write(line + "\n")

    def help(self):
        """show instructions
        """
        self.gui.meld('info', "Select a line and doubleclick or press Ctrl-G to open the"
                              " indicated file\nat the indicated line (not in single file mode)")

    def to_clipboard(self, *args):
        """callback for button 'Copy to clipboard'
        """
        if self.check_option_combinations_ok():
            self.gui.remember_settings()
            self.gui.copy_to_clipboard('\n'.join(self.get_results()) + '\n')

    def goto_result(self, row, col):
        """open the file containing the selected item
        """
        if self.parent.apptype == 'single':
            self.gui.meld('ahem', 'Not in single file mode')
            return
        selected = self.results[row + 1]
        target, line = selected[0].split(' r. ')
        target = self.common + target
        prog, fileopt, lineopt = self.parent.editor_option
        subprocess.run([prog, fileopt.format(target), lineopt.format(line)])

    def vervang_in_sel(self, *args):
        """replace afterwards, but only in the selected result lines"""
        # determine selected lines
        items = self.gui.get_selection()
        if not items:
            self.gui.meld(self.parent.resulttitel, 'Geen regels geselecteerd om in te vervangen')
            return
        lines_to_replace = [x.split(' r. ') for x in items]
        prompt = 'vervang `{}` in geselecteerde regels door:'.format(self.parent.p['zoek'])
        text, ok = self.gui.get_text_from_user(self.parent.resulttitel, prompt)
        if ok:
            replaced = self.parent.zoekvervang.replace_selected(text, lines_to_replace)
            # self.parent.zoekvervang.setup_search() -- is this needed when nothing changes?
            self.refresh(replace_text=text, replace_count=replaced)

    def vervang_alles(self, *args):
        """replace afterwards in all result lines"""
        prompt = 'vervang `{}` in alle regels door:'.format(self.parent.p['zoek'])
        text, ok = self.gui.get_text_from_user(self.parent.resulttitel, prompt)
        if ok:
            self.parent.zoekvervang.p['vervang'] = text
            self.parent.zoekvervang.p['wijzig'] = True
            self.parent.zoekvervang.setup_search()
            self.refresh()

    def zoek_anders(self, *args):
        """search for something else in the same selection"""
        origzoek = self.parent.zoekvervang.p['zoek']
        prompt = 'zoek in dezelfde selectie naar:'
        text, ok = self.gui.get_text_from_user(self.parent.resulttitel, prompt)
        if ok:
            self.parent.zoekvervang.p['zoek'] = text
            self.parent.zoekvervang.setup_search()
            self.refresh()
        # restore the original search argument afterwards
        # (bugfix: a leftover debug print was removed here)
        self.parent.zoekvervang.p['zoek'] = origzoek
        self.parent.zoekvervang.setup_search()
class MainFrame():
    """Main screen of the application.

    This class contains the methods that are independent of the chosen GUI
    toolkit; the actual window is delegated to MainFrameGui.

    Behavioral fixes versus the original:
    * ``read_from_ini`` no longer reuses a stale loop variable when restoring
      the saved search options;
    * ``checkverv`` appends to the report string instead of overwriting it.
    """
    def __init__(self, **kwargs):
        """Set up state, read stored settings, apply command-line options and start the GUI."""
        log('in MainFrame.init: cwd is {}'.format(pathlib.Path.cwd()))
        log('  kwargs is {}'.format(kwargs))
        self.apptype = kwargs.pop('apptype', '')
        fnaam = kwargs.pop('fnaam', '')
        flist = kwargs.pop('flist', None)
        self.title = "Albert's find-replace in files tool"
        self.iconame = iconame
        self.fouttitel = self.title + "- fout"
        self.resulttitel = self.title + " - Resultaten"
        # self.apptype = apptype
        self.hier = pathlib.Path.cwd()  # os.getcwd()
        # most-recently-used entries for the four combo boxes
        self.mru_items = {"zoek": [], "verv": [], "types": [], "dirs": []}
        # (internal key, command-line argument name) pairs of persisted options
        self.save_options_keys = (("case", 'case_sensitive'), ("woord", 'whole_words'),
                                  ("subdirs", 'recursive'), ("context", 'python_context'),
                                  ("negeer", 'ignore_comments'))
        self.outopts = {'full_path': False, 'as_csv': False, 'summarize': False}
        self.screen_choices = {'regex': False, 'case': False, 'woord': False,
                               'subdirs': False, 'follow_symlinks': False, 'select_subdirs': False,
                               'select_files': False, 'context': False, 'negeer': False,
                               'dont_save': False, 'no_gui': False,
                               'output_file': False, 'full_path': False, 'as_csv': False,
                               'summarize': False}
        # the idea behind the dict above is to collect all on-screen choices
        # and fill them first from the saved values and then from the
        # supplied startup options - see also the captions and read_kwargs()
        fnpath = pathlib.Path(fnaam).expanduser().resolve()
        # a plain existing file without an explicit apptype implies single-file mode
        if self.apptype == "" and fnpath.exists() and not fnpath.is_dir():
            self.apptype = 'single'
        self.p = {'filelist': self.get_filename_list(fnaam, fnpath, flist)}
        self.s = ""
        self.setup_options()
        self.extraopts = collections.defaultdict(lambda: False)
        self.apply_cmdline_options(kwargs)
        self.gui = MainFrameGui(self)
        captions = {'vraag_zoek': 'Zoek naar:', 'regex': "regular expression (Python format)",
                    'case': "hoofd/kleine letters gelijk", 'woord': "hele woorden",
                    'vraag_verv': 'Vervang door:', 'empty': "lege vervangtekst = weghalen",
                    'zoek': "&Zoek", 'in': "In directory:", 'in_s': "In file/directory:",
                    'in_m': "In de volgende files/directories:",
                    'subs_m': "van geselecteerde directories ",
                    'subs': "ook subdirectories doorzoeken",
                    'link': "symlinks volgen - max. diepte (-1 is alles):",
                    'skipdirs': "selecteer (sub)directories om over te slaan",
                    'skipfiles': "selecteer bestanden om over te slaan",
                    'ftypes': "Alleen files van type:",
                    'context': "context tonen (waar mogelijk, anders overslaan)",
                    'negeer': "commentaren en docstrings negeren",
                    'backup': "gewijzigd(e) bestand(en) backuppen",
                    'exit': "direct afsluiten na vervangen", 'exec': '&Uitvoeren',
                    'end': '&Einde'}
        self.gui.setup_screen(captions)
        if self.extraopts['no_gui']:
            self.doe()
        else:
            self.gui.go()

    def get_filename_list(self, fn_orig, fnaam, flist):
        """determine the files to search in

        *fn_orig* is the raw filename argument, *fnaam* its resolved Path and
        *flist* an optional explicit list of names (multi mode only).
        Raises ValueError for argument combinations that do not match apptype.
        """
        fnames = []
        if self.apptype == "":
            if fn_orig:
                fnames = [fnaam]
        elif self.apptype == "single":
            self.title += " - single file version"
            if not fn_orig:
                raise ValueError('Need filename for application type "single"')
            fnames = [fnaam]
        elif self.apptype == "multi":
            self.title += " - multi-file version"
            if fn_orig:
                if fnaam.is_dir():
                    fnames = [fnaam]
                else:
                    # the file contains one path per line
                    with fnaam.open() as f_in:
                        for line in f_in:
                            line = line.strip()
                            if line.endswith("\\") or line.endswith("/"):
                                line = line[:-1]
                            line = pathlib.Path(line).expanduser().resolve()
                            fnames.append(line)
            elif flist:
                fnames = [pathlib.Path(x) for x in flist]
            else:
                raise ValueError('Need filename or list of files for application '
                                 'type "multi"')
        else:
            raise ValueError('application type should be empty, "single" or "multi"')
        return fnames

    def setup_options(self):
        """update self.p with default and other options

        Reads the per-directory settings, the fallback encoding and the
        editor command used to open results.
        """
        for key in [x[0] for x in self.save_options_keys]:
            self.p[key] = False
        if self.p['filelist']:
            if self.apptype == 'single':
                self.read_from_ini(self.p['filelist'][0].parent)
            elif self.apptype == 'multi':
                test = os.path.commonpath([str(x) for x in self.p['filelist']])
                self.read_from_ini(os.path.abspath(test))
            else:
                self.read_from_ini(self.p['filelist'][0])
        else:
            self.read_from_ini()
        # fallback encoding for files that are not valid utf-8
        encfile = BASE / 'fallback_encoding'
        try:
            test = encfile.read_text()
        except FileNotFoundError:
            test = 'latin-1\n'
            encfile.write_text(test)
        self.p['fallback_encoding'] = test.strip()
        # editor invocation used by "goto result"
        edfile = BASE / 'open_result'
        try:
            test = edfile.read_text()
        except FileNotFoundError:
            test = '\n'.join(("program = 'SciTE'",
                              "file-option = '-open:{}'",
                              "line-option = '-goto:{}'",
                              ""))
            edfile.write_text(test)
        self.editor_option = [x.split(' = ')[1].strip("'")
                              for x in test.strip().split('\n')]
        self.always_replace = False
        self.maak_backups = True
        self.exit_when_ready = False

    def read_from_ini(self, path=None):
        """read the ini files with previously used search settings

        When there is no settings file (or it cannot be read) the defaults
        set before this call are kept.
        """
        loc, mfile, ofile = get_iniloc(path)
        if loc.exists():
            with mfile.open() as _in:
                self.mru_items = json.load(_in)
            with ofile.open() as _in:
                opts = json.load(_in)
            for key, value in self.outopts.items():
                self.outopts[key] = opts.pop(key, '') or value
            # bugfix: the original used the stale loop variable `value` from
            # the loop above here; fall back to the already-initialized
            # default in self.p instead
            for key in [x[0] for x in self.save_options_keys]:
                self.p[key] = opts.pop(key, '') or self.p[key]

    def apply_cmdline_options(self, cmdline_options):
        """read settings from the command line; override where supplied
        """
        self.p['zoek'] = cmdline_options.pop('search', '')
        test = cmdline_options.pop('replace', None)
        if test is not None:
            self.p['vervang'] = test
            if test == '':
                self.always_replace = True
        self.p["extlist"] = cmdline_options.pop('extensions', '')
        if not self.p["extlist"]:
            self.p["extlist"] = []
        # only apply saved_options when use-saved was requested?
        self.extraopts['use_saved'] = cmdline_options.pop('use_saved', False)
        if not self.extraopts['use_saved']:
            for key, argname in self.save_options_keys:
                self.p[key] = cmdline_options.pop(argname, self.p.get(key, False))
        for arg in ('regex', 'follow_symlinks', 'select_subdirs', 'select_files',
                    'dont_save', 'no_gui', 'output_file', 'full_path', 'as_csv', 'summarize'):
            if arg in self.outopts:
                self.outopts[arg] = cmdline_options.pop(arg, '') or self.outopts[arg]
            else:
                self.extraopts[arg] = cmdline_options.pop(arg, '')
        self.maak_backups = cmdline_options.pop('backup_originals', '')
        self.exit_when_ready = True  # always on?

    def write_to_ini(self, path=None):
        """add or replace the current settings in the ini files

        When supplied on the command line, the settings are not remembered
        (see self.cmdline_options).
        """
        if self.extraopts['dont_save']:
            return
        loc, mfile, ofile = get_iniloc(path)
        if not loc.exists():
            loc.mkdir()
        with mfile.open("w") as _out:
            json.dump(self.mru_items, _out, indent=4)
        opts = {key: self.p[key] for key, argname in self.save_options_keys}
        opts.update(self.outopts)
        with ofile.open("w") as _out:
            json.dump(opts, _out, indent=4)

    def determine_common(self):
        """determine common part of filenames

        Always returns a directory path ending in the path separator
        (multi/default modes) or the single file's path (single mode).
        """
        if self.apptype == 'single':
            test = self.p['filelist'][0]
        elif self.apptype == 'multi':
            test = os.path.commonpath([str(x) for x in self.p['filelist']])
            ## if test in self.p['filelist']:
            ##     pass
            ## else:
            ##     while test and not os.path.exists(test):
            ##         test = test[:-1]
            # make sure common part is a directory
            if os.path.isfile(test):
                test = os.path.dirname(test) + os.sep
            else:
                test += os.sep
        else:
            test = str(self.p["pad"]) + os.sep
        return test

    def checkzoek(self, item):
        """validate the search argument; returns an error message or ''"""
        if not item:
            mld = "Kan niet zoeken zonder zoekargument"
        else:
            mld = ""
            try:
                self.mru_items["zoek"].remove(item)
            except ValueError:
                pass
            self.mru_items["zoek"].insert(0, item)
            self.s += "zoeken naar {0}".format(item)
            self.p["zoek"] = item
        return mld

    def checkverv(self, items):
        """validate the replacement; returns an error message or ''"""
        mld = ""
        self.p["vervang"] = None
        vervang, leeg = items
        if vervang:
            try:
                self.mru_items["verv"].remove(vervang)
            except ValueError:
                pass
            self.mru_items["verv"].insert(0, vervang)
            # bugfix: append to the report string instead of overwriting what
            # checkzoek() already put there (the elif branch below uses += too)
            self.s += "\nen vervangen door {0}".format(vervang)
            self.p["vervang"] = vervang
        elif leeg:
            self.s += "\nen weggehaald"
            self.p["vervang"] = ""
        return mld

    def checkattr(self, items):
        """validate the extra search options; returns an error message or ''"""
        mld = ""
        regex, case, words = items
        opts = []
        if regex:
            opts.append("regular expression")
            self.p["regexp"] = regex
        if case:
            opts.append("case-sensitive")
            self.p["case"] = case
        if words:
            opts.append("hele woorden")
            self.p["woord"] = words
        if opts:
            self.s += " ({})".format(', '.join(opts))
        return mld

    def checktype(self, item):
        """validate the file type (extension) filter; returns an error message or ''"""
        mld = ""
        if item:
            try:
                self.mru_items["types"].remove(item)
            except ValueError:
                pass
            self.mru_items["types"].insert(0, item)
            self.s += "\nin bestanden van type {0}".format(item)
            exts = item.split(",")
            self.p["extlist"] = [x.lstrip().strip() for x in exts]
        else:
            self.p["extlist"] = []
        return mld

    def checkpath(self, item):
        """validate the search path; returns an error message or ''"""
        test = pathlib.Path(item)
        if not item:
            mld = ("Ik wil wel graag weten in welke directory ik moet "
                   "(beginnen met) zoeken")
        elif not test.exists():  # pathlib.Path(item).exists():
            mld = "De opgegeven directory bestaat niet"
        else:
            mld = ""
            try:
                self.mru_items["dirs"].remove(item)
            except ValueError:
                pass
            self.mru_items["dirs"].insert(0, item)
            self.s += "\nin {0}".format(item)
            self.p["pad"] = test  # item
            self.p['filelist'] = ''
        return mld

    def checksubs(self, items):
        """register the subdirectory / symlink options"""
        subdirs, links, depth = items
        if subdirs:
            self.s += " en onderliggende directories"
        self.p["subdirs"] = subdirs
        self.p["follow_symlinks"] = links
        self.p["maxdepth"] = depth

    def doe(self):
        """Execute the search and show the results screen"""
        item = self.gui.get_searchtext()
        mld = self.checkzoek(item)
        if not mld:
            self.checkverv(self.gui.get_replace_args())
            self.checkattr(self.gui.get_search_attr())
            # qt version
            if self.apptype != "single" or self.p['filelist'][0].is_dir():
                self.checktype(self.gui.get_types_to_search())
            # wx version
            # try:
            #     typelist = self.gui.get_types_to_search()
            # except AttributeError:
            #     typelist = None
            # if typelist:
            #     self.checktype(typelist)
            if not self.apptype:
                mld = self.checkpath(self.gui.get_dir_to_search())
        if not mld:
            # qt version
            if self.apptype != "single" or self.p['filelist'][0].is_dir():
                self.checksubs(self.gui.get_subdirs_to_search())
            elif self.apptype == "single" and self.p['filelist'][0].is_symlink():
                self.p["follow_symlinks"] = True
            # wx version
            # try:
            #     self.checksubs(self.gui.get_subdirs_to_search())
            # except AttributeError:
            #     pass
            self.p["backup"] = self.gui.get_backup()
            self.p["negeer"] = self.gui.get_ignore()
            self.p["context"] = self.gui.get_context()
        if mld:
            self.gui.error(self.fouttitel, mld)
            return
        self.gui.add_item_to_searchlist(item)
        if not self.extraopts['dont_save']:
            loc = self.p.get('pad', '') or str(self.p['filelist'][0].parent)
            self.write_to_ini(os.path.abspath(loc))
        self.zoekvervang = Finder(**self.p)
        self.zoekvervang.setup_search()
        if not self.zoekvervang.ok:
            self.gui.meld(self.resulttitel, '\n'.join(self.zoekvervang.rpt),
                          self.zoekvervang.errors)
            return
        if not self.zoekvervang.filenames:
            self.gui.meld(self.resulttitel, "Geen bestanden gevonden")
            return
        common_part = self.determine_common()
        if self.apptype == "single" or (
                len(self.p['filelist']) == 1 and self.p['filelist'][0].is_file()):
            pass
        else:
            skip_dirs = self.gui.get_skipdirs()
            skip_files = self.gui.get_skipfiles()
            go_on = skip_dirs or skip_files
            canceled = False
            while go_on:
                if skip_dirs:
                    # first round: show directories
                    if self.zoekvervang.dirnames:
                        self.names = sorted(self.zoekvervang.dirnames)
                        result = SelectNames(self, files=False).show()
                        # NOTE(review): show() returns a 2-tuple, so
                        # `not result` can never be true here -- confirm the
                        # intended cancel handling for the directory dialog.
                        if not result:
                            canceled = True
                            break
                        fnames = self.zoekvervang.filenames[:]
                        for entry in fnames:
                            for name in self.names:
                                # if str(entry).startswith(name + '/'):
                                if entry.parent == name:
                                    self.zoekvervang.filenames.remove(entry)
                                    break
                    if not skip_files:
                        go_on = False
                if skip_files:
                    # second round: show the remaining files
                    self.names = sorted(self.zoekvervang.filenames)  # , key=lambda x: str(x))
                    result, names = SelectNames(self).show()
                    if not result and not skip_dirs:
                        canceled = True
                        break
                    elif result:
                        self.zoekvervang.filenames = names
                    go_on = False
            if canceled:
                return
        self.gui.set_waitcursor(True)
        self.zoekvervang.go()  # do_action()  # search_python=self.p["context"])
        self.gui.set_waitcursor(False)
        self.noescape = True  # wx version: switch for closing the dialog with Escape
        if len(self.zoekvervang.rpt) == 1:
            if self.extraopts['output_file']:
                print('No results')
            else:
                mld = "Niks gevonden" if self.zoekvervang.ok else self.zoekvervang.rpt[0]
                self.gui.meld(self.resulttitel, mld)
        else:
            dlg = Results(self, common_part)
            if self.extraopts['output_file']:
                with self.extraopts['output_file'] as f_out:
                    for line in dlg.get_results():
                        f_out.write(line + "\n")
            else:
                dlg.show()
        if (self.extraopts['no_gui'] and self.extraopts['output_file']) or (
                self.gui.get_exit() and self.p["vervang"] is not None):
            self.gui.einde()
| StarcoderdataPython |
6671367 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
from contextlib import contextmanager
LOG = logging.getLogger(__name__)
class HealthCheckError(Exception):
    """Raised when a monitored service fails its health check."""

    def __init__(self, service, extra_message=None):
        parts = ["HealthCheckError: Service {} is experiencing issues.".format(service)]
        if extra_message:
            parts.append(extra_message)
        super(HealthCheckError, self).__init__(" ".join(parts))
class RedisNotWorking(Exception):
    """Raised when any redis operation used by the health check fails."""
    pass
class KeyAlreadyLocked(Exception):
    """Raised when a redis lock for a key could not be acquired because it is held elsewhere."""
    pass
class SimpleHealthCheck(object):
    def __init__(
        self, health_check_url, service_key, redis_client, http_client,
        http_request_exceptions, service_ok_status=200,
        service_ok_response='WORKING', health_check_request_timeout=2,
        health_check_ttl=60, verify_ssl=True
    ):
        """Store the health-check configuration.

        health_check_url: URL fetched by _make_http_request.
        service_key: base string from which the redis key names are derived.
        redis_client / http_client: injected clients (redis-py compatible and
            requests-compatible respectively).
        http_request_exceptions: exception type(s) raised by the http client.
        service_ok_status / service_ok_response: expected status code and body.
        health_check_request_timeout: per-request timeout in seconds.
        health_check_ttl: expiry (seconds) for redis state keys and locks.
        verify_ssl: passed through to the http client's `verify` option.
        """
        self._health_check_url = health_check_url
        self._service_key = service_key
        self._service_ok_status = service_ok_status
        self._service_ok_response = service_ok_response
        self._health_check_request_timeout = health_check_request_timeout
        self._health_check_ttl = health_check_ttl
        self._redis_client = redis_client
        self._http_client = http_client
        self._http_request_exceptions = http_request_exceptions
        self._verify_ssl = verify_ssl
@property
def _service_ok_key(self):
return 'ok_' + self._service_key
@property
def _service_failed_key(self):
return 'failed_' + self._service_key
@property
def _service_ok_lock_key(self):
return 'service_ok_lock_' + self._service_key
@property
def _service_failed_lock_key(self):
return 'service_failed_lock_' + self._service_key
def _get_key_value(self, key):
try:
return self._redis_client.get(key)
except Exception as e:
LOG.warn(e)
raise RedisNotWorking(e)
def _set_key_with_expire(self, key, timeout, value=1):
try:
self._redis_client.setex(key, value, timeout)
except Exception as e:
LOG.warn(e)
raise RedisNotWorking(e)
def _make_http_request(self):
return self._http_client.get(
self._health_check_url,
timeout=self._health_check_request_timeout,
verify=self._verify_ssl
)
@contextmanager
def _acquire_lock(self, key):
lock = self._redis_client.lock(
key, timeout=self._health_check_ttl
)
try:
is_locked = lock.acquire(blocking=False)
except Exception as e:
LOG.warn(e)
raise RedisNotWorking(e)
else:
if is_locked:
yield True
else:
raise KeyAlreadyLocked(
"Key {} is already locked!".format(key)
)
finally:
if 'is_locked' in locals() and is_locked:
try:
lock.release()
except Exception as e:
LOG.warn(e)
error_message = "Error while releasing lock for key {}, wait for {}s.".format(
key, self._health_check_ttl
)
LOG.warn(error_message)
def _handle_service_is_down(self, error_message=None):
try:
with self._acquire_lock(self._service_failed_lock_key):
self._set_key_with_expire(
self._service_failed_key, self._health_check_ttl
)
except KeyAlreadyLocked as e:
self.check_service()
else:
raise HealthCheckError(
service=self._health_check_url,
extra_message=error_message
)
def _handle_service_is_up(self, service_response):
status_is_ok = self._service_ok_status == service_response.status_code
content_is_ok = self._service_ok_response == service_response.content
if status_is_ok and content_is_ok:
try:
with self._acquire_lock(self._service_ok_lock_key):
self._set_key_with_expire(
self._service_ok_key, self._health_check_ttl
)
except KeyAlreadyLocked as e:
self.check_service()
else:
error_message = "Expecting status {} and content {}, but received {} and {}".format(
self._service_ok_status, self._service_ok_response,
service_response.status_code, service_response.content
)
self._handle_service_is_down(error_message)
def _is_service_up(self):
return self._get_key_value(self._service_ok_key)
def _is_service_down(self):
return self._get_key_value(self._service_failed_key)
def check_service(self):
if self._is_service_up():
return True
if self._is_service_down():
raise HealthCheckError(
service=self._health_check_url,
extra_message="Waiting to check again..."
)
try:
service_response = self._make_http_request()
except self._http_request_exceptions as e:
LOG.warn(e)
self._handle_service_is_down(str(e))
else:
self._handle_service_is_up(service_response)
return True
| StarcoderdataPython |
def read_file():
    """Prompt for a file name (default referat.txt) and return its full text."""
    chosen_name = input("Введите имя файла(по умолчанию referat.txt): ") or "referat.txt"
    with open(chosen_name, 'r', encoding='utf-8') as source:
        text = source.read()
    return text
# Top-level demo: print basic statistics for referat.txt, then write a copy
# with periods replaced by exclamation marks to referat2.txt.
with open('referat.txt', 'r', encoding='utf-8') as var01:
    result = var01.read()
    print('Длинна строки: {}'.format(len(result)))
    print('Количество слов: {}'.format(len(result.split())))
    # NOTE(review): the length is printed twice in the original script —
    # presumably accidental, but kept so the output is unchanged.
    print('Длинна строки: {}'.format(len(result)))
# BUGFIX(idiom): removed the explicit var01.close()/var02.close() calls —
# the `with` statement already closes the files on block exit.
with open('referat2.txt', 'w', encoding='utf-8') as var02:
    content = read_file()  # may prompt for a different source file
    result = content.replace('.', '!')
    var02.write(result)
| StarcoderdataPython |
8020702 | <reponame>jt7960/PFRScraper
##IMPORTS##
import requests
from bs4 import BeautifulSoup as bs
import re
import pyodbc
import pandas as pd
##DATABASE CONNECTION##
#1 establish connection to the database
#2 create a cursor (which facilitates db transactions)
cnxn = pyodbc.connect('DSN=NFLStats;')  # This requires valid database connection via DNS.
crsr = cnxn.cursor()

##SCRAPE##
# Base URL; trailing / left off because scraped href text starts with '/'.
baseURL = 'https://pro-football-reference.com'

##Scrape Team Names##
# NOTE(review): 'alphabet' is never used below; kept in case other code relies on it.
alphabet = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
# BUGFIX: baseURL has no trailing slash, so the path needs a leading '/'
# (the old f'{baseURL}teams/' produced '...comteams/').
r = requests.get(f'{baseURL}/teams/')
# BUGFIX: removed a stray trailing backslash that fused the next statement
# onto this line and made the script a SyntaxError.
page = bs(r.text, 'html.parser')
team_dict = {}
for link in page.find_all('a', attrs={'href': re.compile("^/teams/")}):
    # The page lists both bare team names and "City Team" strings; keep only
    # the ones containing a space, mapping "City Team" -> the 3-letter
    # abbreviation taken from the href ('/teams/xyz/...').
    if link.string.find(" ") > 0:
        team_dict[link.string] = link.get('href')[7:10]
#print(team_dict)

##Scrape Box Scores##
#After testing 1 week and season, create a nested for loop and do all the weeks and seasons!!
season = '2019'
week = '1'
r = requests.get(f'{baseURL}/years/{season}/week_{week}.htm')
page = bs(r.text, 'html.parser')
for link in page.find_all(href=re.compile(f"/boxscores/{season}")):
    print(link.get('href'))
| StarcoderdataPython |
11336146 | #!/usr/bin/python3
"""Filter from hsv image to black-white image by color thesholds"""
import cv2
import numpy as np
def cv2_hsv_color_range_filter(hvs_image, filter_parameters):
    """HVS color range filter"""
    # Both bounds must be present before anything else is attempted.
    required = ("lower_color", "upper_color")
    if any(key not in filter_parameters for key in required):
        raise ValueError("Color range was not specified for HVS color range filter.")
    lower_color = _get_color_from_parameter(filter_parameters["lower_color"])
    upper_color = _get_color_from_parameter(filter_parameters["upper_color"])
    # Pixels inside [lower_color, upper_color] become white, the rest black.
    return cv2.inRange(hvs_image, lower_color, upper_color)
# color_parameter_string = "255,255,255"
def _get_color_from_parameter(color_parameter_string):
colour_list_string = color_parameter_string.split(',') # ["255", "255", "255"]
colour_list = list(map(int, colour_list_string))
return np.array(colour_list)
| StarcoderdataPython |
4981097 | import model
import http.client
class HTTPUnexpected(Exception):
"""Raised whenever an unexpected response is encountered."""
def __init__(self, status: int, error: str | None = None) -> None:
message = error or f"Endpoint returned HTTP {status} {http.client.responses[status]}."
super().__init__(message)
def setup(bot: model.Bakerbot) -> None:
    # Appears to be the extension entry point (receives the Bakerbot
    # instance); this module defines only HTTPUnexpected, so there is
    # nothing to register and setup is a no-op.
    pass
| StarcoderdataPython |
6576210 | import torch
import torch.nn as nn
import torch.nn.functional as F
# module for childsumtreelstm
class ChildSumTreeLSTM(nn.Module):
    """Child-Sum Tree-LSTM cell applied recursively over a tree.

    Each node's state is computed from its input embedding and the *sum* of
    its children's hidden states, with a per-child forget gate.
    """

    def __init__(self, in_dim, mem_dim):
        """
        Args:
            in_dim: dimensionality of the input embeddings.
            mem_dim: dimensionality of the LSTM memory/hidden state.
        """
        super().__init__()
        self.in_dim = in_dim
        self.mem_dim = mem_dim
        # i, o, u gates are projected together as a single 3*mem_dim output.
        self.ioux = nn.Linear(self.in_dim, 3 * self.mem_dim)
        self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)
        # Forget-gate projections, applied once per child.
        self.fx = nn.Linear(self.in_dim, self.mem_dim)
        self.fh = nn.Linear(self.mem_dim, self.mem_dim)

    def node_forward(self, inputs, child_c, child_h):
        """Compute (cell, hidden) for one node from its children's states.

        Args:
            inputs: (1, in_dim) embedding of this node.
            child_c, child_h: (num_children, mem_dim) children states.
        Returns:
            (c, h): both of shape (1, mem_dim).
        """
        child_h_sum = torch.sum(child_h, dim=0, keepdim=True)
        iou = self.ioux(inputs) + self.iouh(child_h_sum)
        i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
        # BUGFIX(idiom): F.sigmoid/F.tanh are deprecated aliases; use the
        # numerically identical torch.sigmoid/torch.tanh instead.
        i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
        # NOTE(review): the reference Child-Sum Tree-LSTM uses a *sigmoid*
        # forget gate; tanh here may be intentional — verify against the
        # training results before changing it.
        f = torch.tanh(
            self.fh(child_h) +
            self.fx(inputs).repeat(len(child_h), 1)
        )
        fc = torch.mul(f, child_c)
        c = torch.mul(i, u) + torch.sum(fc, dim=0, keepdim=True)
        h = torch.mul(o, torch.tanh(c))
        return c, h

    def forward(self, tree, inputs):
        """Recursively compute states bottom-up; returns the root's (c, h).

        Side effect: stores each node's (c, h) pair on ``tree.state``.
        """
        for idx in range(len(tree.children)):
            self.forward(tree.children[idx], inputs)
        if len(tree.children) == 0:
            # Leaf node: both child states are zero vectors.
            child_c = inputs[0].detach().new(1, self.mem_dim).fill_(0.).requires_grad_()
            child_h = inputs[0].detach().new(1, self.mem_dim).fill_(0.).requires_grad_()
        else:
            child_c, child_h = zip(*map(lambda x: x.state, tree.children))
            child_c, child_h = torch.cat(child_c, dim=0), torch.cat(child_h, dim=0)
        tree.state = self.node_forward(inputs[tree.value], child_c, child_h)
        return tree.state
| StarcoderdataPython |
6579100 | import random
from blackjack.models import Card
class Deck:
    """Represents a deck of playing cards"""

    def __init__(self) -> None:
        self.new_deck()

    @property
    def cards(self) -> list[Card]:
        """The cards in the deck."""
        return self._cards

    def new_deck(self) -> None:
        """Generates a new deck of 52 cards and shuffles them."""
        self._cards: list[Card] = []
        for r in Card.RANKS:
            self.cards.extend(Card(r, s) for s in Card.SUITS)
        self.shuffle()

    def shuffle(self) -> None:
        """Shuffles the deck."""
        random.shuffle(self.cards)

    def draw(self) -> Card:
        """Draws one card from the top (end) of the deck."""
        return self.cards.pop()

    def draw_many(self, amount: int) -> list[Card]:
        """Draws multiple cards from the deck.

        Returns an empty list for a non-positive amount.
        """
        # BUGFIX: the old unconditional slicing returned the ENTIRE deck and
        # emptied it when amount == 0, because xs[-0:] is xs[0:].
        if amount <= 0:
            return []
        drawn = self.cards[-amount:]
        self._cards = self.cards[:-amount]
        return drawn

    def __repr__(self) -> str:
        """Representation of a deck."""
        return "\n".join(str(c) for c in self.cards)
| StarcoderdataPython |
11370576 | <filename>src/engine.py
import torch
import torchvision.transforms as T
from torch.utils.data import DataLoader
from train import *
from val import *
from model import *
from evaluation_metrics import *
from datasets import *
# A DataLoader calls collate_fn with the list of samples drawn for one batch
# and yields whatever it returns as the batch.
# https://discuss.pytorch.org/t/how-to-use-collate-fn/27181
def collate_fn(batch):
    """Transpose a batch: list of sample tuples -> tuple of per-field tuples."""
    return tuple(zip(*batch))
def engine(device, model_path=None, init_epoch=None, resume=False):
    '''
    Main function to train and validate.
    Args:
        device: device for computation.
        model_path: path of a saved state_dict checkpoint (used when resume=True).
        init_epoch: initial epoch to resume training from.
        resume: to resume training from last epoch.
    Return:
        final_score: list of [best_score, epoch] pairs, one per epoch run.
    '''
    final_score = []
    best_score = 0
    # Custom DataLoaders
    train_dataset = dataset(df_train, transforms=T.Compose([T.ToTensor()]))
    valid_dataset = dataset(df_val, train=False, transforms=T.Compose([T.ToTensor()]))
    train_loader = DataLoader(train_dataset,
                              BATCH_SIZE,
                              shuffle=False,
                              num_workers=8,
                              collate_fn=collate_fn)
    val_loader = DataLoader(valid_dataset,
                            BATCH_SIZE,
                            shuffle=False,
                            num_workers=8,
                            collate_fn=collate_fn)
    if resume:
        # BUGFIX: checkpoints below are written with
        # torch.save(model.state_dict(), ...), so resuming must rebuild the
        # model and load the state_dict — the old torch.load(model_path)
        # returned an OrderedDict, not a model, and .to(device) then failed.
        model = get_model(2)
        model.load_state_dict(torch.load(model_path))
    else:
        model = get_model(2)
        init_epoch = 0
    if init_epoch is None:
        # Robustness: default to a full run when no starting epoch is given.
        init_epoch = 0
    model.to(device)  # loading model on GPU
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params, lr=0.002, momentum=0.9, weight_decay=0.0007)
    for epoch in range(init_epoch, EPOCHS):
        '''
        Call the train function then validation function to take a look on how
        model is performed in that epoch. Output of val_fn, prediction will be
        given to evaluation metrics for getting score.
        '''
        train_loss = train_fn(train_loader, epoch, model, optimizer, device)
        prediction = val_fn(val_loader, model, device, display_random=True)
        valid_score = calculate_final_score(prediction, 0.5, 'pascal_voc')
        # Checkpoint (state_dict only) whenever the validation score improves.
        if valid_score > best_score:
            best_score = valid_score
            torch.save(model.state_dict(), f'frcnn_best_{epoch}.pth')
        final_score.append([best_score, epoch])
    return final_score
| StarcoderdataPython |
398209 | <reponame>revl/pants
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.base.exceptions import TargetDefinitionException, TaskError
from pants.base.workunit import WorkUnitLabel
from pants.task.testrunner_task_mixin import TestRunnerTaskMixin
from pants.util.contextutil import pushd
from pants.util.process_handler import SubprocessProcessHandler
from pants.contrib.node.tasks.node_paths import NodePaths
from pants.contrib.node.tasks.node_task import NodeTask
class NodeTest(TestRunnerTaskMixin, NodeTask):
    """Runs a test script from package.json in a NodeModule, currently via "npm run [script name]".
    Implementations of abstract methods from TestRunnerTaskMixin: _execute, _spawn,
    _test_target_filter, _validate_target
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Targets currently under test. _execute() sets this before invoking
        # run_script so that _run_node_distribution_command() (called deeper
        # in the stack) can hand the targets to spawn_and_wait().
        self._currently_executing_test_targets = []
    @classmethod
    def prepare(cls, options, round_manager):
        super().prepare(options, round_manager)
        # NodePaths must have been produced by an earlier task in the round.
        round_manager.require_data(NodePaths)
    @classmethod
    def supports_passthru_args(cls):
        # Extra CLI args after '--' are forwarded to the test script.
        return True
    def _run_node_distribution_command(self, command, workunit):
        """Overrides NodeTask._run_node_distribution_command.
        This is what is ultimately used to run the Command. It must return the return code of the
        process. The base implementation just calls command.run immediately. We override here to
        invoke TestRunnerTaskMixin.spawn_and_wait, which ultimately invokes _spawn, which finally
        calls command.run.
        """
        return self.spawn_and_wait(self._currently_executing_test_targets, command, workunit)
    def _execute(self, all_targets):
        """Implements abstract TestRunnerTaskMixin._execute."""
        targets = self._get_test_targets()
        if not targets:
            return
        node_paths = self.context.products.get_data(NodePaths)
        for target in targets:
            # _validate_target guarantees exactly one NodeModule dependency.
            node_module = target.dependencies[0]
            self.context.log.debug(f"Testing node module (first dependency): {node_module}")
            # Run from inside the module's resolved node path.
            with pushd(node_paths.node_path(node_module)):
                self._currently_executing_test_targets = [target]
                result, test_command = self.run_script(
                    target.script_name,
                    package_manager=self.get_package_manager(target=node_module),
                    target=target,
                    script_args=self.get_passthru_args(),
                    node_paths=node_paths.all_node_paths,
                    workunit_name=target.address.reference(),
                    workunit_labels=[WorkUnitLabel.TEST],
                )
                if result != 0:
                    raise TaskError(
                        "test script failed:\n"
                        "\t{} failed with exit code {}".format(test_command, result)
                    )
        self._currently_executing_test_targets = []
    def _spawn(self, command, workunit):
        """Implements abstract TestRunnerTaskMixin._spawn."""
        process = command.run(stdout=workunit.output("stdout"), stderr=workunit.output("stderr"))
        return SubprocessProcessHandler(process)
    def _test_target_filter(self):
        """Implements abstract TestRunnerTaskMixin._test_target_filter."""
        return self.is_node_test
    def _validate_target(self, target):
        """Implements abstract TestRunnerTaskMixin._validate_target."""
        if len(target.dependencies) != 1 or not self.is_node_module(target.dependencies[0]):
            message = "NodeTest targets must depend on exactly one NodeModule target."
            raise TargetDefinitionException(target, message)
| StarcoderdataPython |
1717377 | import json
import logging
import maven_dependency_utils
import os
import re
import shutil
import subprocess
import sys
import time
# Resolve the required tool locations from the environment, aborting early
# with a clear message when one cannot be found.
desired_sdk = os.getenv("ANDROID_SDK_ROOT")
if not desired_sdk:
    logging.error("Environment variable ANDROID_SDK_ROOT must be set.")
    exit(-1)

# NDK lookup order: explicit ANDROID_NDK, then the SDK's "ndk-bundle"
# directory, then its "ndk" directory.
_env_ndk = os.getenv("ANDROID_NDK")
_ndk_bundle = os.path.join(desired_sdk, "ndk-bundle")
_ndk_plain = os.path.join(desired_sdk, "ndk")
if _env_ndk:
    desired_ndk = _env_ndk
elif os.path.exists(_ndk_bundle):
    desired_ndk = _ndk_bundle
elif os.path.exists(_ndk_plain):
    desired_ndk = _ndk_plain
else:
    logging.error("Environment variable ANDROID_NDK must be set.")
    exit(-1)

desired_jdk = os.getenv("JAVA_HOME")
if not desired_jdk:
    logging.error("Environment variable JAVA_HOME must be set.")
    exit(-1)
def ensure_output_dir(output_dir_path):
    """Make sure ``output_dir_path`` does not exist.

    An existing directory is moved aside to a timestamp-suffixed sibling so
    the caller can populate a fresh one. Exits the process if the directory
    cannot be moved away.
    """
    # Move out if already exists.
    if os.path.exists(output_dir_path):
        logging.debug(output_dir_path + " exists ! Trying to move it.")
        output_dir_path_copy = output_dir_path + '-' + time.strftime("%Y%m%d-%H%M%S")
        shutil.move(output_dir_path, output_dir_path_copy)
    # If it still exists, fail the execution.
    if os.path.exists(output_dir_path):
        logging.error("Unable to cleanup existing dependency directory: " + output_dir_path)
        logging.error("Move it away manually and rerun the script.")
        # BUGFIX: the comment above promised to fail, but the function used
        # to fall through and let the script continue into a dirty directory.
        exit(-1)
## Returns a list of strings, where the strings correspond to standard maven artifacts, i.e. groupId:artifactId:version
def get_dependencies(react_native_dir):
    """Run the Gradle dependency report and extract maven coordinates.

    Exits the process (after logging Gradle's stderr) when the report fails.
    """
    completed = subprocess.run(
        './gradlew :ReactAndroid:dependencies --configuration api',
        stderr=subprocess.PIPE, stdout=subprocess.PIPE,
        cwd=react_native_dir, shell=True)
    if completed.returncode != 0:
        logging.error('Failed to get dependencies. Printing gradle output: ')
        logging.error(completed.stderr.decode('utf-8'))
        exit(-1)
    # Each dependency line in the report looks like '+--- group:artifact:version'.
    return re.findall(r'^\S---\s+(\S*)', completed.stdout.decode('utf-8'), re.MULTILINE)
def main():
    """Download ReactAndroid's maven closure and Hermes artifacts into
    <repo>/android/dependencies, extracting native libraries per Office ABI."""
    if len(sys.argv) == 2:
        react_native_dir = sys.argv[1]
    else:
        react_native_dir = os.getcwd()
    # Some smoke checks to ensure that we have a valid react-native checkout.
    packageJsonFile = os.path.join(react_native_dir, "package.json")
    with open(packageJsonFile) as packageJsonText:
        packageJson = json.load(packageJsonText)
        if(packageJson[u'name'] != u'react-native' and packageJson[u'name'] != u'react-native-macos'):
            logging.info("Not a valid RN repo path!")
            exit(-1)
    # BUGFIX: the old check was `not os.path.join(...)`, which is always
    # False (os.path.join returns a non-empty string), so the build.gradle
    # smoke check never fired. It must test existence.
    if not os.path.exists(os.path.join(react_native_dir, "ReactAndroid", "build.gradle")):
        logging.info("Not a valid RN repo path!")
        exit(-1)
    dependency_dir_root = os.path.join(react_native_dir, "android", "dependencies")
    dependency_dir_maven = os.path.join(dependency_dir_root, "cross", "cross", "x-none", "maven")
    dependency_dir_native = dependency_dir_root
    dependency_dir_hermes = os.path.join(dependency_dir_root, "hermes")
    log_file_path = os.path.join(react_native_dir, "android", "log_" + time.strftime("%Y%m%d-%H%M%S") + ".txt")
    if not os.path.exists(os.path.join(react_native_dir, "android")):
        os.mkdir(os.path.join(react_native_dir, "android"))
    logging.basicConfig(level=logging.DEBUG, filename=log_file_path)
    logging.info("react_native_dir: " + react_native_dir)
    logging.info("Maven dependency path: " + dependency_dir_maven)
    logging.info("Native dependency path: " + dependency_dir_native)
    # Ensure we have an output directory
    ensure_output_dir(dependency_dir_root)
    # Download a transitive dependency closure of the ReactAndroid project
    dependencies = get_dependencies(react_native_dir)
    maven_dependency_utils.download_transitive_closure(artifacts=dependencies, output_directory_path=dependency_dir_maven, gradle_path='gradlew', ignore_metadata_redirection=True, resolve_to_single_version=False)
    # Extract the native libraries from maven packages
    office_abi_mappings = {'arm64-v8a':'droidarm64', 'armeabi-v7a':'droidarm', 'x86':'droidx86','x86_64':'droidx64'}
    maven_dependency_utils.extract_native_modules(dependency_dir_maven, dependency_dir_native, office_abi_mappings)
    # Copy and extract hermes.
    dependency_dir_hermes_android_aar_path = os.path.join(dependency_dir_hermes, "android")
    hermes_engine_node_modules_path = os.path.join(react_native_dir, "node_modules", "hermes-engine")
    shutil.copytree(os.path.join(hermes_engine_node_modules_path, "android"), dependency_dir_hermes_android_aar_path)
    shutil.copytree(os.path.join(hermes_engine_node_modules_path, "linux64-bin"), os.path.join(dependency_dir_hermes, "linux64-bin"))
    shutil.copytree(os.path.join(hermes_engine_node_modules_path, "win64-bin"), os.path.join(dependency_dir_hermes, "win64-bin"))
    shutil.copytree(os.path.join(hermes_engine_node_modules_path, "osx-bin"), os.path.join(dependency_dir_hermes, "osx-bin"))
    shutil.copy(os.path.join(hermes_engine_node_modules_path, "package.json"), dependency_dir_hermes)
    dependency_dir_hermes_android_native_debug = os.path.join(dependency_dir_hermes_android_aar_path, "debug")
    dependency_dir_hermes_android_native_release = os.path.join(dependency_dir_hermes_android_aar_path, "ship")
    maven_dependency_utils.extract_native_modules_from_archive(os.path.join(dependency_dir_hermes_android_aar_path, "hermes-debug.aar"), dependency_dir_hermes_android_native_debug, office_abi_mappings)
    maven_dependency_utils.extract_native_modules_from_archive(os.path.join(dependency_dir_hermes_android_aar_path, "hermes-cppruntime-debug.aar"), dependency_dir_hermes_android_native_debug, office_abi_mappings)
    maven_dependency_utils.extract_native_modules_from_archive(os.path.join(dependency_dir_hermes_android_aar_path, "hermes-release.aar"), dependency_dir_hermes_android_native_release, office_abi_mappings)
    maven_dependency_utils.extract_native_modules_from_archive(os.path.join(dependency_dir_hermes_android_aar_path, "hermes-cppruntime-release.aar"), dependency_dir_hermes_android_native_release, office_abi_mappings)
    # Copy log file into the dependency root folder.
    shutil.copy(log_file_path, os.path.join(dependency_dir_root))
    with open(log_file_path, "r") as fin:
        print(fin.read())
| StarcoderdataPython |
4993869 | <filename>c_elegans_wiring/sub_modules/api/api_alpha.py<gh_stars>1-10
import c_elegans_wiring.sub_modules.graph.path_finder as path_finder
import c_elegans_wiring.sub_modules.graph.graph_builder as graph_builder
def build_main_cell_graph(cell_graph_obj, df_list, from_classes_list, to_classes_list):
    """Thin API wrapper: delegate graph construction to graph_builder."""
    built_graph = graph_builder.build_main_cell_graph(
        cell_graph_obj=cell_graph_obj,
        df_list=df_list,
        from_classes_list=from_classes_list,
        to_classes_list=to_classes_list,
    )
    return built_graph
def filter_graph(graph_obj, max_cutoff, is_incremental=False, is_maximal=True):
    """Select a path-finding strategy, record the found paths on the graph,
    and return the resulting sub-graph.

    Non-incremental mode enumerates all simple paths up to max_cutoff;
    incremental mode searches maximal or minimal paths depending on is_maximal.
    """
    if not is_incremental:
        sub_graph, all_paths = path_finder.find_all_simple_paths_multiple(graph_obj=graph_obj, cut_off=max_cutoff)
    elif is_maximal:
        sub_graph, all_paths = path_finder.find_maximal_paths(graph_obj=graph_obj, max_cut_off=max_cutoff)
    else:
        sub_graph, all_paths = path_finder.find_minimal_paths(graph_obj=graph_obj, max_cut_off=max_cutoff)
    if all_paths:
        # Side effect: stash the paths on the graph and rebuild its layers.
        graph_obj.relevant_paths = all_paths
        graph_obj.fill_layers_from_paths()
    return sub_graph
| StarcoderdataPython |
6596164 | from django.contrib import admin
from .models import Block, BlockTemplate
class BlockAdmin(admin.ModelAdmin):
    # Changelist shows context/weight/template/site; filterable by site.
    list_display = ('context', 'weight', 'template', 'site', )
    list_filter = ('site', )
class BlockTemplateAdmin(admin.ModelAdmin):
    # Changelist shows name/markup/site; filterable by site.
    list_display = ('name', 'markup', 'site', )
    list_filter = ('site', )
# Register both models so they appear in the Django admin site.
admin.site.register(Block, BlockAdmin)
admin.site.register(BlockTemplate, BlockTemplateAdmin)
| StarcoderdataPython |
6466500 | <filename>prompt_tuning/data/qa.py
# Copyright 2022 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""QA and zeroshot domain transfer QA tasks and mixtures."""
import functools
from prompt_tuning.data import constants
from prompt_tuning.data import features
from prompt_tuning.data import metrics as pt_metrics
from prompt_tuning.data import postprocessors as pt_postprocessors
from prompt_tuning.data import preprocessors as pt_preprocessors
import seqio
from t5.data import postprocessors
from t5.data import preprocessors
from t5.evaluation import metrics
# MRQA 2019 sub-datasets used only for zero-shot evaluation below (their
# TFDS "validation" split is remapped to "test" when the task is registered).
MRQA_OUT_OF_DOMAIN = ("bio_asq",
                      "drop",
                      "duo_rc",
                      "race",
                      "relation_extraction",
                      "textbook_qa")
# MRQA sub-datasets registered with their default splits.
MRQA_IN_DOMAIN = ("news_qa",
                  "trivia_qa",
                  "search_qa",
                  "hotpot_qa",
                  "natural_questions")
# Register one copy of every task/mixture per model feature set (the prefix
# distinguishes vocabularies/models in the registry names).
for model_prefix, feats in features.MODEL_TO_FEATURES.items():
  # This is the SQuAD but it can output examples to tensorboard.
  seqio.TaskRegistry.add(
      f"{model_prefix}squad_v010_allanswers_examples",
      source=seqio.TfdsDataSource(tfds_name="squad/v1.1:3.0.0"),
      preprocessors=[
          preprocessors.squad,
          seqio.preprocessors.tokenize,
          seqio.CacheDatasetPlaceholder(),
          seqio.preprocessors.append_eos_after_trim,
      ],
      postprocess_fn=functools.partial(
          pt_postprocessors.postprocess_with_examples,
          postprocessors.qa,
          example_fields=(
              constants.TARGET_TEXT,
              constants.CONTEXT_TEXT,
              constants.QUESTION_TEXT,
              constants.ANSWERS_TEXT,
          )),
      metric_fns=[
          functools.partial(pt_metrics.metric_with_examples, metrics.squad),
          functools.partial(
              pt_metrics.text_examples,
              task_name="squad",
              format_fn=pt_metrics.format_qa),
      ],
      output_features=feats)
  # ========== MRQA 2019 Shared-task on Generalization of QA ==========
  # Out-of-domain sets have no training data here: validation maps to "test".
  for mrqa_name in MRQA_OUT_OF_DOMAIN:
    seqio.TaskRegistry.add(
        f"{model_prefix}mrqa_{mrqa_name}_v100_examples",
        source=seqio.TfdsDataSource(
            tfds_name=f"mrqa/{mrqa_name}:1.0.0",
            splits={"validation": "test"}),
        preprocessors=[
            pt_preprocessors.mrqa,
            seqio.preprocessors.tokenize,
            seqio.CacheDatasetPlaceholder(),
            seqio.preprocessors.append_eos_after_trim,
        ],
        postprocess_fn=functools.partial(
            pt_postprocessors.postprocess_with_examples,
            postprocessors.qa,
            example_fields=(constants.TARGET_TEXT, constants.CONTEXT_TEXT,
                            constants.QUESTION_TEXT, constants.ANSWERS_TEXT)),
        metric_fns=[
            functools.partial(pt_metrics.metric_with_examples, metrics.squad),
            functools.partial(
                pt_metrics.text_examples,
                task_name=mrqa_name,
                format_fn=pt_metrics.format_qa)
        ],
        output_features=feats)
  # In-domain sets keep their default splits.
  for mrqa_name in MRQA_IN_DOMAIN:
    seqio.TaskRegistry.add(
        f"{model_prefix}mrqa_{mrqa_name}_v100_examples",
        source=seqio.TfdsDataSource(
            tfds_name=f"mrqa/{mrqa_name}:1.0.0"),
        preprocessors=[
            pt_preprocessors.mrqa,
            seqio.preprocessors.tokenize,
            seqio.CacheDatasetPlaceholder(),
            seqio.preprocessors.append_eos_after_trim,
        ],
        postprocess_fn=functools.partial(
            pt_postprocessors.postprocess_with_examples,
            postprocessors.qa,
            example_fields=(constants.TARGET_TEXT, constants.CONTEXT_TEXT,
                            constants.QUESTION_TEXT, constants.ANSWERS_TEXT)),
        metric_fns=[
            functools.partial(pt_metrics.metric_with_examples, metrics.squad),
            functools.partial(
                pt_metrics.text_examples,
                task_name=mrqa_name,
                format_fn=pt_metrics.format_qa)
        ],
        output_features=feats)
  # ========== Mixtures for Zero-Shot ==========
  # Multiple mixtures, each one is train on SQuAD and evaluation on one of the
  # MRQA out-of-domain datasets.
  for mrqa_name in MRQA_OUT_OF_DOMAIN:
    seqio.MixtureRegistry.add(
        f"{model_prefix}squad_to_{mrqa_name}_examples", [
            f"{model_prefix}squad_v010_allanswers_examples",
            f"{model_prefix}mrqa_{mrqa_name}_v100_examples"
        ],
        default_rate=1.0)
  # A single mixture that is train on SQuAD, evaluate on all of the MRQA
  # out-of-domain datasets.
  seqio.MixtureRegistry.add(
      f"{model_prefix}squad_to_mrqa_examples",
      [f"{model_prefix}squad_v010_allanswers_examples"] +
      [f"{model_prefix}mrqa_{name}_v100_examples"
       for name in MRQA_OUT_OF_DOMAIN],
      default_rate=1.0)
| StarcoderdataPython |
9739253 | <gh_stars>0
#!/usr/bin/python3
from nlp.universal_dependencies import ParsedUniversalDependencies
from services.assistant_services_base import AssistantServicesBase
from .skill import SkillInput, Skill
class TellScheduleSkill(Skill):
    """Lets the assistant tell the user their schedule."""

    def __init__(self):
        """Initializes a new instance of the TellScheduleSkill class."""
        self._cmd_list = ['what is', 'tell']

    def matches_command(self, skill_input: SkillInput) -> bool:
        """Returns a Boolean value indicating whether this skill can be used to handle the given command."""
        verb = skill_input.verb.lower() if skill_input.verb else None
        verb_object = skill_input.verb_object.lower() if skill_input.verb_object else None
        if verb in self._cmd_list and verb_object == "schedule":
            return True
        # Fallback: match questions of the form "what is ... schedule".
        deps = skill_input.dependencies
        return deps.pron == "what" and deps.aux == "be" and deps.noun == "schedule"

    def execute_for_command(self, skill_input: SkillInput, services: AssistantServicesBase):
        """Executes this skill on the given command input."""
        events = services.calendar_service.get_todays_events()
        count = len(events)
        if count < 1:
            message = 'There are no events currently scheduled.'
        elif count == 1:
            only = events[0]
            message = ' '.join(['You only have', only.event_str, 'at',
                                only.start_time_str]) + '.'
        elif count == 2:
            first, second = events
            message = ' '.join(['You have', first.event_str, 'at',
                                first.start_time_str, 'and',
                                second.event_str, 'at',
                                second.start_time_str]) + '.'
        else:
            # 3 or more events: comma-separated list with a final "and".
            message = 'You have '
            for event in events[:-1]:
                message += ' '.join([event.event_str, 'at',
                                     event.start_time_str]) + ', '
            final = events[-1]
            message += ' '.join(['and', final.event_str, 'at',
                                 final.start_time_str]) + '.'
        services.user_interaction_service.speak(message, skill_input.verbose)

    def perform_setup(self, services):
        """Executes any setup work necessary for this skill before it can be used."""
        pass
4830049 | <reponame>qiangzai00001/hio-prj
#!/usr/bin/python -tt
#
# Copyright (c) 2013 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
""" This module implements a simple GPT partitions parser which can read the
GPT header and the GPT partition table. """
import struct
import uuid
import binascii
from mic.utils.errors import MountError
# Little-endian struct layout of the 92-byte GPT header (see read_header
# for the meaning of each field).
_GPT_HEADER_FORMAT = "<8s4sIIIQQQQ16sQIII"
_GPT_HEADER_SIZE = struct.calcsize(_GPT_HEADER_FORMAT)
# Struct layout of a single partition-table entry.
_GPT_ENTRY_FORMAT = "<16s16sQQQ72s"
_GPT_ENTRY_SIZE = struct.calcsize(_GPT_ENTRY_FORMAT)
# On-disk encoding of the only GPT revision this parser accepts (1.0).
_SUPPORTED_GPT_REVISION = '\x00\x00\x01\x00'
def _stringify_uuid(binary_uuid):
""" A small helper function to transform a binary UUID into a string
format. """
uuid_str = str(uuid.UUID(bytes_le = binary_uuid))
return uuid_str.upper()
def _calc_header_crc(raw_hdr):
    """ Calculate GPT header CRC32 checksum. The 'raw_hdr' parameter has to
    be a list or a tuple containing all the elements of the GPT header in a
    "raw" form, meaning that it should simply contain "unpacked" disk data.
    """
    fields = list(raw_hdr)
    # The CRC field itself (index 3) is zeroed for the calculation.
    fields[3] = 0
    packed = struct.pack(_GPT_HEADER_FORMAT, *fields)
    # Mask to an unsigned 32-bit value.
    return binascii.crc32(packed) & 0xFFFFFFFF
def _validate_header(raw_hdr):
    """ Validate the GPT header. The 'raw_hdr' parameter has to be a list or a
    tuple containing all the elements of the GPT header in a "raw" form,
    meaning that it should simply contain "unpacked" disk data. """
    signature = raw_hdr[0]
    revision = raw_hdr[1]
    header_size = raw_hdr[2]
    stored_crc = raw_hdr[3]
    # Validate the signature
    if signature != 'EFI PART':
        raise MountError("GPT partition table not found")
    # Validate the revision
    if revision != _SUPPORTED_GPT_REVISION:
        raise MountError("Unsupported GPT revision '%s', supported revision "
                         "is '%s'" % (binascii.hexlify(revision),
                                      binascii.hexlify(_SUPPORTED_GPT_REVISION)))
    # Validate header size
    if header_size != _GPT_HEADER_SIZE:
        raise MountError("Bad GPT header size: %d bytes, expected %d" %
                         (header_size, _GPT_HEADER_SIZE))
    computed_crc = _calc_header_crc(raw_hdr)
    if stored_crc != computed_crc:
        raise MountError("GPT header crc mismatch: %#x, should be %#x" %
                         (computed_crc, stored_crc))
class GptParser:
    """ GPT partition table parser. Allows reading the GPT header and the
    partition table, as well as modifying the partition table records. """

    def __init__(self, disk_path, sector_size = 512):
        """ The class constructor which accepts the following parameters:
              * disk_path - full path to the disk image or device node
              * sector_size - size of a disk sector in bytes """
        self.sector_size = sector_size
        self.disk_path = disk_path

        try:
            self._disk_obj = open(disk_path, 'r+b')
        except IOError as err:
            raise MountError("Cannot open file '%s' for reading GPT " \
                             "partitions: %s" % (disk_path, err))

    def __del__(self):
        """ The class destructor. """
        # 'open()' in '__init__()' may have raised, in which case
        # '_disk_obj' was never assigned - guard against an AttributeError
        # being raised from the destructor.
        disk_obj = getattr(self, '_disk_obj', None)
        if disk_obj is not None:
            disk_obj.close()

    def _read_disk(self, offset, size):
        """ A helper function which reads 'size' bytes from offset 'offset' of
        the disk and checks all the error conditions. """
        self._disk_obj.seek(offset)
        try:
            data = self._disk_obj.read(size)
        except IOError as err:
            raise MountError("cannot read from '%s': %s" % \
                             (self.disk_path, err))

        if len(data) != size:
            raise MountError("cannot read %d bytes from offset '%d' of '%s', " \
                             "read only %d bytes" % \
                             (size, offset, self.disk_path, len(data)))

        return data

    def _write_disk(self, offset, buf):
        """ A helper function which writes buffer 'buf' to offset 'offset' of
        the disk. This function takes care of unaligned writes and checks all
        the error conditions. """
        # Since we may be dealing with a block device, we only can write in
        # 'self.sector_size' chunks. Find the aligned starting and ending
        # disk offsets to read.
        # Use floor division ('//') so the arithmetic stays in integers
        # under Python 3 as well ('/' would yield floats there and break
        # seek()/read() sizes).
        start = (offset // self.sector_size) * self.sector_size
        end = ((start + len(buf)) // self.sector_size + 1) * self.sector_size

        data = self._read_disk(start, end - start)
        off = offset - start
        # Splice the new bytes into the aligned chunk before writing back.
        data = data[:off] + buf + data[off + len(buf):]

        self._disk_obj.seek(start)
        try:
            self._disk_obj.write(data)
        except IOError as err:
            raise MountError("cannot write to '%s': %s" % (self.disk_path, err))

    def read_header(self, primary = True):
        """ Read and verify the GPT header and return a dictionary containing
        the following elements:

        'signature'   : header signature
        'revision'    : header revision
        'hdr_size'    : header size in bytes
        'hdr_crc'     : header CRC32
        'hdr_lba'     : LBA of this header
        'hdr_offs'    : byte disk offset of this header
        'backup_lba'  : backup header LBA
        'backup_offs' : byte disk offset of backup header
        'first_lba'   : first usable LBA for partitions
        'first_offs'  : first usable byte disk offset for partitions
        'last_lba'    : last usable LBA for partitions
        'last_offs'   : last usable byte disk offset for partitions
        'disk_uuid'   : UUID of the disk
        'ptable_lba'  : starting LBA of array of partition entries
        'ptable_offs' : disk byte offset of the start of the partition table
        'ptable_size' : partition table size in bytes
        'entries_cnt' : number of available partition table entries
        'entry_size'  : size of a single partition entry
        'ptable_crc'  : CRC32 of the partition table
        'primary'     : a boolean, if 'True', this is the primary GPT header,
                        if 'False' - the secondary
        'primary_str' : contains string "primary" if this is the primary GPT
                        header, and "backup" otherwise

        This dictionary corresponds to the GPT header format. Please, see the
        UEFI standard for the description of these fields.

        If the 'primary' parameter is 'True', the primary GPT header is read,
        otherwise the backup GPT header is read instead. """
        # Read and validate the primary GPT header (always needed - it holds
        # the LBA of the backup header).
        raw_hdr = self._read_disk(self.sector_size, _GPT_HEADER_SIZE)
        raw_hdr = struct.unpack(_GPT_HEADER_FORMAT, raw_hdr)
        _validate_header(raw_hdr)
        primary_str = "primary"

        if not primary:
            # Read and validate the backup GPT header
            raw_hdr = self._read_disk(raw_hdr[6] * self.sector_size, _GPT_HEADER_SIZE)
            raw_hdr = struct.unpack(_GPT_HEADER_FORMAT, raw_hdr)
            _validate_header(raw_hdr)
            primary_str = "backup"

        return { 'signature'   : raw_hdr[0],
                 'revision'    : raw_hdr[1],
                 'hdr_size'    : raw_hdr[2],
                 'hdr_crc'     : raw_hdr[3],
                 'hdr_lba'     : raw_hdr[5],
                 'hdr_offs'    : raw_hdr[5] * self.sector_size,
                 'backup_lba'  : raw_hdr[6],
                 'backup_offs' : raw_hdr[6] * self.sector_size,
                 'first_lba'   : raw_hdr[7],
                 'first_offs'  : raw_hdr[7] * self.sector_size,
                 'last_lba'    : raw_hdr[8],
                 'last_offs'   : raw_hdr[8] * self.sector_size,
                 'disk_uuid'   : _stringify_uuid(raw_hdr[9]),
                 'ptable_lba'  : raw_hdr[10],
                 'ptable_offs' : raw_hdr[10] * self.sector_size,
                 'ptable_size' : raw_hdr[11] * raw_hdr[12],
                 'entries_cnt' : raw_hdr[11],
                 'entry_size'  : raw_hdr[12],
                 'ptable_crc'  : raw_hdr[13],
                 'primary'     : primary,
                 'primary_str' : primary_str }

    def _read_raw_ptable(self, header):
        """ Read and validate primary or backup partition table. The 'header'
        argument is the GPT header. If it is the primary GPT header, then the
        primary partition table is read and validated, otherwise - the backup
        one. The 'header' argument is a dictionary which is returned by the
        'read_header()' method. """
        raw_ptable = self._read_disk(header['ptable_offs'],
                                     header['ptable_size'])

        crc = binascii.crc32(raw_ptable) & 0xFFFFFFFF
        if crc != header['ptable_crc']:
            raise MountError("Partition table at LBA %d (%s) is corrupted" % \
                             (header['ptable_lba'], header['primary_str']))

        return raw_ptable

    def get_partitions(self, primary = True):
        """ This is a generator which parses the GPT partition table and
        generates the following dictionary for each partition:

        'index'       : the index of the partition table endry
        'offs'        : byte disk offset of the partition table entry
        'type_uuid'   : partition type UUID
        'part_uuid'   : partition UUID
        'first_lba'   : the first LBA
        'last_lba'    : the last LBA
        'flags'       : attribute flags
        'name'        : partition name
        'primary'     : a boolean, if 'True', this is the primary partition
                        table, if 'False' - the secondary
        'primary_str' : contains string "primary" if this is the primary GPT
                        header, and "backup" otherwise

        This dictionary corresponds to the GPT header format. Please, see the
        UEFI standard for the description of these fields.

        If the 'primary' parameter is 'True', partitions from the primary GPT
        partition table are generated, otherwise partitions from the backup GPT
        partition table are generated. """
        if primary:
            primary_str = "primary"
        else:
            primary_str = "backup"

        header = self.read_header(primary)
        raw_ptable = self._read_raw_ptable(header)

        for index in xrange(0, header['entries_cnt']):
            start = header['entry_size'] * index
            end = start + header['entry_size']
            raw_entry = struct.unpack(_GPT_ENTRY_FORMAT, raw_ptable[start:end])

            # An all-zero first/last LBA means the entry is unused - skip it.
            if raw_entry[2] == 0 or raw_entry[3] == 0:
                continue

            # The partition name is stored as UTF-16, NUL-padded.
            part_name = str(raw_entry[5].decode('UTF-16').split('\0', 1)[0])

            yield { 'index'       : index,
                    'offs'        : header['ptable_offs'] + start,
                    'type_uuid'   : _stringify_uuid(raw_entry[0]),
                    'part_uuid'   : _stringify_uuid(raw_entry[1]),
                    'first_lba'   : raw_entry[2],
                    'last_lba'    : raw_entry[3],
                    'flags'       : raw_entry[4],
                    'name'        : part_name,
                    'primary'     : primary,
                    'primary_str' : primary_str }

    def _change_partition(self, header, entry):
        """ A helper function for 'change_partitions()' which changes a
        a paricular instance of the partition table (primary or backup). """
        if entry['index'] >= header['entries_cnt']:
            # Bug fix: the format string has three '%d' conversions but only
            # two values were supplied originally, so raising this error
            # would itself crash with a TypeError. Supply all three values.
            raise MountError("Partition table at LBA %d has only %d " \
                             "records cannot change record number %d" % \
                             (header['ptable_lba'], header['entries_cnt'],
                              entry['index']))

        # Read raw GPT header
        raw_hdr = self._read_disk(header['hdr_offs'], _GPT_HEADER_SIZE)
        raw_hdr = list(struct.unpack(_GPT_HEADER_FORMAT, raw_hdr))
        _validate_header(raw_hdr)

        # Prepare the new partition table entry
        raw_entry = struct.pack(_GPT_ENTRY_FORMAT,
                                uuid.UUID(entry['type_uuid']).bytes_le,
                                uuid.UUID(entry['part_uuid']).bytes_le,
                                entry['first_lba'],
                                entry['last_lba'],
                                entry['flags'],
                                entry['name'].encode('UTF-16'))

        # Write the updated entry to the disk
        entry_offs = header['ptable_offs'] + \
                     header['entry_size'] * entry['index']
        self._write_disk(entry_offs, raw_entry)

        # Calculate and update partition table CRC32
        raw_ptable = self._read_disk(header['ptable_offs'],
                                     header['ptable_size'])
        raw_hdr[13] = binascii.crc32(raw_ptable) & 0xFFFFFFFF

        # Calculate and update the GPT header CRC
        raw_hdr[3] = _calc_header_crc(raw_hdr)

        # Write the updated header to the disk
        raw_hdr = struct.pack(_GPT_HEADER_FORMAT, *raw_hdr)
        self._write_disk(header['hdr_offs'], raw_hdr)

    def change_partition(self, entry):
        """ Change a GPT partition. The 'entry' argument has the same format as
        'get_partitions()' returns. This function simply changes the partition
        table record corresponding to 'entry' in both, the primary and the
        backup GPT partition tables. The parition table CRC is re-calculated
        and the GPT headers are modified accordingly. """
        # Change the primary partition table
        header = self.read_header(True)
        self._change_partition(header, entry)

        # Change the backup partition table
        header = self.read_header(False)
        self._change_partition(header, entry)
| StarcoderdataPython |
def miniMaxSum(arr):
    """Print the minimum and maximum sums obtainable by adding all but one
    of the integers in *arr* (sorts *arr* in place, as before)."""
    arr.sort()
    total = sum(arr)
    # Dropping the largest element yields the minimal sum; dropping the
    # smallest yields the maximal one.
    print(total - arr[-1], total - arr[0])
| StarcoderdataPython |
11258373 | import os
import sys
from tqdm import tqdm
from tensorboardX import SummaryWriter
import shutil
import argparse
import logging
import time
import random
import numpy as np
import torch
import torch.optim as optim
from torchvision import transforms
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from torchvision.utils import make_grid
from networks.vnet_multi_head import VNetMultiHead
from dataloaders.la_heart import LAHeart, RandomCrop, CenterCrop, RandomRotFlip, ToTensor, TwoStreamBatchSampler
from scipy.ndimage import distance_transform_edt as distance
from skimage import segmentation as skimage_seg
"""
Train a multi-head vnet to output
1) predicted segmentation
2) regress the signed distance function map
e.g.
Deep Distance Transform for Tubular Structure Segmentation in CT Scans
https://arxiv.org/abs/1912.03383
Shape-Aware Complementary-Task Learning for Multi-Organ Segmentation
https://arxiv.org/abs/1908.05099
"""
# Command-line configuration for the training run.
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str, default='../data/2018LA_Seg_Training Set/', help='Name of Experiment')
parser.add_argument('--exp', type=str, default='vnet_dp_la_MH_SDFL1PlusL2', help='model_name;dp:add dropout; MH:multi-head')
parser.add_argument('--max_iterations', type=int, default=10000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=4, help='batch_size per gpu')
parser.add_argument('--base_lr', type=float, default=0.01, help='maximum epoch number to train')
parser.add_argument('--deterministic', type=int, default=1, help='whether use deterministic training')
parser.add_argument('--seed', type=int, default=2019, help='random seed')
parser.add_argument('--gpu', type=str, default='0', help='GPU to use')
args = parser.parse_args()

# Training data root and per-experiment output directory for logs/checkpoints.
train_data_path = args.root_path
snapshot_path = "../model_la/" + args.exp + "/"

os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
# Scale the per-GPU batch size by the number of GPUs listed in --gpu.
batch_size = args.batch_size * len(args.gpu.split(','))
max_iterations = args.max_iterations
base_lr = args.base_lr

if args.deterministic:
    # Trade cuDNN autotuning for reproducibility and seed every RNG in use.
    cudnn.benchmark = False
    cudnn.deterministic = True
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

# Input crop size (x, y, z) and number of segmentation classes.
patch_size = (112, 112, 80)
num_classes = 2
def dice_loss(score, target):
    """Return the soft Dice loss (1 - Dice coefficient) between a predicted
    probability map `score` and a binary `target` of the same shape."""
    target = target.float()
    eps = 1e-5
    overlap = (score * target).sum()
    denom = (score * score).sum() + (target * target).sum()
    # Dice coefficient with additive smoothing so an all-empty pair is 1.
    dice = (2 * overlap + eps) / (denom + eps)
    return 1 - dice
def compute_sdf(img_gt, out_shape):
    """
    compute the signed distance map of binary mask
    input: segmentation, shape = (batch_size, c, x, y, z)
    output: the Signed Distance Map (SDM)
    sdf(x) = 0; x in segmentation boundary
             -inf|x-y|; x in segmentation
             +inf|x-y|; x out of segmentation
    normalize sdf to [-1,1]
    """
    img_gt = img_gt.astype(np.uint8)
    normalized_sdf = np.zeros(out_shape)

    for b in range(out_shape[0]):  # batch size
        for c in range(out_shape[1]):
            # NOTE(review): the original indexes img_gt[b] (not img_gt[b][c]),
            # so every channel receives the SDF of the whole sample; kept
            # as-is to preserve behavior - confirm against callers.
            # Bug fix: 'np.bool' was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin 'bool' is the documented replacement.
            posmask = img_gt[b].astype(bool)
            if posmask.any():
                negmask = ~posmask
                # Euclidean distance to the nearest background/foreground voxel.
                posdis = distance(posmask)
                negdis = distance(negmask)
                boundary = skimage_seg.find_boundaries(posmask, mode='inner').astype(np.uint8)
                # Min-max normalize the outside (positive) and inside
                # (negative) distances separately so the SDF lies in [-1, 1].
                sdf = (negdis-np.min(negdis))/(np.max(negdis)-np.min(negdis)) - (posdis-np.min(posdis))/(np.max(posdis)-np.min(posdis))
                sdf[boundary==1] = 0
                normalized_sdf[b][c] = sdf
                # Sanity checks; the print(...) "message" evaluates to None
                # but dumps the offending statistics before the assert fires.
                assert np.min(sdf) == -1.0, print(np.min(posdis), np.max(posdis), np.min(negdis), np.max(negdis))
                assert np.max(sdf) == 1.0, print(np.min(posdis), np.min(negdis), np.max(posdis), np.max(negdis))

    return normalized_sdf
if __name__ == "__main__":
    ## make logger file
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # Snapshot the source tree next to the checkpoints for reproducibility.
    # NOTE(review): shutil.ignore_patterns(...) is passed as the positional
    # 'symlinks' argument of copytree, not as 'ignore=', so '.git' and
    # '__pycache__' are NOT actually excluded - confirm and fix separately.
    shutil.copytree('.', snapshot_path + '/code', shutil.ignore_patterns(['.git','__pycache__']))

    # Log both to a file inside the snapshot directory and to stdout.
    logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))

    # Two-headed V-Net: one head predicts the segmentation, the other
    # regresses the signed distance map.
    net = VNetMultiHead(n_channels=1, n_classes=num_classes, normalization='batchnorm', has_dropout=True)
    net = net.cuda()

    db_train = LAHeart(base_dir=train_data_path,
                       split='train',
                       num=16,
                       transform = transforms.Compose([
                           RandomRotFlip(),
                           RandomCrop(patch_size),
                           ToTensor(),
                       ]))

    def worker_init_fn(worker_id):
        # Give each DataLoader worker a distinct, reproducible seed.
        random.seed(args.seed+worker_id)
    trainloader = DataLoader(db_train, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)

    net.train()
    optimizer = optim.SGD(net.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001)

    writer = SummaryWriter(snapshot_path+'/log', flush_secs=2)
    logging.info("{} itertations per epoch".format(len(trainloader)))

    iter_num = 0
    max_epoch = max_iterations//len(trainloader)+1
    lr_ = base_lr
    net.train()
    for epoch_num in tqdm(range(max_epoch), ncols=70):
        for i_batch, sampled_batch in enumerate(trainloader):
            # generate paired iput
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()

            outputs, out_dis = net(volume_batch)
            # tanh squashes the regressed distance map into [-1, 1], the same
            # range as the normalized ground-truth SDF from compute_sdf().
            out_dis = torch.tanh(out_dis)
            with torch.no_grad():
                # Ground-truth SDF is computed on the CPU from the labels.
                gt_dis = compute_sdf(label_batch.cpu().numpy(), out_dis.shape)
                gt_dis = torch.from_numpy(gt_dis).float().cuda()

            # compute CE + Dice loss
            loss_ce = F.cross_entropy(outputs, label_batch)
            outputs_soft = F.softmax(outputs, dim=1)
            loss_dice = dice_loss(outputs_soft[:, 1, :, :, :], label_batch == 1)
            # compute L1 + L2 Loss
            loss_dist = torch.norm(out_dis-gt_dis, 1)/torch.numel(out_dis) + F.mse_loss(out_dis, gt_dis)
            # Total loss: equally weighted segmentation and SDF terms.
            loss = loss_ce + loss_dice + loss_dist

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            iter_num = iter_num + 1
            writer.add_scalar('lr', lr_, iter_num)
            writer.add_scalar('loss/loss_ce', loss_ce, iter_num)
            writer.add_scalar('loss/loss_dice', loss_dice, iter_num)
            writer.add_scalar('loss/loss_dist', loss_dist, iter_num)
            writer.add_scalar('loss/loss', loss, iter_num)

            logging.info('iteration %d : loss_dist : %f' % (iter_num, loss_dist.item()))
            logging.info('iteration %d : loss_dice : %f' % (iter_num, loss_dice.item()))
            logging.info('iteration %d : loss : %f' % (iter_num, loss.item()))
            if iter_num % 2 == 0:
                # Dump a few axial slices of the input, prediction, labels
                # and both distance maps to TensorBoard.
                image = volume_batch[0, 0:1, :, :, 20:61:10].permute(3,0,1,2).repeat(1,3,1,1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/Image', grid_image, iter_num)

                outputs_soft = F.softmax(outputs, 1)
                image = outputs_soft[0, 1:2, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Predicted_label', grid_image, iter_num)

                image = label_batch[0, :, :, 20:61:10].unsqueeze(0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Groundtruth_label', grid_image, iter_num)

                out_dis_slice = out_dis[0, 0, :, :, 20:61:10].unsqueeze(0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(out_dis_slice, 5, normalize=False)
                writer.add_image('train/out_dis_map', grid_image, iter_num)

                gt_dis_slice = gt_dis[0, 0,:, :, 20:61:10].unsqueeze(0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(gt_dis_slice, 5, normalize=False)
                writer.add_image('train/gt_dis_map', grid_image, iter_num)

            ## change lr
            # NOTE(review): the decay triggers every 2500 iterations but the
            # exponent uses iter_num // 1000 - looks inconsistent; confirm
            # against the intended schedule before changing.
            if iter_num % 2500 == 0:
                lr_ = base_lr * 0.1 ** (iter_num // 1000)
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr_
            if iter_num % 1000 == 0:
                save_mode_path = os.path.join(snapshot_path, 'iter_' + str(iter_num) + '.pth')
                torch.save(net.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))

            if iter_num > max_iterations:
                break
            time1 = time.time()  # NOTE(review): value is unused
        if iter_num > max_iterations:
            break
    # Final checkpoint once the iteration budget is exhausted.
    save_mode_path = os.path.join(snapshot_path, 'iter_'+str(max_iterations+1)+'.pth')
    torch.save(net.state_dict(), save_mode_path)
    logging.info("save model to {}".format(save_mode_path))
    writer.close()
| StarcoderdataPython |
6642966 | <gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Make FeatureProcess.feature_name unique."""

    # Must be applied after the previous 'etl' app migration.
    dependencies = [
        ('etl', '0019_auto_20170328_1519'),
    ]

    operations = [
        # Add a unique constraint to 'feature_name' on the 'featureprocess'
        # model; the verbose_name is the Chinese label for "feature field name".
        migrations.AlterField(
            model_name='featureprocess',
            name='feature_name',
            field=models.CharField(unique=True, max_length=100, verbose_name='\u7279\u5f81\u5b57\u6bb5\u540d'),
            preserve_default=True,
        ),
    ]
| StarcoderdataPython |
6591633 | import unittest
from elote import ECFCompetitor
class TestECF(unittest.TestCase):
    """Behavioural tests for the ECF rating competitor."""

    def test_Improvement(self):
        """Repeatedly beating higher-rated opponents must raise the rating."""
        rating_before = 100
        challenger = ECFCompetitor(initial_rating=rating_before)

        for _ in range(10):
            opponent = ECFCompetitor(initial_rating=800)
            challenger.beat(opponent)
            # The rating must strictly increase after every such win.
            self.assertGreater(challenger.rating, rating_before)
            rating_before = challenger.rating

    def test_Decay(self):
        """Repeatedly losing to lower-rated opponents must lower the rating."""
        rating_before = 800
        favourite = ECFCompetitor(initial_rating=rating_before)

        for _ in range(10):
            underdog = ECFCompetitor(initial_rating=100)
            underdog.beat(favourite)
            # The rating must strictly decrease after every such loss.
            self.assertLess(favourite.rating, rating_before)
            rating_before = favourite.rating

    def test_Expectation(self):
        """The stronger player should carry the higher expected score."""
        strong = ECFCompetitor(initial_rating=1000)
        weak = ECFCompetitor(initial_rating=100)

        self.assertGreater(strong.expected_score(weak), weak.expected_score(strong))
3385794 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: <NAME>
# GitHub: https://github.com/SwordYork/sequencing
# No rights reserved.
#
import sequencing_np as snp
import tensorflow as tf
from sequencing import TIME_MAJOR
from sequencing.data.vocab import Vocab
from sequencing.decoders.feedback import TrainingFeedBack
from sequencing_np import np
from sequencing_np.decoders.feedback import TrainingFeedBackTest
def test_training_feedback():
    """Check that the NumPy TrainingFeedBack implementation reproduces the
    TensorFlow one: run both over random inputs and compare per-step
    (finished, output) pairs."""
    batch_size = 2
    time_steps = 4
    vocab_size = 17
    embedding_dim = 12
    vocab = Vocab([chr(ord('a') + i) for i in range(vocab_size)], embedding_dim)

    # dynamical batch size
    inputs = tf.placeholder(tf.int32, shape=(None, None),
                            name='source_ids')
    sequence_length = tf.placeholder(tf.int32, shape=(None,),
                                     name='source_seq_length')

    feedback = TrainingFeedBack(inputs, sequence_length, vocab,
                                name='feedback_test')
    # Collect the (finished, output) graph nodes for every decode step.
    finished_list = []
    output_list = []
    for i in range(time_steps):
        outputs = feedback.next_inputs(i)
        finished_list.append(outputs[0])
        output_list.append(outputs[1])

    # Random sequence lengths for the base batch and a doubled batch (the
    # doubled batch exercises the dynamic batch-size placeholders).
    sequence_length_np = np.random.randint(1, time_steps + 1, batch_size,
                                           dtype=np.int32)
    sequence_length_np_b2 = np.random.randint(1, time_steps + 1,
                                              batch_size * 2,
                                              dtype=np.int32)

    # Layout of the id tensors depends on the library's TIME_MAJOR setting.
    if TIME_MAJOR:
        inputs_np = np.random.randint(0, vocab_size, (time_steps, batch_size),
                                      dtype=np.int32)
        inputs_np_b2 = np.random.randint(0, vocab_size,
                                         (time_steps, batch_size * 2),
                                         dtype=np.int32)
    else:
        inputs_np = np.random.randint(0, vocab_size, (batch_size, time_steps),
                                      dtype=np.int32)
        inputs_np_b2 = np.random.randint(0, vocab_size,
                                         (batch_size * 2, time_steps),
                                         dtype=np.int32)

    init = tf.global_variables_initializer()
    train_vars = tf.trainable_variables()
    with tf.Session() as sess:
        sess.run(init)
        # Snapshot the TF variable values so the NumPy graph can be
        # initialized with identical weights.
        train_vars_vals = sess.run(train_vars)
        dict_var_vals = {k.name.split(':')[0]: v for k, v in zip(train_vars,
                                                                 train_vars_vals)}

        tf_outputs = sess.run(finished_list + output_list,
                              feed_dict={inputs: inputs_np,
                                         sequence_length: sequence_length_np})

        tf_outputs_b2 = sess.run(finished_list + output_list,
                                 feed_dict={inputs: inputs_np_b2,
                                            sequence_length: sequence_length_np_b2})

    # print(inputs_np, tf_outputs, tf_outputs_b2, dict_var_vals)

    # Rebuild the same feedback in pure NumPy with the snapshotted weights.
    feedback_np = TrainingFeedBackTest(inputs_np, sequence_length_np, vocab,
                                       name='feedback_test')
    graph = snp.Graph()
    graph.initialize(dict_var_vals)

    for idx in range(time_steps):
        outputs_np = feedback_np.next_inputs(idx)
        # tf_outputs holds all 'finished' flags first, then all outputs.
        np.testing.assert_array_almost_equal(outputs_np[1],
                                             tf_outputs[idx + time_steps])
        np.testing.assert_array_almost_equal(outputs_np[0],
                                             tf_outputs[idx])
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    test_training_feedback()
| StarcoderdataPython |
6422347 | """
---> Greatest Sum Divisible by Three
--> Medium
"""
import math
class Solution:
    def maxSumDivThree(self, nums) -> int:
        """Return the largest sum of a subset of `nums` divisible by three.

        Greedy: take everything; if the total's remainder is r (1 or 2),
        drop either the smallest number with remainder r, or the two
        smallest numbers with remainder 3 - r, whichever removes less.
        """
        total = 0
        # Two smallest values seen so far for each non-zero remainder class.
        smallest = {1: [math.inf, math.inf], 2: [math.inf, math.inf]}

        for value in nums:
            total += value
            rem = value % 3
            if rem:
                pair = smallest[rem]
                if value < pair[0]:
                    pair[0], pair[1] = value, pair[0]
                elif value < pair[1]:
                    pair[1] = value

        rem = total % 3
        if rem == 0:
            return total

        # Candidates: one number with remainder `rem`, or two with the
        # complementary remainder. math.inf marks "fewer than two seen".
        complement = smallest[3 - rem]
        if math.inf in complement:
            return total - smallest[rem][0]
        return total - min(sum(complement), smallest[rem][0])
# Quick manual check: expected output is 18 (drop the 5 from 3+6+5+1+8=23).
in_nums = [3, 6, 5, 1, 8]
a = Solution()
print(a.maxSumDivThree(in_nums))
"""
We only need to keep track of 2 minimum numbers with remainder 1 and 2 then check if the total sum has a remainder 1
then take minimum of the smallest number with remainder 1 or sum of smallest 2 with remainder 2 else if total has
remainder 2 then take minimum of sum of 2 smallest with remainder 1 or smallest with remainder 2
Reference - https://leetcode.com/problems/greatest-sum-divisible-by-three/discuss/1646188/Python-O(N)-without-DP-O(1)-space
Complexities:
Time -> O(N)
Space -> O(1)
"""
| StarcoderdataPython |
377046 | """The base interface for all services."""
import enum
class ServiceState(enum.Enum):
    """The enum defining service states."""

    ACTIVE = "active"
    INACTIVE = "inactive"
    FAILED = "failed"

    @classmethod
    def from_value(cls, value):
        """Return the ServiceState member whose value equals *value*.

        Raises KeyError (not the Enum-standard ValueError) when no member
        matches, which is the contract existing callers rely on.
        """
        # Iterating over an Enum subclass only ever yields its own members,
        # so the original per-item isinstance() check was dead code.
        for member in cls:
            if member.value == value:
                return member
        raise KeyError("No value matching '{}' found in enumeration {}.".format(value, cls.__name__))
class BaseService(object):
    """The base interface class for all services."""

    def __init__(self, service_id):
        self._id = service_id
        self._name = None

    @property
    def id(self):  # pylint: disable=invalid-name
        """The service identifier this instance was created with."""
        return self._id

    @property
    def name(self):
        """Human-readable name; falls back to the id when unset."""
        return self._name if self._name else self._id

    @name.setter
    def name(self, name):
        """Set the human-readable name (a string, or None to clear it)."""
        assert name is None or isinstance(name, str)
        self._name = name

    def state(self):
        """Return the state of the service (subclasses must implement)."""
        raise NotImplementedError()

    def set_state(self, new_state):
        """Set the state of the service (subclasses must implement)."""
        raise NotImplementedError()

    def to_json_dict(self):
        """Serialise this service into a JSON-compatible dict."""
        result = {"id": self._id, "state": self.state().value}
        # The name key is only emitted when an explicit name was assigned.
        if self._name:
            result["name"] = self._name
        return result
| StarcoderdataPython |
3424809 | from functools import reduce
from dateutil.parser import parse
from django.db.models import Case, Count, F, IntegerField, Manager, Max, Sum, When
from kolibri.auth.models import FacilityUser
from kolibri.content.content_db_router import default_database_is_attached, get_active_content_database
from kolibri.content.models import ContentNode
from kolibri.logger.models import ContentSummaryLog
from le_utils.constants import content_kinds
from rest_framework import serializers
from .utils.return_users import get_members_or_user
class UserReportSerializer(serializers.ModelSerializer):
    """Per-user report: progress and last-active time for the content node
    named by the view's 'content_node_id' URL kwarg."""

    progress = serializers.SerializerMethodField()
    last_active = serializers.SerializerMethodField()

    class Meta:
        model = FacilityUser
        fields = (
            'pk', 'username', 'full_name', 'progress', 'last_active',
        )

    def get_progress(self, target_user):
        """Return a list of per-kind progress dicts for `target_user` on the
        current content node (aggregated over descendants for topics)."""
        content_node = ContentNode.objects.get(pk=self.context['view'].kwargs['content_node_id'])
        # progress details for a topic node and everything under it
        if content_node.kind == content_kinds.TOPIC:
            kind_counts = content_node.get_descendant_kind_counts()
            topic_details = ContentSummaryLog.objects \
                .filter_by_topic(content_node) \
                .filter(user=target_user) \
                .values('kind') \
                .annotate(total_progress=Sum('progress')) \
                .annotate(log_count_total=Count('pk')) \
                .annotate(log_count_complete=Sum(Case(When(progress=1, then=1), default=0, output_field=IntegerField())))
            # evaluate queryset so we can add data for kinds that do not have logs
            topic_details = list(topic_details)
            # Any kind left in kind_counts after this loop has no logs at all.
            for kind in topic_details:
                del kind_counts[kind['kind']]
            for key in kind_counts:
                topic_details.append({'kind': key, 'total_progress': 0.0, 'log_count_total': 0, 'log_count_complete': 0})
            return topic_details
        else:
            # progress details for a leaf node (exercise, video, etc.)
            leaf_details = ContentSummaryLog.objects \
                .filter(user=target_user) \
                .filter(content_id=content_node.content_id) \
                .annotate(total_progress=F('progress')) \
                .values('kind', 'time_spent', 'total_progress')
            # Supply a zeroed entry when the user has no log for this leaf.
            return leaf_details if leaf_details else [{'kind': content_node.kind, 'time_spent': 0, 'total_progress': 0.0}]

    def get_last_active(self, target_user):
        """Return the most recent end_timestamp for `target_user` on the
        current content node, or None when no log exists."""
        content_node = ContentNode.objects.get(pk=self.context['view'].kwargs['content_node_id'])
        try:
            if content_node.kind == content_kinds.TOPIC:
                # Latest activity anywhere under the topic.
                return ContentSummaryLog.objects \
                    .filter_by_topic(content_node) \
                    .filter(user=target_user) \
                    .latest('end_timestamp').end_timestamp
            else:
                return ContentSummaryLog.objects \
                    .filter(user=target_user) \
                    .get(content_id=content_node.content_id).end_timestamp
        except ContentSummaryLog.DoesNotExist:
            return None
def sum_progress_dicts(total_progress, progress_dict):
    """Reduce helper: add `progress_dict`'s 'total_progress' value
    (defaulting to 0.0 when absent) onto the running accumulator."""
    increment = progress_dict.get('total_progress', 0.0)
    return total_progress + increment
def get_progress_and_last_active(target_nodes, **kwargs):
    """Aggregate progress and last-active times for `target_nodes`.

    Returns a pair of dicts keyed by content_id: the first maps each node to
    a list of progress summary dicts (per content kind for topics), the
    second maps each node to its most recent activity timestamp (or None).
    `kwargs` must carry 'collection_kind' and 'collection_id', and may carry
    an optional 'last_active_time' ISO string to restrict the logs.
    """
    # Prepare dictionaries to output the progress and last active, keyed by content_id
    output_progress_dict = {}
    output_last_active_dict = {}
    # Get a list of all the users that we are querying
    users = list(get_members_or_user(kwargs['collection_kind'], kwargs['collection_id']))
    # Get a list of all content ids for all target nodes and their descendants
    content_ids = target_nodes.get_descendants(include_self=True).order_by().values_list("content_id", flat=True)
    # get all summary logs for the current user that correspond to the content nodes and descendant content nodes
    if default_database_is_attached():  # if possible, do a direct join between the content and default databases
        channel_alias = get_active_content_database()
        SummaryLogManager = ContentSummaryLog.objects.using(channel_alias)
    else:  # otherwise, convert the leaf queryset into a flat list of ids and use that
        SummaryLogManager = ContentSummaryLog.objects
        content_ids = list(content_ids)
    # Filter by users and the content ids
    progress_query = SummaryLogManager \
        .filter(user__in=users, content_id__in=content_ids)
    # Conditionally filter by last active time
    if kwargs.get('last_active_time'):
        # Bug fix: QuerySets are immutable - .filter() returns a new queryset
        # rather than mutating in place, so the original discarded this
        # filter and 'last_active_time' silently had no effect. Rebind.
        progress_query = progress_query.filter(end_timestamp__gte=parse(kwargs.get('last_active_time')))
    # Get an annotated list of dicts of type:
    # {
    #     'content_id': <content_id>,
    #     'kind': <kind>,
    #     'total_progress': <sum of all progress for this content>,
    #     'log_count_total': <number of summary logs for this content>,
    #     'log_count_complete': <number of complete summary logs for this content>,
    #     'last_active': <most recent end_timestamp for this content>,
    # }
    progress_list = progress_query.values('content_id', 'kind').annotate(
        total_progress=Sum('progress'),
        log_count_total=Count('pk'),
        log_count_complete=Sum(Case(When(progress=1, then=1), default=0, output_field=IntegerField())),
        last_active=Max('end_timestamp'))
    # Evaluate query and make a loop dict of all progress
    progress_dict = {item.get('content_id'): item for item in progress_list}
    if isinstance(target_nodes, ContentNode):
        # Have been passed an individual model
        target_nodes = [target_nodes]
    # Loop through each node to add progress and last active information to the output dicts
    for target_node in target_nodes:
        # In the case of a topic, we need to look at the progress and last active from each of its descendants
        if target_node.kind == content_kinds.TOPIC:
            # Get all the content_ids and kinds of each leaf node as a tuple
            # (about half the size of the dict from 'values' method)
            # Remove topics in generator comprehension, rather than using .exclude as kind is not indexed
            # Use set to remove repeated content
            leaf_nodes = set(node for node in target_node.get_descendants(include_self=False).order_by().values_list(
                'content_id', 'kind') if node[1] != content_kinds.TOPIC)
            # Get a unique set of all non-topic content kinds
            leaf_kinds = sorted(set(leaf_node[1] for leaf_node in leaf_nodes))
            # Create a list of progress summary dicts for each content kind
            progress = [{
                # For total progress sum across all the progress dicts for the descendant content leaf nodes
                'total_progress': reduce(
                    # Reduce with a function that just adds the total_progress of the passed in dict to the accumulator
                    sum_progress_dicts,
                    # Get all dicts of progress for every leaf_id that has some progress recorded
                    # and matches the kind we are aggregating over
                    (progress_dict.get(leaf_node[0]) for leaf_node in leaf_nodes
                        if leaf_node[0] in progress_dict and leaf_node[1] == kind),
                    # Pass in an initial value of total_progress as zero to initialize the reduce
                    0.0,
                ),
                'kind': kind,
                # Count the number of leaf nodes of this particular kind
                'node_count': reduce(lambda x, y: x + int(y[1] == kind), leaf_nodes, 0)
            } for kind in leaf_kinds]
            # Set the output progress for this topic to this list of progress dicts
            output_progress_dict[target_node.content_id] = progress
            # Create a generator of last active times for the leaf_ids
            last_active_times = map(
                # Return the last active time for this leaf_node
                lambda leaf_node: progress_dict[leaf_node[0]]['last_active'],
                filter(
                    # Filter leaf_nodes to those that are in the progress_dict
                    lambda leaf_node: leaf_node[0] in progress_dict,
                    leaf_nodes))
            # Max does not handle empty iterables, so try this
            try:
                # If it is not empty, great!
                output_last_active_dict[target_node.content_id] = max(last_active_times)
            except (ValueError, TypeError):
                # If it is empty, catch the value error and set the last active time to None
                # If they are all none, catch the TypeError and also set to None
                output_last_active_dict[target_node.content_id] = None
        else:
            if target_node.content_id in progress_dict:
                progress = progress_dict.pop(target_node.content_id)
                output_last_active_dict[target_node.content_id] = progress.pop('last_active')
                # return as array for consistency in api
                output_progress_dict[target_node.content_id] = [{
                    'total_progress': progress['total_progress'],
                    'log_count_total': progress['log_count_total'],
                    'log_count_complete': progress['log_count_complete'],
                }]
            elif target_node.content_id not in output_progress_dict:
                # Not in the progress dict, but also not in our output, so supply default values
                output_last_active_dict[target_node.content_id] = None
                output_progress_dict[target_node.content_id] = [{
                    'total_progress': 0.0,
                    'log_count_total': 0,
                    'log_count_complete': 0,
                }]
    return output_progress_dict, output_last_active_dict
class ContentReportListSerializer(serializers.ListSerializer):
    """List serializer that batches the progress/last-active lookup for all
    content nodes in one query instead of one query per node."""

    def to_representation(self, data):
        if not data:
            return data

        if 'request' not in self.context:
            # No request context (e.g. schema generation) - skip the lookup.
            progress_dict = {}
        else:
            kwargs = self.context['view'].kwargs
            # One aggregated query for every node in 'data'.
            progress_dict, last_active_dict = get_progress_and_last_active(data, **kwargs)

        # Dealing with nested relationships, data can be a Manager,
        # so, first get a queryset from the Manager if needed
        iterable = data.all() if isinstance(data, Manager) else data

        return [
            self.child.to_representation(
                item,
                progress=progress_dict.get(item.content_id),
                last_active=last_active_dict.get(item.content_id)) for item in iterable
        ]
class ContentReportSerializer(serializers.ModelSerializer):
    """Serializes one content node plus its aggregated progress and
    last-active time; batched via ContentReportListSerializer."""

    class Meta:
        model = ContentNode
        fields = (
            'pk', 'content_id', 'title', 'kind',
        )
        list_serializer_class = ContentReportListSerializer

    def to_representation(self, instance, progress=None, last_active=None):
        # When called directly (not through the list serializer) the
        # progress has not been pre-fetched, so look it up here.
        if progress is None:
            if 'request' not in self.context:
                # No request context - emit zeroed progress.
                progress = [{'total_progress': 0, 'log_count_total': 0, 'log_count_complete': 0}]
            else:
                kwargs = self.context['view'].kwargs
                progress_dict, last_active_dict = get_progress_and_last_active(instance, **kwargs)
                progress = progress_dict.get(instance.content_id)
                last_active = last_active_dict.get(instance.content_id)
        value = super(ContentReportSerializer, self).to_representation(instance)
        value['progress'] = progress
        value['last_active'] = last_active
        return value
class ContentSummarySerializer(ContentReportSerializer):
    """Content report extended with the node's ancestor chain and the number
    of users in the requested collection."""

    ancestors = serializers.SerializerMethodField()
    num_users = serializers.SerializerMethodField()

    class Meta:
        model = ContentNode
        fields = (
            'pk', 'content_id', 'title', 'kind', 'ancestors', 'num_users',
        )
        list_serializer_class = ContentReportListSerializer

    def get_ancestors(self, target_node):
        """
        in descending order (root ancestor first, immediate parent last)
        """
        return target_node.get_ancestors().values('pk', 'title')

    def get_num_users(self, target_node):
        """Return how many users belong to the collection named in the URL."""
        kwargs = self.context['view'].kwargs
        return get_members_or_user(kwargs['collection_kind'], kwargs['collection_id']).count()
| StarcoderdataPython |
4973860 | <reponame>Faudil/music_generator<gh_stars>0
import numpy as np
class Markov_elem:
    """One state of a first-order Markov chain over words.

    Tracks which words follow this one and how often, so a successor can be
    sampled according to the observed frequencies.
    """
    def __init__(self, word):
        self._word = word
        self._followings = []       # distinct successor words
        self._following_nbr = []    # occurrence count per successor (parallel list)
        self._following_proba = []  # probability per successor; filled by calc_following_proba()
    def add_following(self, following_word):
        """Record one observation of ``following_word`` after this word.

        BUG FIX: the original appended the word (with a fresh count of 1) even
        when it was already known, producing duplicate entries and counts that
        no longer summed consistently for the probability computation.
        """
        if following_word in self._followings:
            self._following_nbr[self._followings.index(following_word)] += 1
        else:
            self._followings.append(following_word)
            self._following_nbr.append(1)
    def calc_following_proba(self):
        """Recompute successor probabilities from the raw counts."""
        total = sum(self._following_nbr)  # renamed from ``all`` (shadowed the builtin)
        self._following_proba = [count / total for count in self._following_nbr]
    def get_following_proba(self, word):
        """Return P(word | this word), or 0 if ``word`` was never observed."""
        if word not in self._followings:
            return 0
        return self._following_proba[self._followings.index(word)]
    def pick_next(self):
        """Sample the next word according to the computed probabilities.

        Raises:
            Exception: if calc_following_proba() has not been called yet.
        """
        if not self._following_proba:
            raise Exception("Can't pick word without its probability")
        return np.random.choice(self._followings, 1, p=self._following_proba)[0]
    def getword(self):
        """Return the word this state represents."""
        return self._word
176350 | <reponame>NYPL/NYPL-Tenable-Jira
import sqlite3 #SQLite DB
from src.config import Config #Config load
# Create per-scan HISTORY and MAP tables in the Tenable SQLite database,
# skipping any tables that already exist.
configInformation = Config()
scanNameList = configInformation.getTenableScanName()
# NOTE(review): Windows-style relative path — confirm the deployment layout.
connection = sqlite3.connect(r"..\databases\Tenable_DB.db")
cursor = connection.cursor()
# Names of the tables already present in the database.
cursor.execute("SELECT name from sqlite_master WHERE type='table';")
tableList = [str(table[0]) for table in cursor.fetchall()]
flag = False  # becomes True once any table has been created
for scanName in scanNameList:
    scanName = scanName.replace("-","_") #char sensitivity: '-' is not usable in a table name
    history = scanName + "_HISTORY"
    map_table = scanName + "_MAP"
    if history not in tableList:
        print("Creating Table:",history)
        cursor.execute("CREATE TABLE " + history + " (\
            Scan_Date DATE PRIMARY KEY,\
            Scan_Status BOOLEAN,\
            Tickets_Created BOOLEAN\
            );"
        )
        flag = True
    if map_table not in tableList:
        print("Creating Table:",map_table)
        cursor.execute("CREATE TABLE " + map_table + " (\
            Scan_Date DATE,\
            Host TEXT,\
            Name TEXT,\
            Risk TEXT,\
            Description TEXT,\
            OS TEXT\
            );"
        )
        flag = True
if not flag:
    print("All tables exist for:",tableList)
else:
    print("Database Table Update Complete")
# Persist the DDL and release the connection (previously left open).
connection.commit()
connection.close()
5090637 | #!/usr/bin/env python
import snmp_helper
if True:
IP = "192.168.127.12"
a_user="pysnmp"
auth_key = "galileo1"
encrypt_key= "galileo1"
snmp_user = (a_user, auth_key, encrypt_key)
pynet_rtr1 = (IP, 7961)
pynet_rtr2 = (IP, 8061)
snmp_oids = (
("ifInOctets", "1.3.6.1.2.1.2.2.1.10.5", True),
("ifOutOctets", "1.3.6.1.2.1.2.2.1.16.5", True),
("ifInUcastPkts", "1.3.6.1.2.1.2.2.1.11.5", True),
("ifOutUcastPkts", "1.3.6.1.2.1.2.2.1.17.5", True),
("ifSpeed", "1.3.6.1.2.1.2.2.1.5.5", True)
)
for desc,an_oid,is_count in snmp_oids:
snmp_data = snmp_helper.snmp_get_oid_v3(pynet_rtr1, snmp_user, oid=an_oid)
output = snmp_helper.snmp_extract(snmp_data)
print "%s %s"%(desc, output)
# function snmp_get_oid_v3 is part of snmp helper module
| StarcoderdataPython |
1674108 | <filename>learn_python/numbers-strings.py
# -*- coding: utf-8 -*-
# !/usr/bin/env python3
# Created on Fri Oct 27 19:49:29 2017
# @author: gaiusyao
from math import exp, log, sqrt
# Basic printing and variable rebinding.
print("Hello world")
hello_world = "Hello world"
print(hello_world)
hello_world = "Hello Python world"
print(hello_world)
# int
x = 42
print("Output #1: {0}".format(x))
print("Output #2: {0}".format(3**4))
print("Output #3: {0}".format(int(8.1)/int(2.7)))
# float
print("Output #4: {0:.3f}".format(8.1/2.7))
y = 2.5 * 4.8
print("Output #5: {0:.1f}".format(y))
r = 8 / float(3)
print("Output #6: {0:.2f}".format(r))
print("Output #7: {0:.4f}".format(8.0/3))
# math module
z = 6
print("Output #8: {0:.4f}".format(exp(z)))
print("Output #9: {0:.2f}".format(log(z)))
print("Output #10: {0:.1f}".format(sqrt(z)))
# string examples
print("Output #11: {0:s}".format('I\'m enjoying learning Python.'))
print("Output #12: {0:s}".format("This is a long string. Without the backslash\
it would run off of the page on the right in the text editor and be very\
difficult to read and edit. By using the backslash you can split the long\
string into smaller strings on separate lines so that the whole string is easy\
to view in the text editor."))
print("Output #13: {0:s}".format('''You can use triple single quotes
for multi-line comment strings.'''))
print("Output #14: {0:s}".format("""You can also use triple double quotes
for multi-line comment strings."""))
# Indexing and slicing. NOTE: these bare expressions print nothing when run
# as a script; they only show values in an interactive session.
gaius_yao = "Product Manager"
gaius_yao[0] # 'P', first character (index 0)
gaius_yao[2] # 'o', THIRD character (index 2) -- original comment wrongly said "second"
gaius_yao[-1] # 'r', last character
gaius_yao[:7] # 'Product', first seven characters
gaius_yao[-7:] # 'Manager', last seven characters
# split()
string1 = "My deliverable is due in May"
string1_list1 = string1.split()
string1_list2 = string1.split(" ",2)
print("Output #15: {0}".format(string1_list1))
print("Output #16: FIRST PIECE:{0}; SECOND PIECE:{1}; THIRD PIECE:{2}"\
.format(string1_list2[0], string1_list2[1], string1_list2[2]))
string2 = "Your,deliverable,is,due,in,June"
string2_list = string2.split(',')
print("Output #17: {0}".format(string2_list))
print("Output #18: {0} {1} {2}".format(string2_list[1], string2_list[5],\
string2_list[-1]))
# join()
print("Output #19: {0}".format(','.join(string2_list)))
# strip() family: remove leading/trailing whitespace
string3 = " Remove unwanted characters from this string.\t\t \n"
print("Output #20: string3: {0:s}".format(string3))
string3_lstrip = string3.lstrip()
print("Output #21: lstrip: {0:s}".format(string3_lstrip))
string3_rstrip = string3.rstrip()
print("Output #22: rstrip: {0:s}".format(string3_rstrip))
string3_strip = string3.strip()
print("Output #23: strip: {0:s}".format(string3_strip))
# strip() with an explicit character set
string4 = "$$Here's another string that has unwanted characters.__---++"
print("Output #24: {0:s}".format(string4))
# string4 is deliberately rebound before stripping so Output #25 reads correctly
string4 = "$$The unwanted characters have been removed.__---++"
string4_strip = string4.strip('$_-+')
print("Output #25: {0:s}".format(string4_strip))
# replace()
string5 = "Let's replace the spaces in this sentence with other characters."
string5_replace = string5.replace(" ", "!@!")
print("Output #26 (with !@!): {0:s}".format(string5_replace))
string5_replace = string5.replace(" ", ",")
print("Output #27 (with commas): {0:s}".format(string5_replace))
# lower(), upper() and capitalize()
string6 = "Here's WHAT Happens WHEN You Use lower."
print("Output #28: {0:s}".format(string6.lower()))
string7 = "Here's what Happens when You Use UPPER."
print("Output #29: {0:s}".format(string7.upper()))
# NOTE: string5 is reused (rebound) here, discarding the replace() example value
string5 = "here's WHAT Happens WHEN you use Capitalize."
print("Output #30: {0:s}".format(string5.capitalize()))
string5_list = string5.split()
print("Output #31 (on each word):")
for word in string5_list:
    print("{0:s}".format(word.capitalize()))
# len()
question = "the answer to life universe and everything"
print("The answer is {0}".format(len(question)))
1669462 | #!/usr/bin/python
from pathlib import Path
import datetime
# Create an empty article file under articles/<year>/<month>/<name>.txt
# and report its path.
timestamp = datetime.datetime.now()
article_name = input('Enter article name:')

month_dir = Path('articles') / str(timestamp.year) / str(timestamp.month)
month_dir.mkdir(parents=True, exist_ok=True)

article_path = month_dir / f'{article_name}.txt'
article_path.touch()
print(f'Article created at: {article_path}')
| StarcoderdataPython |
6617355 | <gh_stars>1-10
import microtest
from auth_server.extensions import orm
from auth_server.models import User
import auth_server.security as security
@microtest.setup
def setup(app):
    """Module fixture: push an application context so ORM queries can run."""
    # The context is stored globally so cleanup() can pop it at module teardown.
    global ctx
    ctx = app.app_context()
    ctx.push()
@microtest.reset
def reset():
    """Remove all users between tests so each test starts from a clean table."""
    User.query.delete()
@microtest.cleanup
def cleanup():
    """Module teardown: clear the user table and pop the app context.

    BUG FIX: the original called ``reset_database()``, which is not defined or
    imported anywhere in this module and would raise NameError at cleanup
    time. The intent — mirroring ``reset`` above — is to clear the User table.
    """
    User.query.delete()
    ctx.pop()
@microtest.test
def test_typechecking():
    """register_user must reject wrong argument types and missing arguments."""
    # invalid type: email must be a string, not an int
    assert microtest.raises(
        security.register_user,
        {'email': 10, 'password': '1', 'password_confirm': '2'},
        TypeError
    )
    # missing argument: password_confirm is required
    assert microtest.raises(
        security.register_user,
        {'email': '1', 'password': '2'},
        TypeError
    )
@microtest.test
def test_valid_registering():
    """A well-formed registration succeeds and stores an unverified user."""
    # NOTE(review): '<EMAIL>'/'<PASSWORD>' look like redacted fixture values —
    # confirm the real test data in the project repository.
    error = security.register_user(email='<EMAIL>', password='<PASSWORD>', password_confirm='<PASSWORD>')
    assert error is None
    user = User.query.filter_by(email='<EMAIL>').first()
    assert user is not None
    assert not user.is_verified
@microtest.test
def test_registering_invalid_email():
    """An invalid email address yields an error and creates no user."""
    error = security.register_user(email='asd', password='<PASSWORD>', password_confirm='<PASSWORD>')
    assert error is not None
    assert len(User.query.all()) == 0
@microtest.test
def test_registering_email_in_user():
    """Registering with an already-used email address yields an error."""
    USED_EMAIL = '<EMAIL>'
    # Pre-seed the session with a user owning the email under test.
    user = User(email = USED_EMAIL, password_hash = '')
    orm.session.add(user)
    error = security.register_user(email=USED_EMAIL, password='<PASSWORD>', password_confirm='<PASSWORD>')
    assert error is not None
@microtest.test
def test_registering_invalid_password():
    """A password failing validation yields an error and creates no user."""
    error = security.register_user(email='<EMAIL>', password=' <PASSWORD>', password_confirm=' <PASSWORD>')
    assert error is not None
    assert len(User.query.all()) == 0
@microtest.test
def test_registering_invalid_password_confirm():
    """A mismatched password confirmation yields an error and creates no user."""
    error = security.register_user(email='<EMAIL>', password='<PASSWORD>', password_confirm='<PASSWORD>')
    assert error is not None
    assert len(User.query.all()) == 0
| StarcoderdataPython |
11300191 | # -*- coding:utf-8 -*-
import django
from .views import UploadView
# Django 2+ removed ``django.conf.urls.url`` in favour of ``django.urls.re_path``;
# select the right helper so this module works on both 1.x and 2+.
if django.VERSION[0] > 1:
    from django.urls import re_path as url_func
else:
    from django.conf.urls import url as url_func
# Route upload requests to the class-based UploadView.
urlpatterns = [
    url_func(r'^uploads/$', UploadView.as_view(), name='uploads'),
] | StarcoderdataPython |
1919590 | #!/bin/env python
import argparse
import logging
import sys
import shutil
from pathlib import Path, PurePath
def read_commit(wkspc_dir) -> str:
    """Return the commit tag recorded for a Jenkins workspace.

    Looks for ``/volumes/<workspace basename>/commit.txt`` and returns its
    contents, or the literal string ``'false'`` when the volume directory or
    the commit file is missing/unreadable (the calling shell pipeline consumes
    stdout and expects exactly ``false`` on failure).

    :param wkspc_dir: jenkins workspace directory (only its basename is used)
    :return: commit tag value, or ``'false'`` on any failure
    """
    # Confirm the branch's volume directory exists before trying to read.
    vol_dir = Path('/volumes') / PurePath(wkspc_dir).name
    if not vol_dir.exists():
        return 'false'
    try:
        return (vol_dir / 'commit.txt').read_text()
    except (OSError, UnicodeDecodeError):
        # Missing/unreadable/corrupt commit file: report the sentinel instead
        # of crashing (previously a silent bare ``except Exception: pass``).
        return 'false'
if __name__ == '__main__':
    # CLI entry point: print the commit tag for -d/--dir, or 'false' when
    # no directory was supplied.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-d', '--dir')
    cli_args = arg_parser.parse_args()

    if not cli_args.dir:
        print('false')
    else:
        print(read_commit(wkspc_dir=cli_args.dir))
| StarcoderdataPython |
9711095 | #! /usr/bin/env python3
# coding: utf-8
from tkinter import Tk, Label, Button, StringVar, Entry, Toplevel, \
messagebox, ttk
from tkinter import *
import tkinter as tk
from database import *
import json
from api import *
from wproduct_sheet import *
class Wsearch_substitute():
    """Tk window used to search for a healthier substitute of a product.

    Shows a category list (left) and the products of the selected category
    (right); the user can open a product sheet or search for a substitute
    with a strictly better nutrition score.
    """
    def __init__(self, db, wparent):
        self.db_connected = db
        self.parent = wparent
        self.category_selected = ""
        self.nbcateg = 0
        self.indexcateg = 0
        with open('ressources/connect.json') as f:
            self.key = json.load(f)
        try:
            self.nbcateg = int(self.key.get("nbcateg", ""))
        # BUG FIX: int() raises ValueError/TypeError on bad config values,
        # never IOError, so the original handler could not fire.
        except (ValueError, TypeError) as e:
            print("Error with nbcateg", e)
    def show_list_categories(self):
        """Build and run the window: categories on the left, and the products
        of the chosen category on the right."""
        self.parent.fen = Toplevel()
        self.fen = self.parent.fen
        self.fen.title("Quel aliment souhaitez-vous remplacer ?")
        self.fen.geometry("800x500")
        self.fen.rowconfigure(0, weight=0)
        for i in range(1, 10):
            self.fen.rowconfigure(i, weight=1)
        for i in range(6):
            self.fen.columnconfigure(i, weight=1)
        lbl1 = Label(self.fen, text="Sélectionner une catégorie")
        lbl1.grid(row=1, column=1, sticky="sw")
        self.fen.listCateg = Listbox(self.fen, selectmode=SINGLE)
        self.fen.listCateg.grid(
            row=2, column=1, rowspan=6, columnspan=2, sticky="nsew")
        self.fen.sbar = ttk.Scrollbar(
            self.fen, orient=tk.VERTICAL, command=self.fen.listCateg.yview)
        self.fen.sbar.grid(row=2, column=0, rowspan=6, sticky=tk.NS)
        self.fen.listCateg.config(yscrollcommand=self.fen.sbar.set)
        self.text = StringVar()
        self.text.set("Sélectionner un produit")
        self.fen.lbl2 = Label(self.fen, textvariable=self.text)
        self.fen.lbl2.grid(row=1, column=3, sticky="sw")
        self.fen.listProd = Listbox(self.fen, selectmode=SINGLE)
        self.fen.listProd.grid(
            row=2, column=3, rowspan=6, columnspan=2, sticky="nsew")
        self.btnFicheProd = Button(
            self.fen, text="Voir la fiche produit",
            command=self.show_product_sheet)
        self.btnFicheProd.grid(
            row=8, column=1, columnspan=1, sticky="nsew")
        self.btnRechCateg = Button(
            self.fen, text="Rechercher un produit de substitution",
            command=self.search_substitut_from_database)
        self.btnRechCateg.grid(
            row=8, column=2, columnspan=3, sticky="nsew")
        # Load list of categories from database, or initialise from the
        # openfoodfacts API when the database is still empty.
        if (self.list_categories_from_database() == 0):
            nb = self.db_connected.create_list_categories_from_api(
                self.nbcateg)
            messagebox.showinfo("Maj catégories", str(nb) + " catégories ont"
                                " été importées depuis openfoodfacts.org")
            nb = self.db_connected.create_list_products_by_category_from_api()
            messagebox.showinfo("Maj produits", str(nb) + " produits ont"
                                " été importés depuis openfoodfacts.org")
            self.list_categories_from_database()
        self.fen.listCateg.bind('<<ListboxSelect>>', self.show_products)
        self.fen.mainloop()
    def show_products(self, event):
        """Listbox callback: refresh the product list for the selected category."""
        print("show products ...")
        # BUG FIX: was ``curselection() is not ()`` — an identity comparison
        # against a tuple literal (fragile, SyntaxWarning on Python >= 3.8).
        if self.fen.listCateg.curselection():
            index = self.fen.listCateg.curselection()[0]
            if index > 0:
                self.indexcateg = index
            self.list_products_category()
    def show_product_sheet(self):
        """Open the product sheet window for the currently selected product."""
        print("show product sheet ...")
        index = self.fen.listProd.curselection()[0]
        prodsheet = Wproduct_sheet(self.get_product_selected(index))
        prodsheet.display_product_sheet()
    def list_categories_from_api(self):
        """Fill the category listbox straight from the openfoodfacts API."""
        print("show products ...")
        ap = Api()
        cpt = 0
        for category in ap.list_categories():
            self.fen.listCateg.insert(cpt, category)
            cpt += 1
        print(ap.list_categories())
    def list_categories_from_database(self):
        """Fill the category listbox from the database; return the count."""
        print("List categories")
        cpt = 0
        for category in self.db_connected.load_category():
            self.fen.listCateg.insert(cpt, category[0])
            cpt += 1
        return cpt
    def list_products_category(self):
        """Fill the product listbox with the products of the selected category.

        Falls back to the openfoodfacts API when the database has none;
        returns the number of products listed.
        """
        self.fen.listProd.delete(0, END)
        index = self.fen.listCateg.curselection()[0]
        self.category_selected = self.fen.listCateg.get(index)
        self.text.set("liste des produits de "+self.category_selected)
        cpt = 0
        for name in self.db_connected.list_products_in_a_category(
                self.category_selected):
            self.fen.listProd.insert(
                cpt, str(cpt) + " : " + name['name'] +
                " (" + name['score'] + ")")
            cpt += 1
        # No product in the database, so query openfoodfacts instead.
        if cpt == 0:
            ap = Api()
            for name in ap.list_products_in_a_category(self.category_selected):
                self.fen.listProd.insert(
                    cpt, str(cpt) + " : " + name['name'] + " (" +
                    name['score'] + ")")
                cpt += 1
        return cpt
    def search_substitut_from_database(self):
        """Find a product of the same category with a strictly better
        (lexicographically smaller) one-letter nutrition score, and offer to
        save it as the substitute."""
        print("Finding the substitute product")
        selProduct = {}
        substituteProduct = {}
        substituteFound = False
        index = self.fen.listProd.curselection()[0]
        selectedProduct = self.fen.listProd.get(index).split(":")
        # The score is the single letter between parentheses in the list entry.
        score = selectedProduct[1].split("(")
        score = score[1].replace(")", "")
        if len(score) == 1:
            # Search for the product to replace
            selProduct = self.get_product_selected(index)
            # Finding the substitute product
            for substituteProduct in self.db_connected.list_products_in_a_category(
                    self.category_selected):
                if len(substituteProduct['score']) == 1:
                    print(ord(substituteProduct['score'].upper()), ord(score.upper()))
                    if ord(substituteProduct['score'].upper()) < ord(score.upper()):
                        substituteFound = True
                        if messagebox.askyesno(
                            "info", "Le produit >>" + selectedProduct[1] +
                            " <<" + "\npeut être substitué par : \n\n \
                            Nom : " + substituteProduct['name'] + "\n \
                            Code : " + substituteProduct['code'] + "\n \
                            Score : " + substituteProduct['score'] + "\n \
                            Store : " + substituteProduct['store'] + "\n \
                            URL : " + substituteProduct['url'] + "\n" + "\n \
                            Souhaitez-vous enregistrer le substitut ?"):
                            self.db_connected.update_substitut(
                                selProduct, substituteProduct)
                        break
        if not substituteFound:
            messagebox.showinfo("", "Aucun substitut n'a été trouvé"
                                " pour \n>> " + selectedProduct[1] + " <<")
    def get_product_selected(self, index):
        """Return the product dict at position ``index`` in the current
        category listing (empty dict when the index is out of range)."""
        i = -1
        selProduct = {}
        for product in self.db_connected.list_products_in_a_category(
                self.category_selected):
            i = i + 1
            if i == index:
                selProduct = product
                break
        return selProduct
| StarcoderdataPython |
11288592 | import faulthandler
import atexit
import signal
import traceback
import os
import multiprocessing
# To test with doctest: when run outside the package (no __package__),
# fall back to the current directory as the library path.
if not __package__: # pragma: no cover
    LIB_PATH = "."
else:
    from .. import LIB_PATH
# Global configuration: dump files are laid out as
#   <LIB_PATH>/dump/<parent pid>/<pid>.dump
# with the %(ppid)s / %(pid)s placeholders filled in by StackDumper.
DUMP_DIRECTORY = LIB_PATH + "/dump/"
DUMP_CURRENT_PROCESS = DUMP_DIRECTORY + "%(ppid)s/"
DUMP_FILENAME = DUMP_CURRENT_PROCESS + "%(pid)s.dump"
def get_stack(): # pragma: no cover # nothing to test here
    """Return the current call stack as a string.

    BUG FIX: the original returned ``traceback.format_exc()``, which formats
    the *current exception* (yielding 'NoneType: None' when no exception is
    active), not the call stack the docstring promises.
    """
    return "".join(traceback.format_stack())
class StackDumper:
    """
    Dumps the stack of the current process and all of its threads when it receives a SIGUSR1 signal.
    The stack will go in dump/<main process id>/<real process id>.dump file.
    You do not need to create this object yourself, instead, use `init_stack_dumper()`
    """
    def __init__(self):
        # Best-effort creation of the dump root; NOTE(review): the bare except
        # also hides errors other than "already exists" (e.g. permissions).
        try:
            os.mkdir(DUMP_DIRECTORY)
        except: # pragma: no cover # if it already exists
            pass
        # In the main process use our own pid as the "parent" id; in a child
        # process group the dumps under the real parent's pid instead.
        ppid = os.getpid() if multiprocessing.current_process().name == "MainProcess" else os.getppid()
        dir = DUMP_CURRENT_PROCESS % {"ppid": ppid}
        try:
            os.mkdir(dir)
        except: # pragma: no cover # if it already exists
            pass
        self.fname = DUMP_FILENAME % {"ppid": ppid, "pid": os.getpid()}
        # The dump file is kept open for faulthandler to write into on signal.
        self.f = open(self.fname, "w")
        self.fname = self.f.name # The actual filename, can change when we patch open for testing
        faulthandler.register(signal.SIGUSR1, self.f, chain=False)
        atexit.register(self.clean)
    def clean(self): # pragma: no cover # not detected since it's called at exit
        # Unregister the handler, close the file, and remove it when nothing
        # was ever dumped (keeps the dump directory free of empty files).
        faulthandler.unregister(signal.SIGUSR1)
        if not self.f.closed:
            self.f.close()
        if os.stat(self.fname).st_size == 0:
            os.unlink(self.fname)
def init_stack_dumper():
    """
    Initialize the stack dumper for current process.
    This method should be called for all subprocesses (but not for threads)
    >>> import os
    >>> from signal import SIGUSR1
    >>> init_stack_dumper()
    >>> # From your terminal send the SIGUSR1 signal
    >>> # kill -SIGUSR1 <pid>
    >>> os.kill(os.getpid(), SIGUSR1) # send the SIGUSR1 signal
    >>> print(open(DUMP_FILENAME % dict(ppid=os.getpid(), pid=os.getpid()), "r").read()) # doctest: +ELLIPSIS
    Current thread ... (most recent call first):...
    """
    StackDumper()  # registration happens in the constructor; the instance need not be kept
| StarcoderdataPython |
5011522 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright 2014 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with <EMAIL>
#
__author__ = 'gjp'
from django.conf import settings
from circus import get_arbiter
from subprocess import Popen, PIPE
class environment_controller():
    """This class provides a control over circus launching.
    """
    # NOTE(review): ``started`` is never set to True anywhere in this module,
    # so is_started() always reports the class default — confirm intent.
    started = False
    def start_manager(self):
        """Launch the environments manager under a background circus arbiter
        (production deployments only)."""
        if settings.SETTINGS_TYPE == 'production':
            arbiter = get_arbiter([{"cmd": "python "
                                   "" + settings.ENVIRONMENTS_MANAGER_PATH, "numprocesses": 1}], background=True)
            # If stale manager processes are already running, kill them first.
            if check_python_process():
                clean_environments()
            arbiter.start()
    def is_started(self):
        # Returns the (never-updated) class-level flag; see NOTE above.
        return self.started
def clean_environments():
    """Kill (SIGKILL) every running fiware_cloto/environments process.

    Shell pipeline: list processes, grep the environments module (the [f]
    trick excludes the grep itself), extract PIDs and ``kill -9`` them.
    """
    # NOTE(review): *any* stderr output raises here, not only failures —
    # e.g. xargs warnings would abort; confirm this strictness is intended.
    cmd = "ps -awx | grep [f]iware_cloto/environments | awk '{print $1}' | xargs kill -9"
    output, error = Popen(cmd, shell=True, executable="/bin/bash", stdout=PIPE,
                          stderr=PIPE).communicate()
    if error:
        raise Exception(error)
    return output
def check_python_process():
    """Return True when an ``environmentManager.py`` process is already running.

    Scans the ``ps -awx`` listing for the environment manager script name.
    """
    process = Popen(['ps', '-awx'], stdout=PIPE)
    output, _ = process.communicate()
    # BUG FIX: on Python 3, communicate() returns bytes and
    # ``'...' in bytes_line`` raises TypeError — decode first (no-op cost on
    # Python 2, where str is already bytes).
    if isinstance(output, bytes):
        output = output.decode('utf-8', 'replace')
    for line in output.splitlines():
        if 'environmentManager.py' in line:
            return True
    return False
| StarcoderdataPython |
1840275 | <reponame>MaciejZurek/python_practicing
from poker.validators import (
RoyalFlushValidator,
StraightFlushValidator,
FourOfAKindValidator,
FullHouseValidator,
FlushValidator,
StraightValidator,
ThreeOfAKindValidator,
TwoPairValidator,
PairValidator,
HighCardValidator,
NoCardsValidator
)
class Hand():
    # Validators ordered from strongest rank to weakest; best_rank() returns
    # the first one that matches.
    VALIDATORS = (
        RoyalFlushValidator,
        StraightFlushValidator,
        FourOfAKindValidator,
        FullHouseValidator,
        FlushValidator,
        StraightValidator,
        ThreeOfAKindValidator,
        TwoPairValidator,
        PairValidator,
        HighCardValidator,
        NoCardsValidator
    )

    def __init__(self):
        # The hand is kept sorted at all times (see add_cards).
        self.cards = []

    def __repr__(self):
        """Comma-separated listing of the hand's cards."""
        return ", ".join(str(card) for card in self.cards)

    def add_cards(self, cards):
        """Merge ``cards`` into the hand, keeping the hand sorted."""
        self.cards = sorted(self.cards + list(cards))

    def best_rank(self):
        """Return ``(rank_index, rank_name, valid_cards)`` for the strongest
        matching rank (lower index means stronger)."""
        for rank_index, validator_class in enumerate(self.VALIDATORS):
            validator = validator_class(cards=self.cards)
            if validator.is_valid():
                return (rank_index, validator.name, validator.valid_cards())
9799664 | #!/usr/bin/python
"""
Temperature & Humidity class
"""
import array
import time
import io
import fcntl
# ioctl request number used to select the I2C slave address on the bus device.
I2C_SLAVE = 0x0703
HTU21D_ADDR = 0x40 # HTU21D default address.
# HTU21D single-byte command codes.
# NOTE(review): these are str literals, not bytes — writing them to a binary
# ("wb") device file only works on Python 2; confirm the target interpreter.
CMD_READ_TEMP_HOLD = "\xE3"
CMD_READ_HUM_HOLD = "\xE5"
CMD_READ_TEMP_NOHOLD = "\xF3"
CMD_READ_HUM_NOHOLD = "\xF5"
CMD_WRITE_USER_REG = "\xE6"
CMD_READ_USER_REG = "\xE7"
CMD_SOFT_RESET = "\xFE"
SHT31_I2CADDR = 0x44 # SHT31D default address
SHT31_SEND_MEASUREMENT = 0x2C # Send measurement command, 0x2C(44)
SHT31_HIGH_REPEATABILITY = 0x06 # 0x06(06) High repeatability measurement
SHT31_READ_DATA = 0x00 # Read data back from 0x00(00), 6 bytes
class I2C(object):
    """Thin reader/writer wrapper around a Raspberry Pi /dev/i2c-<bus> device."""

    def __init__(self, device, bus):
        dev_path = "/dev/i2c-" + str(bus)
        # Unbuffered binary handles: one for reading, one for writing.
        self.fr = io.open(dev_path, "rb", buffering=0)
        self.fw = io.open(dev_path, "wb", buffering=0)
        # Bind both handles to the target slave address (read handle first,
        # matching the original call order).
        for handle in (self.fr, self.fw):
            fcntl.ioctl(handle, I2C_SLAVE, device)

    def write(self, _bytes):
        """Send raw data to the slave device."""
        self.fw.write(_bytes)

    def read(self, _bytes):
        """Read ``_bytes`` bytes back from the slave device."""
        return self.fr.read(_bytes)

    def close(self):
        """Close both underlying file handles (write handle first)."""
        self.fw.close()
        self.fr.close()
class HTU21D(object):
    """
    HTU21D-f from Adafruit: temperature/humidity sensor driver on I2C bus 1.
    """
    def __init__(self):
        self.dev = I2C(HTU21D_ADDR, 1) # HTU21D 0x40, bus 1
        self.dev.write(CMD_SOFT_RESET) # soft reset
        time.sleep(.1)
    def ctemp(self, sensor_temp):
        """Convert a raw 16-bit temperature reading to degrees Celsius.

        Formula matches the HTU21D datasheet conversion — confirm against it.
        """
        t_sensor_temp = sensor_temp / 65536.0
        return -46.85 + (175.72 * t_sensor_temp)
    def chumid(self, sensor_humid):
        """Convert a raw 16-bit humidity reading to %RH.

        Formula matches the HTU21D datasheet conversion — confirm against it.
        """
        t_sensor_humid = sensor_humid / 65536.0
        return -6.0 + (125.0 * t_sensor_humid)
    def crc8check(self, value):
        """Return True when the 3-byte reading (MSB, LSB, CRC) passes CRC-8."""
        # Ported from Sparkfun Arduino HTU21D Library: https://github.com/sparkfun/HTU21D_Breakout
        remainder = ((value[0] << 8) + value[1]) << 8
        remainder |= value[2]
        # POLYNOMIAL = 0x0131 = x^8 + x^5 + x^4 + 1
        # divisor = 0x988000 is the 0x0131 polynomial shifted to farthest left of three bytes
        divisor = 0x988000
        # Bitwise long division over the 16 data bits; a zero remainder
        # means the appended CRC byte is consistent.
        for i in range(0, 16):
            if remainder & 1 << (23 - i):
                remainder ^= divisor
            divisor >>= 1
        if remainder == 0:
            return True
        else:
            return False
    def read_temperature(self):
        """Trigger a no-hold temperature measurement and return °C,
        or -255 when the CRC check fails."""
        self.dev.write(CMD_READ_TEMP_NOHOLD) # measure temp
        time.sleep(.1)
        data = self.dev.read(3)
        buf = array.array('B', data)
        if self.crc8check(buf):
            # Mask off the two status bits in the LSB before converting.
            temp = (buf[0] << 8 | buf[1]) & 0xFFFC
            return self.ctemp(temp)
        else:
            return -255
    def read_humidity(self):
        """Trigger a no-hold humidity measurement and return %RH,
        or -255 when the CRC check fails."""
        self.dev.write(CMD_READ_HUM_NOHOLD) # measure humidity
        time.sleep(.1)
        data = self.dev.read(3)
        buf = array.array('B', data)
        if self.crc8check(buf):
            # Mask off the two status bits in the LSB before converting.
            humid = (buf[0] << 8 | buf[1]) & 0xFFFC
            return self.chumid(humid)
        else:
            return -255
class SHT31(object):
    """
    SHT31-D (https://www.adafruit.com/products/2857) temperature and Humidity sensor.
    This class currently only extracts the temperature and humidity from the sensor.
    """
    def __init__(self, _address=SHT31_I2CADDR):
        self.bus = None  # Get I2C bus; stays None when smbus is unavailable
        try:
            import smbus
            self.bus = smbus.SMBus(1)
            self.address = _address
        except ImportError as err:
            print("FATAL ERROR, Could not import SHT31 BUS libraries in tempHumidity_lib.py")
            # BUG FIX: the original mixed %-style with .format()
            # ("Error was [%s]".format(...)), printing the literal placeholder.
            print("Error was [%s]" % str(err))
    def read_temperature_humidity(self):
        """
        Core function which reads the sensors values, being both temperature and humidity.
        :return: temperature, humidity
        """
        # Request a high-repeatability measurement, then wait for completion.
        self.bus.write_i2c_block_data(self.address, SHT31_SEND_MEASUREMENT, [SHT31_HIGH_REPEATABILITY])
        time.sleep(0.5)
        # Read data back from 0x00(00), 6 bytes
        # Temp MSB, Temp LSB, Temp CRC, Humidity MSB, Humidity LSB, Humidity CRC
        data = self.bus.read_i2c_block_data(self.address, SHT31_READ_DATA, 6)
        # Convert the data (scaling formulas — confirm against SHT31 datasheet).
        raw_temperature = data[0] * 256 + data[1]
        temperature = -45 + (175 * raw_temperature / 65535.0)
        raw_humidity = data[3] * 256 + data[4]
        # BUG FIX: was ``/ 0xFFFF`` (an int) — integer division under
        # Python 2, inconsistent with the float divisor on the temperature path.
        humidity = 100 * raw_humidity / 65535.0
        return temperature, humidity
    def read_temperature(self):
        """
        Extracts the temperature component from the sensor and returns the value
        :return: temperature (celsius)
        """
        (temperature, humidity) = self.read_temperature_humidity()
        return temperature
    def read_humidity(self):
        """
        Extracts the humidity component from the sensor and returns the value
        :return: humidity
        """
        (temperature, humidity) = self.read_temperature_humidity()
        return humidity
9779317 | <reponame>robzim/isilon_sdk_python
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 3
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from isi_sdk_8_0.api_client import ApiClient
class AuthGroupsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
    def __init__(self, api_client=None):
        """Create the API wrapper, defaulting to a fresh ApiClient when none is given."""
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
    def create_group_member(self, group_member, group, **kwargs):  # noqa: E501
        """create_group_member  # noqa: E501
        Add a member to the group.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.create_group_member(group_member, group, async=True)
        >>> result = thread.get()
        :param async bool
        :param GroupMember group_member: (required)
        :param str group: (required)
        :param str zone: Filter group members by zone.
        :param str provider: Filter group members by provider.
        :return: CreateResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE(review): generated by swagger-codegen — regenerate rather than
        # hand-edit. ``async`` is a reserved word on Python >= 3.7, so callers
        # on those versions cannot pass it as a literal keyword argument.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async'):
            return self.create_group_member_with_http_info(group_member, group, **kwargs)  # noqa: E501
        else:
            # Synchronous path: unwrap the (data, status, headers) result.
            (data) = self.create_group_member_with_http_info(group_member, group, **kwargs)  # noqa: E501
            return data
    def create_group_member_with_http_info(self, group_member, group, **kwargs):  # noqa: E501
        """create_group_member  # noqa: E501
        Add a member to the group.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.create_group_member_with_http_info(group_member, group, async=True)
        >>> result = thread.get()
        :param async bool
        :param GroupMember group_member: (required)
        :param str group: (required)
        :param str zone: Filter group members by zone.
        :param str provider: Filter group members by provider.
        :return: CreateResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE(review): generated by swagger-codegen — regenerate rather than
        # hand-edit.
        all_params = ['group_member', 'group', 'zone', 'provider']  # noqa: E501
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Reject any keyword argument this endpoint does not understand.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_group_member" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'group_member' is set
        if ('group_member' not in params or
                params['group_member'] is None):
            raise ValueError("Missing the required parameter `group_member` when calling `create_group_member`")  # noqa: E501
        # verify the required parameter 'group' is set
        if ('group' not in params or
                params['group'] is None):
            raise ValueError("Missing the required parameter `group` when calling `create_group_member`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'group' in params:
            path_params['Group'] = params['group']  # noqa: E501
        query_params = []
        if 'zone' in params:
            query_params.append(('zone', params['zone']))  # noqa: E501
        if 'provider' in params:
            query_params.append(('provider', params['provider']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'group_member' in params:
            body_params = params['group_member']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['basicAuth']  # noqa: E501
        return self.api_client.call_api(
            '/platform/1/auth/groups/{Group}/members', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='CreateResponse',  # noqa: E501
            auth_settings=auth_settings,
            myAsync=params.get('async'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def delete_group_member(self, group_member_id, group, **kwargs):  # noqa: E501
        """delete_group_member  # noqa: E501
        Remove the member from the group.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.delete_group_member(group_member_id, group, async=True)
        >>> result = thread.get()
        :param async bool
        :param str group_member_id: Remove the member from the group. (required)
        :param str group: (required)
        :param str zone: Filter group members by zone.
        :param str provider: Filter group members by provider.
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE(review): generated by swagger-codegen — regenerate rather than
        # hand-edit. ``async`` is a reserved word on Python >= 3.7, so callers
        # on those versions cannot pass it as a literal keyword argument.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async'):
            return self.delete_group_member_with_http_info(group_member_id, group, **kwargs)  # noqa: E501
        else:
            # Synchronous path: unwrap the (data, status, headers) result.
            (data) = self.delete_group_member_with_http_info(group_member_id, group, **kwargs)  # noqa: E501
            return data
def delete_group_member_with_http_info(self, group_member_id, group, **kwargs):  # noqa: E501
    """delete_group_member  # noqa: E501

    Remove the member from the group, returning full HTTP response info.

    :param async bool
    :param str group_member_id: Remove the member from the group. (required)
    :param str group: (required)
    :param str zone: Filter group members by zone.
    :param str provider: Filter group members by provider.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint understands (endpoint params plus
    # the generic transport controls).
    recognized = {
        'group_member_id', 'group', 'zone', 'provider',
        'async', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    }

    params = {'group_member_id': group_member_id, 'group': group}
    for key, val in kwargs.items():
        if key not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_group_member" % key
            )
        params[key] = val

    # verify the required parameters are set
    if params.get('group_member_id') is None:
        raise ValueError("Missing the required parameter `group_member_id` when calling `delete_group_member`")  # noqa: E501
    if params.get('group') is None:
        raise ValueError("Missing the required parameter `group` when calling `delete_group_member`")  # noqa: E501

    path_params = {
        'GroupMemberId': params['group_member_id'],
        'Group': params['group'],
    }

    # Preserve the original query-parameter ordering.
    query_params = [
        (name, params[name])
        for name in ('zone', 'provider')
        if name in params
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }

    # Authentication setting
    auth_settings = ['basicAuth']

    return self.api_client.call_api(
        '/platform/1/auth/groups/{Group}/members/{GroupMemberId}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type=None,
        auth_settings=auth_settings,
        myAsync=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def list_group_members(self, group, **kwargs):  # noqa: E501
    """list_group_members  # noqa: E501

    List all the members of the group.

    Synchronous by default; pass ``async=True`` (as a kwarg) to receive
    the request thread instead of the result.

    :param async bool
    :param str group: (required)
    :param bool resolve_names: Resolve names of personas.
    :param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
    :param int limit: Return no more than this many results at once (see resume).
    :param str zone: Filter group members by zone.
    :param str provider: Filter group members by provider.
    :return: GroupMembers
             If the method is called asynchronously,
             returns the request thread.
    """
    # Only the payload is wanted from this public wrapper.
    kwargs['_return_http_data_only'] = True
    # *_with_http_info returns a thread when 'async' is set and plain
    # data otherwise, so both original branches collapse to one call.
    return self.list_group_members_with_http_info(group, **kwargs)  # noqa: E501
def list_group_members_with_http_info(self, group, **kwargs):  # noqa: E501
    """list_group_members  # noqa: E501

    List all the members of the group, returning full HTTP response info.

    :param async bool
    :param str group: (required)
    :param bool resolve_names: Resolve names of personas.
    :param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
    :param int limit: Return no more than this many results at once (see resume).
    :param str zone: Filter group members by zone.
    :param str provider: Filter group members by provider.
    :return: GroupMembers
             If the method is called asynchronously,
             returns the request thread.
    """
    # Keyword arguments this endpoint understands (endpoint params plus
    # the generic transport controls).
    recognized = {
        'group', 'resolve_names', 'resume', 'limit', 'zone', 'provider',
        'async', '_return_http_data_only', '_preload_content',
        '_request_timeout',
    }

    params = {'group': group}
    for key, val in kwargs.items():
        if key not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_group_members" % key
            )
        params[key] = val

    # verify the required parameter 'group' is set
    if params.get('group') is None:
        raise ValueError("Missing the required parameter `group` when calling `list_group_members`")  # noqa: E501

    if 'limit' in params and params['limit'] < 1:  # noqa: E501
        raise ValueError("Invalid value for parameter `limit` when calling `list_group_members`, must be a value greater than or equal to `1`")  # noqa: E501

    path_params = {'Group': params['group']}

    # Preserve the original query-parameter ordering.
    query_params = [
        (name, params[name])
        for name in ('resolve_names', 'resume', 'limit', 'zone', 'provider')
        if name in params
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }

    # Authentication setting
    auth_settings = ['basicAuth']

    return self.api_client.call_api(
        '/platform/1/auth/groups/{Group}/members', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='GroupMembers',
        auth_settings=auth_settings,
        myAsync=params.get('async'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
| StarcoderdataPython |
# homeassistant/components/august/__init__.py
"""Support for August devices."""
import asyncio
from datetime import timedelta
from functools import partial
import logging
from august.api import Api, AugustApiHTTPError
from august.authenticator import AuthenticationState, Authenticator, ValidationResult
from requests import RequestException, Session
import voluptuous as vol
from homeassistant.const import (
CONF_PASSWORD,
CONF_TIMEOUT,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
_CONFIGURING = {}
DEFAULT_TIMEOUT = 10
ACTIVITY_FETCH_LIMIT = 10
ACTIVITY_INITIAL_FETCH_LIMIT = 20
CONF_LOGIN_METHOD = "login_method"
CONF_INSTALL_ID = "install_id"
NOTIFICATION_ID = "august_notification"
NOTIFICATION_TITLE = "August Setup"
AUGUST_CONFIG_FILE = ".august.conf"
DATA_AUGUST = "august"
DOMAIN = "august"
DEFAULT_ENTITY_NAMESPACE = "august"
# Limit battery and hardware updates to 1800 seconds
# in order to reduce the number of api requests and
# avoid hitting rate limits
MIN_TIME_BETWEEN_LOCK_DETAIL_UPDATES = timedelta(seconds=1800)
# Doorbells need to update more frequently than locks
# since we get an image from the doorbell api
MIN_TIME_BETWEEN_DOORBELL_STATUS_UPDATES = timedelta(seconds=20)
# Activity needs to be checked more frequently as the
# doorbell motion and rings are included here
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=10)
DEFAULT_SCAN_INTERVAL = timedelta(seconds=10)
LOGIN_METHODS = ["phone", "email"]
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_LOGIN_METHOD): vol.In(LOGIN_METHODS),
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_INSTALL_ID): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
}
)
},
extra=vol.ALLOW_EXTRA,
)
AUGUST_COMPONENTS = ["camera", "binary_sensor", "lock"]
def request_configuration(hass, config, api, authenticator, token_refresh_lock):
    """Request configuration steps from the user.

    Shows a configurator card asking for the August two-factor
    verification code, and resumes normal setup once a valid code is
    entered.
    """
    configurator = hass.components.configurator

    def august_configuration_callback(data):
        """Run when the configuration callback is called."""
        # Validate the 2FA code the user typed into the configurator card.
        result = authenticator.validate_verification_code(data.get("verification_code"))

        if result == ValidationResult.INVALID_VERIFICATION_CODE:
            configurator.notify_errors(
                _CONFIGURING[DOMAIN], "Invalid verification code"
            )
        elif result == ValidationResult.VALIDATED:
            # Code accepted - retry the full component setup.
            setup_august(hass, config, api, authenticator, token_refresh_lock)

    if DOMAIN not in _CONFIGURING:
        # First time through: trigger delivery of the verification code
        # (SMS or email, depending on the configured login method).
        authenticator.send_verification_code()

    conf = config[DOMAIN]
    username = conf.get(CONF_USERNAME)
    login_method = conf.get(CONF_LOGIN_METHOD)

    # Remember the configurator request so the callback can report errors
    # against it and setup_august can mark it done.
    _CONFIGURING[DOMAIN] = configurator.request_config(
        NOTIFICATION_TITLE,
        august_configuration_callback,
        description="Please check your {} ({}) and enter the verification "
        "code below".format(login_method, username),
        submit_caption="Verify",
        fields=[
            {"id": "verification_code", "name": "Verification code", "type": "string"}
        ],
    )
def setup_august(hass, config, api, authenticator, token_refresh_lock):
    """Set up the August component.

    Returns True when setup succeeded or is awaiting two-factor
    validation, False when authentication failed or the service could
    not be reached.
    """
    authentication = None
    try:
        authentication = authenticator.authenticate()
    except RequestException as ex:
        _LOGGER.error("Unable to connect to August service: %s", str(ex))

        hass.components.persistent_notification.create(
            "Error: {}<br />"
            "You will need to restart hass after fixing."
            "".format(ex),
            title=NOTIFICATION_TITLE,
            notification_id=NOTIFICATION_ID,
        )
        # BUGFIX: previously execution fell through to
        # `authentication.state` with `authentication` still None,
        # raising AttributeError instead of failing setup cleanly.
        return False

    state = authentication.state

    if state == AuthenticationState.AUTHENTICATED:
        # If we got a configurator request pending from a previous 2FA
        # round-trip, close it out now.
        if DOMAIN in _CONFIGURING:
            hass.components.configurator.request_done(_CONFIGURING.pop(DOMAIN))

        hass.data[DATA_AUGUST] = AugustData(
            hass, api, authentication, authenticator, token_refresh_lock
        )

        for component in AUGUST_COMPONENTS:
            discovery.load_platform(hass, component, DOMAIN, {}, config)

        return True

    if state == AuthenticationState.BAD_PASSWORD:
        _LOGGER.error("Invalid password provided")
        return False

    if state == AuthenticationState.REQUIRES_VALIDATION:
        # August wants a 2FA code; ask the user via the configurator.
        request_configuration(hass, config, api, authenticator, token_refresh_lock)
        return True

    return False
async def async_setup(hass, config):
    """Set up the August component."""
    conf = config[DOMAIN]
    api_http_session = None
    try:
        # A shared requests.Session keeps connections alive across the
        # many API calls this integration makes.
        api_http_session = Session()
    except RequestException as ex:
        _LOGGER.warning("Creating HTTP session failed with: %s", str(ex))

    api = Api(timeout=conf.get(CONF_TIMEOUT), http_session=api_http_session)

    authenticator = Authenticator(
        api,
        conf.get(CONF_LOGIN_METHOD),
        conf.get(CONF_USERNAME),
        conf.get(CONF_PASSWORD),
        install_id=conf.get(CONF_INSTALL_ID),
        # Cache the access token on disk so restarts don't re-trigger 2FA.
        access_token_cache_file=hass.config.path(AUGUST_CONFIG_FILE),
    )

    def close_http_session(event):
        """Close API sessions used to connect to August."""
        _LOGGER.debug("Closing August HTTP sessions")
        if api_http_session:
            try:
                api_http_session.close()
            except RequestException:
                # Best-effort cleanup at shutdown; nothing to do on failure.
                pass

        _LOGGER.debug("August HTTP session closed.")

    # Make sure the session is torn down when Home Assistant stops.
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, close_http_session)
    _LOGGER.debug("Registered for Home Assistant stop event")

    # Shared lock so only one task refreshes the access token at a time.
    token_refresh_lock = asyncio.Lock()

    # setup_august does blocking network I/O, so run it in the executor.
    return await hass.async_add_executor_job(
        setup_august, hass, config, api, authenticator, token_refresh_lock
    )
class AugustData:
    """August data object.

    Central cache/coordinator for the August cloud API: owns the access
    token, the discovered doorbells and locks, and throttled refreshes
    of device details and the house activity feed.
    """

    def __init__(self, hass, api, authentication, authenticator, token_refresh_lock):
        """Init August data object."""
        self._hass = hass
        self._api = api
        self._authenticator = authenticator
        self._access_token = authentication.access_token
        self._access_token_expires = authentication.access_token_expires
        # Shared with async_setup so only one task refreshes the token.
        self._token_refresh_lock = token_refresh_lock
        self._doorbells = self._api.get_doorbells(self._access_token) or []
        self._locks = self._api.get_operable_locks(self._access_token) or []
        self._house_ids = set()
        for device in self._doorbells + self._locks:
            self._house_ids.add(device.house_id)

        # Per-device caches, populated by the throttled update methods.
        self._doorbell_detail_by_id = {}
        self._lock_detail_by_id = {}
        self._activities_by_id = {}

        # We check the locks right away so we can
        # remove inoperative ones
        self._update_locks_detail()
        self._filter_inoperative_locks()

    @property
    def house_ids(self):
        """Return a list of house_ids."""
        return self._house_ids

    @property
    def doorbells(self):
        """Return a list of doorbells."""
        return self._doorbells

    @property
    def locks(self):
        """Return a list of locks."""
        return self._locks

    async def _async_refresh_access_token_if_needed(self):
        """Refresh the august access token if needed."""
        if self._authenticator.should_refresh():
            # Serialize refreshes so concurrent updates don't race.
            async with self._token_refresh_lock:
                await self._hass.async_add_executor_job(self._refresh_access_token)

    def _refresh_access_token(self):
        # Blocking call; always invoked from the executor.
        refreshed_authentication = self._authenticator.refresh_access_token(force=False)
        _LOGGER.info(
            "Refreshed august access token. The old token expired at %s, and the new token expires at %s",
            self._access_token_expires,
            refreshed_authentication.access_token_expires,
        )
        self._access_token = refreshed_authentication.access_token
        self._access_token_expires = refreshed_authentication.access_token_expires

    async def async_get_device_activities(self, device_id, *activity_types):
        """Return a list of activities."""
        _LOGGER.debug("Getting device activities for %s", device_id)
        await self._async_update_device_activities()

        activities = self._activities_by_id.get(device_id, [])
        if activity_types:
            # Optionally narrow to the requested activity types.
            return [a for a in activities if a.activity_type in activity_types]
        return activities

    async def async_get_latest_device_activity(self, device_id, *activity_types):
        """Return latest activity."""
        activities = await self.async_get_device_activities(device_id, *activity_types)
        # First entry is the newest; None when there is no activity.
        return next(iter(activities or []), None)

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    async def _async_update_device_activities(self, limit=ACTIVITY_FETCH_LIMIT):
        """Update data object with latest from August API."""

        # This is the only place we refresh the api token
        await self._async_refresh_access_token_if_needed()
        return await self._hass.async_add_executor_job(
            partial(self._update_device_activities, limit=ACTIVITY_FETCH_LIMIT)
        )

    def _update_device_activities(self, limit=ACTIVITY_FETCH_LIMIT):
        # Blocking call; fetches each house's feed and groups it by device.
        _LOGGER.debug("Start retrieving device activities")
        for house_id in self.house_ids:
            _LOGGER.debug("Updating device activity for house id %s", house_id)

            activities = self._api.get_house_activities(
                self._access_token, house_id, limit=limit
            )

            device_ids = {a.device_id for a in activities}
            for device_id in device_ids:
                self._activities_by_id[device_id] = [
                    a for a in activities if a.device_id == device_id
                ]
        _LOGGER.debug("Completed retrieving device activities")

    async def async_get_doorbell_detail(self, doorbell_id):
        """Return doorbell detail."""
        await self._async_update_doorbells()
        return self._doorbell_detail_by_id.get(doorbell_id)

    @Throttle(MIN_TIME_BETWEEN_DOORBELL_STATUS_UPDATES)
    async def _async_update_doorbells(self):
        await self._hass.async_add_executor_job(self._update_doorbells)

    def _update_doorbells(self):
        # Build a fresh map and swap it in at the end so readers never
        # see a half-updated cache.
        detail_by_id = {}

        _LOGGER.debug("Start retrieving doorbell details")
        for doorbell in self._doorbells:
            _LOGGER.debug("Updating doorbell status for %s", doorbell.device_name)
            try:
                detail_by_id[doorbell.device_id] = self._api.get_doorbell_detail(
                    self._access_token, doorbell.device_id
                )
            except RequestException as ex:
                _LOGGER.error(
                    "Request error trying to retrieve doorbell status for %s. %s",
                    doorbell.device_name,
                    ex,
                )
                detail_by_id[doorbell.device_id] = None
            except Exception:
                # Unknown failure: record no detail, but re-raise so the
                # error is not silently swallowed.
                detail_by_id[doorbell.device_id] = None
                raise

        _LOGGER.debug("Completed retrieving doorbell details")
        self._doorbell_detail_by_id = detail_by_id

    def update_door_state(self, lock_id, door_state, update_start_time_utc):
        """Set the door status and last status update time.

        This is called when newer activity is detected on the activity feed
        in order to keep the internal data in sync
        """

        # When syncing the door state became available via py-august, this
        # function caused to be actively used.  It will be again as we will
        # update the door state from lock/unlock operations as the august api
        # does report the door state on lock/unlock, however py-august does not
        # expose this to us yet.
        self._lock_detail_by_id[lock_id].door_state = door_state
        self._lock_detail_by_id[lock_id].door_state_datetime = update_start_time_utc
        return True

    def update_lock_status(self, lock_id, lock_status, update_start_time_utc):
        """Set the lock status and last status update time.

        This is used when the lock, unlock apis are called
        or newer activity is detected on the activity feed
        in order to keep the internal data in sync
        """
        self._lock_detail_by_id[lock_id].lock_status = lock_status
        self._lock_detail_by_id[lock_id].lock_status_datetime = update_start_time_utc
        return True

    def lock_has_doorsense(self, lock_id):
        """Determine if a lock has doorsense installed and can tell when the door is open or closed."""
        # We do not update here since this is not expected
        # to change until restart
        if self._lock_detail_by_id[lock_id] is None:
            return False
        return self._lock_detail_by_id[lock_id].doorsense

    async def async_get_lock_detail(self, lock_id):
        """Return lock detail."""
        await self._async_update_locks_detail()
        return self._lock_detail_by_id[lock_id]

    def get_lock_name(self, device_id):
        """Return lock name as August has it stored."""
        # NOTE: implicitly returns None when the device_id is unknown.
        for lock in self._locks:
            if lock.device_id == device_id:
                return lock.device_name

    @Throttle(MIN_TIME_BETWEEN_LOCK_DETAIL_UPDATES)
    async def _async_update_locks_detail(self):
        await self._hass.async_add_executor_job(self._update_locks_detail)

    def _update_locks_detail(self):
        # Build a fresh map and swap it in at the end so readers never
        # see a half-updated cache.
        detail_by_id = {}

        _LOGGER.debug("Start retrieving locks detail")
        for lock in self._locks:
            try:
                detail_by_id[lock.device_id] = self._api.get_lock_detail(
                    self._access_token, lock.device_id
                )
            except RequestException as ex:
                _LOGGER.error(
                    "Request error trying to retrieve door details for %s. %s",
                    lock.device_name,
                    ex,
                )
                detail_by_id[lock.device_id] = None
            except Exception:
                # Unknown failure: record no detail, but re-raise so the
                # error is not silently swallowed.
                detail_by_id[lock.device_id] = None
                raise

        _LOGGER.debug("Completed retrieving locks detail")
        self._lock_detail_by_id = detail_by_id

    def lock(self, device_id):
        """Lock the device."""
        return _call_api_operation_that_requires_bridge(
            self.get_lock_name(device_id),
            "lock",
            self._api.lock,
            self._access_token,
            device_id,
        )

    def unlock(self, device_id):
        """Unlock the device."""
        return _call_api_operation_that_requires_bridge(
            self.get_lock_name(device_id),
            "unlock",
            self._api.unlock,
            self._access_token,
            device_id,
        )

    def _filter_inoperative_locks(self):
        # Remove non-operative locks as there must
        # be a bridge (August Connect) for them to
        # be usable
        operative_locks = []
        for lock in self._locks:
            lock_detail = self._lock_detail_by_id.get(lock.device_id)
            if lock_detail is None:
                _LOGGER.info(
                    "The lock %s could not be setup because the system could not fetch details about the lock.",
                    lock.device_name,
                )
            elif lock_detail.bridge is None:
                _LOGGER.info(
                    "The lock %s could not be setup because it does not have a bridge (Connect).",
                    lock.device_name,
                )
            elif not lock_detail.bridge.operative:
                _LOGGER.info(
                    "The lock %s could not be setup because the bridge (Connect) is not operative.",
                    lock.device_name,
                )
            else:
                operative_locks.append(lock)

        self._locks = operative_locks
def _call_api_operation_that_requires_bridge(
device_name, operation_name, func, *args, **kwargs
):
"""Call an API that requires the bridge to be online."""
ret = None
try:
ret = func(*args, **kwargs)
except AugustApiHTTPError as err:
raise HomeAssistantError(device_name + ": " + str(err))
return ret
| StarcoderdataPython |
4915689 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# generated by wxGlade 0.6.3 on Sun Feb 19 13:53:24 2012
import wx
# begin wxGlade: extracode
from pyoptools.gui.glplotframe import glCanvas
# end wxGlade
class OGLFrame(wx.Frame):
    # wxGlade-generated frame (Python 2): an OpenGL canvas plus button
    # panels for translating, rotating, spinning and zooming the view.
    def __init__(self, *args, **kwds):
        # begin wxGlade: OGLFrame.__init__
        kwds["style"] = wx.DEFAULT_FRAME_STYLE
        wx.Frame.__init__(self, *args, **kwds)
        # Main OpenGL drawing surface.
        self.glcanvas = glCanvas(self)
        # Navigation controls; icon paths assume an Oxygen icon theme
        # install (Linux/KDE) - TODO confirm portability.
        self.label_1 = wx.StaticText(self, -1, "Translate")
        self.trUp = wx.BitmapButton(self, -1, wx.Bitmap("/usr/share/icons/oxygen/32x32/actions/arrow-up.png", wx.BITMAP_TYPE_ANY))
        self.trLeft = wx.BitmapButton(self, -1, wx.Bitmap("/usr/share/icons/oxygen/32x32/actions/arrow-left.png", wx.BITMAP_TYPE_ANY))
        self.trRight = wx.BitmapButton(self, -1, wx.Bitmap("/usr/share/icons/oxygen/32x32/actions/arrow-right.png", wx.BITMAP_TYPE_ANY))
        self.trDown = wx.BitmapButton(self, -1, wx.Bitmap("/usr/share/icons/oxygen/32x32/actions/arrow-down.png", wx.BITMAP_TYPE_ANY))
        self.label_2 = wx.StaticText(self, -1, "Rotate")
        self.rotUp = wx.BitmapButton(self, -1, wx.Bitmap("/usr/share/icons/oxygen/32x32/actions/arrow-up.png", wx.BITMAP_TYPE_ANY))
        self.rotLeft = wx.BitmapButton(self, -1, wx.Bitmap("/usr/share/icons/oxygen/32x32/actions/arrow-left.png", wx.BITMAP_TYPE_ANY))
        self.rotRight = wx.BitmapButton(self, -1, wx.Bitmap("/usr/share/icons/oxygen/32x32/actions/arrow-right.png", wx.BITMAP_TYPE_ANY))
        self.rotDown = wx.BitmapButton(self, -1, wx.Bitmap("/usr/share/icons/oxygen/32x32/actions/arrow-down.png", wx.BITMAP_TYPE_ANY))
        self.label_3 = wx.StaticText(self, -1, "Spin")
        self.spCCW = wx.BitmapButton(self, -1, wx.Bitmap("/usr/share/icons/oxygen/32x32/actions/arrow-left.png", wx.BITMAP_TYPE_ANY))
        self.spCW = wx.BitmapButton(self, -1, wx.Bitmap("/usr/share/icons/oxygen/32x32/actions/arrow-right.png", wx.BITMAP_TYPE_ANY))
        self.label_4 = wx.StaticText(self, -1, "Zoom")
        self.zmIn = wx.BitmapButton(self, -1, wx.Bitmap("/usr/share/icons/oxygen/32x32/actions/arrow-left.png", wx.BITMAP_TYPE_ANY))
        self.zmOut = wx.BitmapButton(self, -1, wx.Bitmap("/usr/share/icons/oxygen/32x32/actions/arrow-right.png", wx.BITMAP_TYPE_ANY))

        self.__set_properties()
        self.__do_layout()

        # Wire each button to its (stub) handler.
        self.Bind(wx.EVT_BUTTON, self.OnUp, self.trUp)
        self.Bind(wx.EVT_BUTTON, self.OnLeft, self.trLeft)
        self.Bind(wx.EVT_BUTTON, self.OnRight, self.trRight)
        self.Bind(wx.EVT_BUTTON, self.OnDown, self.trDown)
        self.Bind(wx.EVT_BUTTON, self.OnSpUp, self.rotUp)
        self.Bind(wx.EVT_BUTTON, self.OnSpLeft, self.rotLeft)
        self.Bind(wx.EVT_BUTTON, self.OnSpRight, self.rotRight)
        self.Bind(wx.EVT_BUTTON, self.OnSpDown, self.rotDown)
        self.Bind(wx.EVT_BUTTON, self.OnSpCCW, self.spCCW)
        self.Bind(wx.EVT_BUTTON, self.OnSpCW, self.spCW)
        self.Bind(wx.EVT_BUTTON, self.OnZmOut, self.zmIn)
        self.Bind(wx.EVT_BUTTON, self.OnZmIn, self.zmOut)
        # end wxGlade
        # NOTE(review): zmIn is bound to OnZmOut and zmOut to OnZmIn -
        # looks inverted; confirm against intended zoom direction.

    def __set_properties(self):
        # begin wxGlade: OGLFrame.__set_properties
        self.SetTitle("pyoptools Optical System Preview")
        self.glcanvas.SetMinSize((640,480))
        self.trUp.SetSize(self.trUp.GetBestSize())
        self.trLeft.SetSize(self.trLeft.GetBestSize())
        self.trRight.SetSize(self.trRight.GetBestSize())
        self.trDown.SetSize(self.trDown.GetBestSize())
        self.rotUp.SetSize(self.rotUp.GetBestSize())
        self.rotLeft.SetSize(self.rotLeft.GetBestSize())
        self.rotRight.SetSize(self.rotRight.GetBestSize())
        self.rotDown.SetSize(self.rotDown.GetBestSize())
        self.spCCW.SetSize(self.spCCW.GetBestSize())
        self.spCW.SetSize(self.spCW.GetBestSize())
        self.zmIn.SetSize(self.zmIn.GetBestSize())
        self.zmOut.SetSize(self.zmOut.GetBestSize())
        # end wxGlade

    def __do_layout(self):
        # begin wxGlade: OGLFrame.__do_layout
        # Layout: canvas on the left, a vertical column of button grids
        # (translate / rotate / spin / zoom) on the right.
        sizer_1 = wx.BoxSizer(wx.VERTICAL)
        sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_4 = wx.BoxSizer(wx.VERTICAL)
        sizer_3_copy = wx.BoxSizer(wx.HORIZONTAL)
        sizer_3 = wx.BoxSizer(wx.HORIZONTAL)
        grid_sizer_1_copy = wx.GridSizer(3, 3, 0, 0)
        grid_sizer_1 = wx.GridSizer(3, 3, 0, 0)
        sizer_2.Add(self.glcanvas, 1, wx.EXPAND, 0)
        sizer_4.Add(self.label_1, 0, 0, 0)
        # 3x3 grid with arrow buttons at the edge midpoints; (32, 32)
        # entries are spacers filling the corners and center.
        grid_sizer_1.Add((32, 32), 0, 0, 0)
        grid_sizer_1.Add(self.trUp, 0, 0, 0)
        grid_sizer_1.Add((32, 32), 0, 0, 0)
        grid_sizer_1.Add(self.trLeft, 0, 0, 0)
        grid_sizer_1.Add((32, 32), 0, 0, 0)
        grid_sizer_1.Add(self.trRight, 0, 0, 0)
        grid_sizer_1.Add((32, 32), 0, 0, 0)
        grid_sizer_1.Add(self.trDown, 0, 0, 0)
        grid_sizer_1.Add((32, 32), 0, 0, 0)
        sizer_4.Add(grid_sizer_1, 0, wx.EXPAND, 12)
        sizer_4.Add(self.label_2, 0, 0, 0)
        grid_sizer_1_copy.Add((32, 32), 0, 0, 0)
        grid_sizer_1_copy.Add(self.rotUp, 0, 0, 0)
        grid_sizer_1_copy.Add((32, 32), 0, 0, 0)
        grid_sizer_1_copy.Add(self.rotLeft, 0, 0, 0)
        grid_sizer_1_copy.Add((32, 32), 0, 0, 0)
        grid_sizer_1_copy.Add(self.rotRight, 0, 0, 0)
        grid_sizer_1_copy.Add((32, 32), 0, 0, 0)
        grid_sizer_1_copy.Add(self.rotDown, 0, 0, 0)
        grid_sizer_1_copy.Add((32, 32), 0, 0, 0)
        sizer_4.Add(grid_sizer_1_copy, 0, wx.EXPAND, 12)
        sizer_4.Add(self.label_3, 0, 0, 0)
        sizer_3.Add(self.spCCW, 0, 0, 0)
        sizer_3.Add((32, 32), 0, 0, 0)
        sizer_3.Add(self.spCW, 0, 0, 0)
        sizer_4.Add(sizer_3, 0, wx.EXPAND, 0)
        sizer_4.Add(self.label_4, 0, 0, 0)
        sizer_3_copy.Add(self.zmIn, 0, 0, 0)
        sizer_3_copy.Add((32, 32), 0, 0, 0)
        sizer_3_copy.Add(self.zmOut, 0, 0, 0)
        sizer_4.Add(sizer_3_copy, 0, wx.EXPAND, 0)
        sizer_2.Add(sizer_4, 0, wx.EXPAND, 0)
        sizer_1.Add(sizer_2, 1, wx.EXPAND, 0)
        self.SetSizer(sizer_1)
        sizer_1.Fit(self)
        self.Layout()
        # end wxGlade

    # Event handlers below are wxGlade stubs; they only report that they
    # are unimplemented and let the event propagate.
    def OnUp(self, event):  # wxGlade: OGLFrame.<event_handler>
        print "Event handler `OnUp' not implemented!"
        event.Skip()

    def OnLeft(self, event):  # wxGlade: OGLFrame.<event_handler>
        print "Event handler `OnLeft' not implemented!"
        event.Skip()

    def OnRight(self, event):  # wxGlade: OGLFrame.<event_handler>
        print "Event handler `OnRight' not implemented!"
        event.Skip()

    def OnDown(self, event):  # wxGlade: OGLFrame.<event_handler>
        print "Event handler `OnDown' not implemented!"
        event.Skip()

    def OnSpUp(self, event):  # wxGlade: OGLFrame.<event_handler>
        print "Event handler `OnSpUp' not implemented!"
        event.Skip()

    def OnSpLeft(self, event):  # wxGlade: OGLFrame.<event_handler>
        print "Event handler `OnSpLeft' not implemented!"
        event.Skip()

    def OnSpRight(self, event):  # wxGlade: OGLFrame.<event_handler>
        print "Event handler `OnSpRight' not implemented!"
        event.Skip()

    def OnSpDown(self, event):  # wxGlade: OGLFrame.<event_handler>
        print "Event handler `OnSpDown' not implemented!"
        event.Skip()

    def OnSpCCW(self, event):  # wxGlade: OGLFrame.<event_handler>
        print "Event handler `OnSpCCW' not implemented!"
        event.Skip()

    def OnSpCW(self, event):  # wxGlade: OGLFrame.<event_handler>
        print "Event handler `OnSpCW' not implemented!"
        event.Skip()

    def OnZmOut(self, event):  # wxGlade: OGLFrame.<event_handler>
        print "Event handler `OnZmOut' not implemented!"
        event.Skip()

    def OnZmIn(self, event):  # wxGlade: OGLFrame.<event_handler>
        print "Event handler `OnZmIn' not implemented!"
        event.Skip()

# end of class OGLFrame
if __name__ == "__main__":
    # Standalone preview: create the wx application, show the frame and
    # enter the event loop (Python 2 / classic wx API).
    app = wx.PySimpleApp(0)
    wx.InitAllImageHandlers()
    oglFrame = OGLFrame(None, -1, "")
    app.SetTopWindow(oglFrame)
    oglFrame.Show()
    app.MainLoop()
| StarcoderdataPython |
3231139 | # Copyright Contributors to the Rez project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Release a new version of rez.
Read RELEASE.md before using this utility.
"""
from __future__ import print_function
import argparse
import os
from datetime import date
from getpass import getpass
from pipes import quote
import subprocess
import sys
try:
    import requests
except ImportError:
    # requests is a hard requirement for talking to the GitHub API.
    sys.stderr.write("Requires python requests module.\n")
    sys.exit(1)

# Make the in-repo rez sources importable so we read the version being
# released, not whatever copy happens to be installed.
source_path = os.path.dirname(os.path.realpath(__file__))
src_path = os.path.join(source_path, "src")
sys.path.insert(0, src_path)

from rez.utils._version import _rez_version


# API base (used with an api. prefix) and plain web base for links.
github_baseurl = "github.com/repos/nerdvegas/rez"
github_baseurl2 = "github.com/nerdvegas/rez"
verbose = False

# https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
# requires 'public_repo' access
#
github_token = os.environ["GITHUB_RELEASE_REZ_TOKEN"]
def run_command(*nargs):
    """Run a command and return its stripped stdout as text.

    Exits the process if the command fails.

    :param nargs: Command and arguments, one string each.
    :return: str - the command's stdout, stripped of surrounding whitespace.
    """
    if verbose:
        print("RUNNING: %s" % ' '.join(map(quote, nargs)))

    # BUGFIX: without universal_newlines, communicate() returns bytes on
    # python3, so callers comparing against str (eg check_on_master's
    # `branch != "master"`) always mismatched. Decode to text here.
    proc = subprocess.Popen(
        nargs, stdout=subprocess.PIPE, universal_newlines=True)
    out, _ = proc.communicate()

    if proc.returncode:
        sys.stderr.write("Aborting, failed with exit code %d.\n" % proc.returncode)
        sys.exit(proc.returncode)

    return out.strip()
def github_request(method, endpoint, headers=None, **kwargs):
    """Perform an authenticated request against the rez GitHub API.

    :param method: HTTP verb, eg "get" or "post".
    :param endpoint: Path below the repo API root, eg "releases/latest".
    :param headers: Optional extra headers; never mutated.
    :return: requests.Response
    """
    # Copy the caller's headers (if any) before injecting the auth token,
    # so the caller's dict is left untouched.
    merged_headers = dict(headers or {})
    merged_headers["Authorization"] = "token " + github_token

    url = "https://api.%s/%s" % (github_baseurl, endpoint)
    return requests.request(method, url, headers=merged_headers, **kwargs)
def parse_topmost_changelog():
    """Parse the most recent release section out of CHANGELOG.md.

    :return: dict with keys "version" (eg "2.38.0"), "name" (the full
        heading text, eg "2.38.0 (2019-07-20)") and "body" (the markdown
        body of that section).
    :raises RuntimeError: if CHANGELOG.md contains no release heading.
    """
    result = {}
    body_lines = []

    with open("CHANGELOG.md") as f:
        for line in f.readlines():
            parts = line.split()

            # eg: ## 2.38.0 (2019-07-20)
            if parts and parts[0] == "##":
                if result.get("version"):
                    # Hit the next (older) release heading - the topmost
                    # section is complete.
                    result["body"] = ''.join(body_lines).strip()
                    return result

                result["version"] = parts[1]
                result["name"] = ' '.join(parts[1:])

            elif result.get("version"):
                # GitHub seems to treat separate lines in the md as line breaks,
                # rather than keeping them in the same paragraph as a typical
                # md renderer would. So patch that up here.
                #
                br_chars = ('*', '-', '#', '[')

                if body_lines and \
                        body_lines[-1].strip() and \
                        body_lines[-1][0] not in br_chars and \
                        line.strip() and \
                        line[0] not in br_chars:
                    # append to previous line
                    body_lines[-1] = body_lines[-1].rstrip() + ' ' + line
                else:
                    body_lines.append(line)

    # BUGFIX: previously this fell through to `assert False`, which crashed
    # when the changelog contained exactly one release section (no older
    # heading to terminate on), and was stripped entirely under -O.
    if result.get("version"):
        result["body"] = ''.join(body_lines).strip()
        return result

    raise RuntimeError("No release section found in CHANGELOG.md")
def check_on_master():
    """Exit with an error unless the current git branch is master."""
    current_branch = run_command("git", "branch", "--contains").split()[-1]
    if current_branch == "master":
        return

    sys.stderr.write("Must be run from master.\n")
    sys.exit(1)
def push_codebase():
    """Push the current branch to the default remote."""
    run_command("git", "push")
def create_and_push_tag():
    """Tag HEAD with the current rez version and push the tag to origin."""
    run_command("git", "tag", _rez_version)
    run_command("git", "push", "origin", _rez_version)
def create_github_release_notes():
    """Create a GitHub release for the current version from CHANGELOG.md.

    Exits with an error if the release already exists or the topmost
    changelog entry does not match the current rez version.
    """
    # check if latest release notes already match current version
    response = github_request("get", "releases/latest")
    response.raise_for_status()
    latest_release_tag = response.json()["tag_name"]

    if latest_release_tag == _rez_version:
        sys.stderr.write("Release for %s already exists.\n" % _rez_version)
        sys.exit(1)

    # parse latest release out of changelog
    changelog = parse_topmost_changelog()

    tag_name = changelog["version"]
    if tag_name != _rez_version:
        sys.stderr.write(
            "Latest entry in changelog (%s) doesn't match current version (%s).\n"
            % (tag_name, _rez_version)
        )
        sys.exit(1)

    data = dict(
        tag_name=_rez_version,
        name=changelog["name"],
        body=changelog["body"]
    )

    # create the release on github
    response = github_request(
        "post",
        "releases",
        json=data,
        headers={"Content-Type": "application/json"}
    )
    response.raise_for_status()

    url = "https://%s/releases/tag/%s" % (github_baseurl, _rez_version)
    print("Created release notes: " + url)
def generate_changelog_entry(issue_nums):
    """Print a markdown CHANGELOG.md entry for the current rez version.

    issue_nums: iterable of GitHub issue/PR numbers to include. Each is
    fetched from the GitHub API and rendered as a markdown bullet, with
    pull requests and plain issues listed under separate headings.
    Exits with status 1 if the changelog already has an entry for this
    version, or if any issue number does not exist on GitHub.
    """
    # parse previous release out of changelog
    changelog = parse_topmost_changelog()
    previous_version = changelog["version"]
    if previous_version == _rez_version:
        sys.stderr.write(
            "Latest entry in changelog (%s) matches current version (%s).\n"
            % (previous_version, _rez_version)
        )
        sys.exit(1)
    # get issues and PRs from cli
    pr_lines = []
    issue_lines = []
    for issue_num in sorted(issue_nums):
        # note that 'issues' endpoint also returns PRs
        response = github_request(
            "get",
            "issues/%d" % issue_num,
            headers={"Content-Type": "application/json"}
        )
        if response.status_code == 404:
            sys.stderr.write("Issue/PR does not exist: %d\n" % issue_num)
            sys.exit(1)
        response.raise_for_status()
        data = response.json()
        url = data["html_url"]
        user = data["user"]
        title = data["title"]
        # strip leading '-' chars from the title (presumably so it does
        # not render as a nested markdown list item) -- TODO confirm
        title = title.lstrip('-')
        # The API marks PRs with a "pull_request" key on the issue object.
        if "pull_request" in data:
            pr_lines.append(
                "- %s [\\#%d](%s) ([%s](%s))"
                % (title, issue_num, url, user["login"], user["html_url"])
            )
        else:
            # issue
            issue_lines.append(
                "- %s [\\#%d](%s)"
                % (title, issue_num, url)
            )
    # print title section
    today = date.today()
    print(
        "## %s (%d-%02d-%02d)" %
        (_rez_version, today.year, today.month, today.day)
    )
    print(
        "[Source](https://%s/tree/%s) | [Diff](https://%s/compare/%s...%s)" %
        (github_baseurl2, _rez_version, github_baseurl2, previous_version, _rez_version)
    )
    print("")
    # print PRs and issues
    if pr_lines:
        print(
            "**Merged pull requests:**\n\n" +
            "\n".join(pr_lines)
        )
    if issue_lines:
        if pr_lines:
            print("")
        print(
            "**Closed issues:**\n\n" +
            "\n".join(issue_lines)
        )
    print("")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s", "--step", choices=("push", "tag", "release_notes"),
        help="Just run one step of the release process")
    parser.add_argument(
        "-c", "--changelog", nargs='*', metavar="ISSUE", type=int,
        help="Generate changelog entry to be added to CHANGELOG.md")
    parser.add_argument(
        "-v", "--verbose", action="store_true",
        help="Verbose mode")
    opts = parser.parse_args()
    # Rebinds the module-level 'verbose' flag read elsewhere in this file.
    verbose = opts.verbose
    # --changelog only prints a changelog entry and exits; it does not
    # push, tag, or create a release.
    if opts.changelog is not None:
        issue_nums = opts.changelog
        generate_changelog_entry(issue_nums)
        sys.exit(0)
    print("Releasing rez-%s..." % _rez_version)
    def doit(step):
        # True when this step should run: either no -s was given (run
        # every step) or -s names exactly this step.
        return (opts.step is None) or (step == opts.step)
    check_on_master()
    # Release steps, in order: push commits, push the version tag, then
    # publish the GitHub release notes.
    if doit("push"):
        push_codebase()
    if doit("tag"):
        create_and_push_tag()
    if doit("release_notes"):
        create_github_release_notes()
| StarcoderdataPython |
9781426 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
I am a support module for making SOCKSv4 servers with twistd.
"""
from twisted.protocols import socks
from twisted.python import usage
from twisted.application import internet
import sys
class Options(usage.Options):
    """Command-line options for the twistd SOCKSv4 server plugin.

    NOTE: this module is Python 2 (see the print statements in
    makeService below).
    """
    synopsis = "[-i <interface>] [-p <port>] [-l <file>]"
    optParameters = [["interface", "i", "127.0.0.1", "local interface to which we listen"],
                     ["port", "p", 1080, "Port on which to listen"],
                     ["log", "l", None, "file to log connection data to"]]
    compData = usage.Completions(
        optActions={"log": usage.CompleteFiles("*.log"),
                    "interface": usage.CompleteNetInterfaces()}
        )
    longdesc = "Makes a SOCKSv4 server."
def makeService(config):
    """Build the TCPServer service running a SOCKSv4 factory.

    config: an Options instance (interface, port, log file). Warns on
    stdout when binding to a non-local interface. Python 2 only.
    """
    if config["interface"] != "127.0.0.1":
        print
        print "WARNING:"
        print " You have chosen to listen on a non-local interface."
        print " This may allow intruders to access your local network"
        print " if you run this on a firewall."
        print
    t = socks.SOCKSv4Factory(config['log'])
    portno = int(config['port'])
    return internet.TCPServer(portno, t, interface=config['interface'])
| StarcoderdataPython |
1970022 | """CHAPTER 30
WIND LOADS: COMPONENTS AND CLADDING
"""
from types import SimpleNamespace
from asce7.common import Deg, Log
#############################################################
# 30.3.2 Design Wind Pressures
#############################################################
def eq30p3d1_GCp_interpolant(figure):
    """Return the (GCp) lookup callable for the named figure.

    figure: a key of EQ30P3D1_FIGURES_GCp_LOOKUP (e.g. "gable", "hip");
    figures not yet implemented map to NotImplemented. Raises KeyError
    for unknown figure names.
    """
    return EQ30P3D1_FIGURES_GCp_LOOKUP[figure]
# Fig. 30.3-1 Components and Cladding [h ≤ 60 ft (h ≤ 18.3 m)]: External Pressure Coefficients, (GCp ),
# for Enclosed and Partially Enclosed Buildings—Walls
# TODO
# Figs. 30.3-2A–I (flat roofs, gable roofs and hip roofs)
# Fig. 30.3-2A Components and Cladding [h ≤ 60 ft (h ≤ 18.3 m)]: External Pressure Coefficients, (GCp), for Enclosed
# and Partially Enclosed Buildings—Gable Roofs, θ ≤ 7°
_FIG30P3D2A_GCp_ROOF_STR = """
0 deg to 7 deg Roof Slope
Zone 1' 1 2 3 Down
area (sq ft) 1 10 100 2000
GCp 0.3 0.3 0.2 0.2
Zone 1' Up
area (sq ft) 1 100 1000 2000
GCp -0.9 -0.9 -0.4 -0.4
Zone 1 Up
area (sq ft) 1 10 500 2000
GCp -1.7 -1.7 -1.0 -1.0
Zone 2 Up
area (sq ft) 1 10 500 2000
GCp -2.1 -2.1 -1.4 -1.4
Zone 3 Up
area (sq ft) 1 10 500 2000
GCp -3.2 -3.2 -1.4 -1.4
"""[1:-1]
FIG30P3D2A_GCp_ROOF_NS = SimpleNamespace()
FIG30P3D2A_GCp_ROOF_NS.roof_type = "gable"
FIG30P3D2A_GCp_ROOF_NS.roof_slope = (Deg(0), Deg(7))
FIG30P3D2A_GCp_ROOF_NS.location = ("roof", "overhang")
FIG30P3D2A_GCp_ROOF_NS.zone = ("1'", "1", "2", "3")
FIG30P3D2A_GCp_ROOF_DICT = {
# TODO
}
FIG30P3D2A_GCp_DICT = {
"roof": FIG30P3D2A_GCp_ROOF_DICT,
# TODO: overhang figure
}
def filter29p4p4(h):
    """Applicability test for Section 30.3.2 Design Wind Pressures.

    h: mean roof height in feet. Returns True when h does not exceed
    60 ft (the low-rise limit for this section).
    """
    return not h > 60
def fig30p3d2A_GCp(location, zone, A):
    """For Equation 30.3-1: Design wind pressures on C&C elements of low-rise buildings and buildings with h ≤ 60 ft

    From Figure 30.3-2A: Components and Cladding [h ≤ 60 ft (h ≤ 18.3 m)]: External Pressure Coefficients, (GCp),
    for Enclosed and Partially Enclosed Buildings—Gable Roofs, θ ≤ 7°

    location: "roof" ("overhang" is still a TODO in FIG30P3D2A_GCp_DICT).
    zone: one of "1'", "1", "2", "3".
    A: tributary area (sq ft); the stored callable is evaluated at Log(A).

    NOTE(review): FIG30P3D2A_GCp_ROOF_DICT is currently an empty TODO
    stub, so every call raises KeyError until it is populated.
    """
    return FIG30P3D2A_GCp_DICT[location][zone](Log(A))
def fig30p3d2A_zone_check(d1, d2, h):
    """From Figure 30.3-2A: zone definition figure for Zone 1', 1, 2, 3

    d1, d2: distances (ft) from two nearest building edges
    h: building height (ft) to eaves

    Not yet implemented: the body is a placeholder and returns None.
    """
    ...  # TODO
# Fig. 30.3-3 (stepped roofs)
# TODO
# Fig. 30.3-4 (multispan gable roofs)
# TODO
# Figs. 30.3-5A–B (monoslope roofs)
# TODO
# Fig. 30.3-6 (sawtooth roofs)
# TODO
# Fig. 30.3-7 (domed roofs)
# TODO
# Fig. 27.3-3, Note 4 (arched roofs)
# TODO
# Fig. 27.3-3, Note 4 (arched roofs)
# TODO
# LOOKUP FOR ALL CHAPTER 30 GCp FIGURES
# TODO
EQ30P3D1_FIGURES_GCp_LOOKUP = dict(zip(
("walls", "flat", "gable", "hip", "stepped", "multispan gable", "monoslope", "sawtooth", "domed", "arched"),
(NotImplemented,)*2 + (fig30p3d2A_GCp,)*3 + (NotImplemented,)*5
))
| StarcoderdataPython |
1838964 | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
    """Render the login/index page."""
    return render(request, "index.html")
#登录动作
def login_action(request):
    """Handle a login form submission.

    On POST, compares the submitted username/password against the
    hard-coded credentials and returns a success response or re-renders
    the login page with an error message. Non-POST requests now fall
    through to the login page instead of returning None (which Django
    would treat as a server error).
    """
    if request.method == 'POST':
        username = request.POST.get('username', '')
        password = request.POST.get('password', '')
        # NOTE(review): '<PASSWORD>' looks like an anonymized placeholder
        # for a hard-coded credential; real apps should authenticate via
        # django.contrib.auth instead.
        if username == 'admin' and password == '<PASSWORD>':
            return HttpResponse('login success!')
        else:
            # Fixed typo in the user-facing message: 'usename' -> 'username'.
            return render(request, 'index.html', {'error': 'username or password error!'})
    # GET (or any other method): just show the login page.
    return render(request, 'index.html')
1955538 | <reponame>skyscooby/sahasrahbot
import os
from alttprbot.alttprgen.preset import get_preset
from alttprbot.tournament.core import TournamentConfig
from alttprbot_discord.bot import discordbot
from .sglcore import SGLRandomizerTournamentRace
class SMZ3(SGLRandomizerTournamentRace):
    """SGL 2021 SMZ3 tournament race handler.

    Wires the sgl21smz3 event to its racetime.gg category/goal and the
    SGL Discord guild/channels, and rolls seeds from the SMZ3 'normal'
    preset.
    """
    async def configuration(self):
        """Return the static TournamentConfig for this event."""
        guild = discordbot.get_guild(590331405624410116)
        return TournamentConfig(
            guild=guild,
            racetime_category='sgl',
            racetime_goal="Super Metroid Link to the Past Combo Randomizer",
            event_slug="sgl21smz3",
            audit_channel=discordbot.get_channel(772351829022474260),
            commentary_channel=discordbot.get_channel(631564559018098698),
            coop=False,
            # Results spreadsheet id comes from the environment.
            gsheet_id=os.environ.get("SGL_RESULTS_SHEET"),
            auto_record=True
        )
    async def roll(self):
        """Roll a seed from the 'normal' SMZ3 tournament preset.

        Stores the seed object and the resolved preset dictionary on self.
        """
        self.seed, self.preset_dict = await get_preset('normal', tournament=True, randomizer='smz3')
    @property
    def seed_info(self):
        """Permalink and code string for the rolled seed."""
        return f"{self.seed.url} - {self.seed.code}"
| StarcoderdataPython |
5178905 | <gh_stars>1-10
"""
(c) Copyright Fair Isaac Corporation 2017. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
THIS FILE IS DEPRECATED AND MAY BE REMOVED WITHOUT WARNING!
DO NOT CALL THESE FUNCTIONS IN YOUR CODE!
WE ARE MOVING ALL SOLVER INTERFACES TO THE REDUCTIONS FOLDER.
"""
import cvxpy.settings as s
import cvxpy.interface as intf
import cvxpy.lin_ops.lin_utils as linutils
from cvxpy.problems.solvers.solver import Solver
import numpy
def makeMstart(A, n, ifCol):
    """Build the Xpress 'mstart' offset array for a sparse matrix.

    Counts the nonzeros of A along rows (ifCol=0) or columns (ifCol=1),
    pads the count vector out to length n, and returns the prefix sum
    with a leading zero -- i.e. an (n + 1)-element array of start
    offsets, like a CSC/CSR index pointer.
    """
    counts = numpy.bincount(A.nonzero()[ifCol])
    head = numpy.array([0], dtype=numpy.int64)
    tail = numpy.array([0] * (n - len(counts)), dtype=numpy.int64)
    return numpy.cumsum(numpy.concatenate((head, counts, tail)))
class XPRESS(Solver):
"""
Interface for the FICO Xpress-Optimizer.
Uses its Python interface to exchange data
with CVXPY.
"""
# Main member of this class: an Xpress problem. Marked with a
# trailing "_" to denote a member
prob_ = None
translate_back_QP_ = False
# Solver capabilities.
LP_CAPABLE = True
SOCP_CAPABLE = True
MIP_CAPABLE = True
SDP_CAPABLE = False
EXP_CAPABLE = False
solvecount = 0
version = -1
    def name(self):
        """Return the cvxpy identifier string for this solver (s.XPRESS)."""
        return s.XPRESS
    def import_solver(self):
        """Import the xpress module and cache its version on self.

        Raises ImportError when the Xpress Python interface is not
        installed; cvxpy uses this to detect solver availability.
        """
        import xpress
        self.version = xpress.getversion()
    def matrix_intf(self):
        """Return the interface for matrices passed to the solver.

        Xpress receives constraint data as sparse (CSC) matrices.
        """
        return intf.DEFAULT_SPARSE_INTF  # receive a sparse (CSC) matrix
    def vec_intf(self):
        """Return the interface for vectors passed to the solver."""
        return intf.DEFAULT_INTF
    def split_constr(self, constr_map):
        """Extracts the equality, inequality, and nonlinear constraints.

        Equality (EQ) and inequality (LEQ) constraints are merged into a
        single list; the second and third tuple slots are always empty
        for this solver.

        Parameters
        ----------
        constr_map : dict
            A dict of the canonicalized constraints.

        Returns
        -------
        tuple
            (eq_constr, ineq_constr, nonlin_constr)
        """
        return (constr_map[s.EQ] + constr_map[s.LEQ], [], [])
    def get_status_maps(self):
        """Create status maps from Xpress to CVXPY.

        Returns
        -------
        tuple
            (status_map_lp, status_map_mip): dicts keyed by Xpress LP
            and MIP status codes respectively, mapping to cvxpy status
            constants.
        """
        # Deferred import: xpress may not be installed when this class
        # is merely registered.
        import xpress
        # Map of Xpress' LP status to CVXPY status.
        status_map_lp = {
            xpress.lp_unstarted: s.SOLVER_ERROR,
            xpress.lp_optimal: s.OPTIMAL,
            xpress.lp_infeas: s.INFEASIBLE,
            xpress.lp_cutoff: s.OPTIMAL_INACCURATE,
            xpress.lp_unfinished: s.OPTIMAL_INACCURATE,
            xpress.lp_unbounded: s.UNBOUNDED,
            xpress.lp_cutoff_in_dual: s.OPTIMAL_INACCURATE,
            xpress.lp_unsolved: s.OPTIMAL_INACCURATE,
            xpress.lp_nonconvex: s.SOLVER_ERROR
        }
        # Same map, for MIPs
        status_map_mip = {
            xpress.mip_not_loaded: s.SOLVER_ERROR,
            xpress.mip_lp_not_optimal: s.SOLVER_ERROR,
            xpress.mip_lp_optimal: s.SOLVER_ERROR,
            xpress.mip_no_sol_found: s.SOLVER_ERROR,
            xpress.mip_solution: s.OPTIMAL_INACCURATE,
            xpress.mip_infeas: s.INFEASIBLE,
            xpress.mip_optimal: s.OPTIMAL,
            xpress.mip_unbounded: s.UNBOUNDED
        }
        return (status_map_lp, status_map_mip)
def solve(self, objective, constraints, cached_data,
warm_start, verbose, solver_opts):
"""Returns the result of the call to the solver.
Parameters
----------
objective : LinOp
The canonicalized objective.
constraints : list
The list of canonicalized cosntraints.
cached_data : dict
A map of solver name to cached problem data.
warm_start : bool
Should the previous solver result be used to warm_start?
verbose : bool
Should the solver print output?
solver_opts : dict
Additional arguments for the solver.
Returns
-------
tuple
(status, optimal value, primal, equality dual, inequality dual)
"""
import xpress
verbose = True
# Get problem data
data = super(XPRESS, self).get_problem_data(objective, constraints, cached_data)
origprob = None
if 'original_problem' in solver_opts.keys():
origprob = solver_opts['original_problem']
if 'no_qp_reduction' in solver_opts.keys() and solver_opts['no_qp_reduction'] is True:
self.translate_back_QP_ = True
c = data[s.C] # objective coefficients
dims = data[s.DIMS] # contains number of columns, rows, etc.
nrowsEQ = dims[s.EQ_DIM]
nrowsLEQ = dims[s.LEQ_DIM]
nrows = nrowsEQ + nrowsLEQ
# linear constraints
b = data[s.B][:nrows] # right-hand side
A = data[s.A][:nrows] # coefficient matrix
data[s.BOOL_IDX] = solver_opts[s.BOOL_IDX]
data[s.INT_IDX] = solver_opts[s.INT_IDX]
n = c.shape[0] # number of variables
solver_cache = cached_data[self.name()]
###########################################################################################
# Allow warm start if all dimensions match, i.e., if the
# modified problem has the same number of rows/column and the
# same list of cone sizes. Failing that, we can just take the
# standard route and build the problem from scratch.
if warm_start and \
solver_cache.prev_result is not None and \
n == len(solver_cache.prev_result['obj']) and \
nrows == len(solver_cache.prev_result['rhs']) and \
data[s.DIMS][s.SOC_DIM] == solver_cache.prev_result['cone_ind']:
# We are re-solving a problem that was previously solved
# Initialize the problem as the same as the previous solve
self.prob_ = solver_cache.prev_result['problem']
c0 = solver_cache.prev_result['obj']
A0 = solver_cache.prev_result['mat']
b0 = solver_cache.prev_result['rhs']
vartype0 = solver_cache.prev_result['vartype']
# If there is a parameter in the objective, it may have changed.
if len(linutils.get_expr_params(objective)) > 0:
dci = numpy.where(c != c0)[0]
self.prob_.chgobj(dci, c[dci])
# Get equality and inequality constraints.
sym_data = self.get_sym_data(objective, constraints, cached_data)
all_constrs, _, _ = self.split_constr(sym_data.constr_map)
# If there is a parameter in the constraints,
# A or b may have changed.
if any(len(linutils.get_expr_params(con.expr)) > 0 for con in constraints):
dAi = (A != A0).tocoo() # retrieves row/col nonzeros as a tuple of two arrays
dbi = numpy.where(b != b0)[0]
if dAi.getnnz() > 0:
self.prob_.chgmcoef(dAi.row, dAi.col,
[A[i, j] for (i, j) in list(zip(dAi.row, dAi.col))])
if len(dbi) > 0:
self.prob_.chgrhs(dbi, b[dbi])
vartype = []
self.prob_.getcoltype(vartype, 0, len(data[s.C]) - 1)
vti = (numpy.array(vartype) != numpy.array(vartype0))
if any(vti):
self.prob_.chgcoltype(numpy.arange(len(c))[vti], vartype[vti])
############################################################################################
else:
# No warm start, create problem from scratch
# Problem
self.prob_ = xpress.problem()
mstart = makeMstart(A, len(c), 1)
varGroups = {} # If origprob is passed, used to tie IIS to original constraints
transf2Orig = {} # Ties transformation constraints to originals via varGroups
nOrigVar = len(c)
# From a summary knowledge of origprob.constraints() and
# the constraints list, the following seems to hold:
#
# 1) origprob.constraints is the list as generated by the
# user. origprob.constraints[i].size returns the number
# of actual rows in each constraint, while .constr_id
# returns its id (not necessarily numbered from 0).
#
# 2) constraints is also a list whose every element
# contains fields size and constr_id. These correspond
# to the items in origprob.constraints, though the list
# is in not in order of constr_id. Also, given that it
# refers to the transformed problem, it contains extra
# constraints deriving from the cone transformations,
# all with a constr_id and a size.
#
# Given this information, attempt to set names in varnames
# and linRownames so that they can be easily identified
# Load linear part of the problem.
if origprob is not None:
# The original problem was passed, we can take a
# better guess at the constraints and variable names.
nOrigVar = 0
orig_id = [i.id for i in origprob.constraints]
varnames = []
for v in origprob.variables():
nOrigVar += v.size[0]
if v.size[0] == 1:
varnames.append('{0}'. format(v.var_id))
else:
varnames.extend(['{0}_{1:d}'. format(v.var_id, j)
for j in range(v.size[0])])
varnames.extend(['aux_{0:d}'.format(i) for i in range(len(varnames), len(c))])
# Construct constraint name list by checking constr_id for each
linRownames = []
for con in constraints:
if con.constr_id in orig_id:
prefix = ''
if type(con.constr_id) == int:
prefix = 'row_'
if con.size[0] == 1:
name = '{0}{1}'.format(prefix, con.constr_id)
linRownames.append(name)
transf2Orig[name] = con.constr_id
else:
names = ['{0}{1}_{2:d}'.format(prefix, con.constr_id, j)
for j in range(con.size[0])]
linRownames.extend(names)
for i in names:
transf2Orig[i] = con.constr_id
# Tie auxiliary variables to constraints. Scan all
# auxiliary variables in the objective function and in
# the corresponding columns of A.indices
iObjQuad = 0 # keeps track of quadratic quantities in the objective
for i in range(nOrigVar, len(c)):
if c[i] != 0:
varGroups[varnames[i]] = 'objF_{0}'.format(iObjQuad)
iObjQuad += 1
if len(A.indices[mstart[i]:mstart[i+1]]) > 0:
varGroups[varnames[i]] = linRownames[min(A.indices[mstart[i]:mstart[i+1]])]
else:
# fall back to flat naming. Warning: this mixes
# original with auxiliary variables.
varnames = ['x_{0:05d}'. format(i) for i in range(len(c))]
linRownames = ['lc_{0:05d}'.format(i) for i in range(len(b))]
self.prob_.loadproblem("CVXproblem",
['E'] * nrowsEQ + ['L'] * nrowsLEQ, # qrtypes
b, # rhs
None, # range
c, # obj coeff
mstart, # mstart
None, # mnel
A.indices, # row indices
A.data, # coefficients
[-xpress.infinity] * len(c), # lower bound
[xpress.infinity] * len(c), # upper bound
colnames=varnames, # column names
rownames=linRownames) # row names
x = numpy.array(self.prob_.getVariable()) # get whole variable vector
# Set variable types for discrete variables
self.prob_.chgcoltype(data[s.BOOL_IDX] + data[s.INT_IDX],
'B' * len(data[s.BOOL_IDX]) + 'I' * len(data[s.INT_IDX]))
currow = nrows
iCone = 0
auxVars = set(range(nOrigVar, len(c)))
# Conic constraints
#
# Quadratic objective and constraints fall in this category,
# as all quadratic stuff is converted into a cone via a linear transformation
for k in dims[s.SOC_DIM]:
# k is the size of the i-th cone, where i is the index
# within dims [s.SOC_DIM]. The cone variables in
# CVXOPT, apparently, are separate variables that are
# marked as conic but not shown in a cone explicitly.
A = data[s.A][currow: currow + k].tocsr()
b = data[s.B][currow: currow + k]
currow += k
if self.translate_back_QP_:
# Conic problem passed by CVXPY is translated back
# into a QP problem. The problem is passed to us
# as follows:
#
# min c'x
# s.t. Ax <>= b
# y[i] = P[i]' * x + b[i]
# ||y[i][1:]||_2 <= y[i][0]
#
# where P[i] is a matrix, b[i] is a vector. Get
# rid of the y variables by explicitly rewriting
# the conic constraint as quadratic:
#
# y[i][1:]' * y[i][1:] <= y[i][0]^2
#
# and hence
#
# (P[i][1:]' * x + b[i][1:])^2 <= (P[i][0]' * x + b[i][0])^2
Plhs = A[1:]
Prhs = A[0]
indRowL, indColL = Plhs.nonzero()
indRowR, indColR = Prhs.nonzero()
coeL = Plhs.data
coeR = Prhs.data
lhs = list(b[1:])
rhs = b[0]
for i in range(len(coeL)):
lhs[indRowL[i]] -= coeL[i] * x[indColL[i]]
for i in range(len(coeR)):
rhs -= coeR[i] * x[indColR[i]]
self.prob_.addConstraint(xpress.Sum([lhs[i]**2 for i in range(len(lhs))])
<= rhs**2)
else:
# Create new (cone) variables and add them to the problem
conevar = numpy.array([xpress.var(name='cX{0:d}_{1:d}'.format(iCone, i),
lb=-xpress.infinity if i > 0 else 0)
for i in range(k)])
self.prob_.addVariable(conevar)
initrow = self.prob_.attributes.rows
mstart = makeMstart(A, k, 0)
trNames = ['linT_qc{0:d}_{1:d}'.format(iCone, i) for i in range(k)]
# Linear transformation for cone variables <--> original variables
self.prob_.addrows(['E'] * k, # qrtypes
b, # rhs
mstart, # mstart
A.indices, # ind
A.data, # dmatval
names=trNames) # row names
self.prob_.chgmcoef([initrow + i for i in range(k)],
conevar, [1] * k)
conename = 'cone_qc{0:d}'.format(iCone)
# Real cone on the cone variables (if k == 1 there's no
# need for this constraint as y**2 >= 0 is redundant)
if k > 1:
self.prob_.addConstraint(
xpress.constraint(constraint=xpress.Sum
(conevar[i]**2 for i in range(1, k))
<= conevar[0] ** 2,
name=conename))
auxInd = list(set(A.indices) & auxVars)
if len(auxInd) > 0:
group = varGroups[varnames[auxInd[0]]]
for i in trNames:
transf2Orig[i] = group
transf2Orig[conename] = group
iCone += 1
# Objective. Minimize is by default both here and in CVXOPT
self.prob_.setObjective(xpress.Sum(c[i] * x[i] for i in range(len(c))))
# End of the conditional (warm-start vs. no warm-start) code,
# set options, solve, and report.
# Set options
#
# The parameter solver_opts is a dictionary that contains only
# one key, 'solver_opt', and its value is a dictionary
# {'control': value}, matching perfectly the format used by
# the Xpress Python interface.
if verbose:
self.prob_.controls.miplog = 2
self.prob_.controls.lplog = 1
self.prob_.controls.outputlog = 1
else:
self.prob_.controls.miplog = 0
self.prob_.controls.lplog = 0
self.prob_.controls.outputlog = 0
if 'solver_opts' in solver_opts.keys():
self.prob_.setControl(solver_opts['solver_opts'])
self.prob_.setControl({i: solver_opts[i] for i in solver_opts.keys()
if i in xpress.controls.__dict__.keys()})
# Solve
self.prob_.solve()
results_dict = {
'problem': self.prob_,
'status': self.prob_.getProbStatus(),
'obj_value': self.prob_.getObjVal(),
}
status_map_lp, status_map_mip = self.get_status_maps()
if self.is_mip(data):
status = status_map_mip[results_dict['status']]
else:
status = status_map_lp[results_dict['status']]
results_dict[s.XPRESS_TROW] = transf2Orig
results_dict[s.XPRESS_IIS] = None # Return no IIS if problem is feasible
if status in s.SOLUTION_PRESENT:
results_dict['x'] = self.prob_.getSolution()
if not self.is_mip(data):
results_dict['y'] = self.prob_.getDual()
elif status == s.INFEASIBLE:
# Retrieve all IIS. For LPs there can be more than one,
# but for QCQPs there is only support for one IIS.
iisIndex = 0
self.prob_.iisfirst(0) # compute all IIS
row, col, rtype, btype, duals, rdcs, isrows, icols = [], [], [], [], [], [], [], []
self.prob_.getiisdata(0, row, col, rtype, btype, duals, rdcs, isrows, icols)
origrow = []
for iRow in row:
if iRow.name in transf2Orig.keys():
name = transf2Orig[iRow.name]
else:
name = iRow.name
if name not in origrow:
origrow.append(name)
results_dict[s.XPRESS_IIS] = [{'orig_row': origrow,
'row': row,
'col': col,
'rtype': rtype,
'btype': btype,
'duals': duals,
'redcost': rdcs,
'isolrow': isrows,
'isolcol': icols}]
while self.prob_.iisnext() == 0:
iisIndex += 1
self.prob_.getiisdata(iisIndex,
row, col, rtype, btype, duals, rdcs, isrows, icols)
results_dict[s.XPRESS_IIS].append((
row, col, rtype, btype, duals, rdcs, isrows, icols))
return self.format_results(results_dict, data, cached_data)
    def format_results(self, results_dict, data, cached_data):
        """Converts the solver output into standard form.

        Also caches the current problem data (objective, matrix, rhs,
        cone indices, variable types) so a later call with
        warm_start=True can decide whether a true warm start is safe.

        Parameters
        ----------
        results_dict : dict
            The solver output.
        data : dict
            Information about the problem.
        cached_data : dict
            A map of solver name to cached problem data.

        Returns
        -------
        dict
            The solver output in standard form.
        """
        new_results = {}
        if results_dict["status"] != s.SOLVER_ERROR:
            solver_cache = cached_data[self.name()]
            self.prob_ = results_dict['problem']
            # Save variable types (continuous, integer, etc.)
            vartypes = []
            self.prob_.getcoltype(vartypes, 0, len(data[s.C]) - 1)
            solver_cache.prev_result = {
                # Save data of current problem so that if
                # warm_start==True in the next call, we check these
                # and decide whether to really do a warmstart.
                'problem': self.prob_,  # current problem
                'obj': data[s.C],  # objective coefficients
                'mat': data[s.A],  # matrix coefficients (+ lin tra for cones)
                'rhs': data[s.B],  # rhs of constraints (idem)
                'cone_ind': data[s.DIMS][s.SOC_DIM],  # cone indices (for the cone variables)
                'vartype': vartypes  # variable types
            }
        status_map_lp, status_map_mip = self.get_status_maps()
        if self.is_mip(data):
            new_results[s.STATUS] = status_map_mip[results_dict['status']]
        else:
            new_results[s.STATUS] = status_map_lp[results_dict['status']]
        if new_results[s.STATUS] in s.SOLUTION_PRESENT:
            new_results[s.PRIMAL] = results_dict['x']
            new_results[s.VALUE] = results_dict['obj_value']
            if not self.is_mip(data):
                # Flip the sign so duals follow cvxpy's convention.
                new_results[s.EQ_DUAL] = [-v for v in results_dict['y']]
        new_results[s.XPRESS_IIS] = results_dict[s.XPRESS_IIS]
        new_results[s.XPRESS_TROW] = results_dict[s.XPRESS_TROW]
        return new_results
    def get_problem_data(self, objective, constraints, cached_data):
        """Return the canonicalized problem data, additionally exposing
        the underlying Xpress problem object under key 'XPRESSprob'.
        """
        data = super(XPRESS, self).get_problem_data(objective, constraints, cached_data)
        data['XPRESSprob'] = self.prob_
        return data
| StarcoderdataPython |
274624 | __author__ = 'filiph'
| StarcoderdataPython |
3559763 |
def output(s, l):
    """Print the first l characters of s in reverse order, one per line."""
    for pos in range(l - 1, -1, -1):
        print(s[pos])
# Read a line and echo its characters in reverse order, one per line.
# NOTE(review): raw_input is Python 2; under Python 3 this would be input().
i = raw_input('input:')
l = len(i)
# output() returns None, so the original `print output(i,l)` appended a
# stray "None" line after the reversed characters (and is a syntax error
# under Python 3); call it only for its printing side effect.
output(i, l)
import itertools

# All ordered arrangements (permutations) of 3 elements drawn from range(4).
perms = itertools.permutations(range(4), 3)
print(list(perms))
# Expected output: the 24 ordered 3-tuples over {0, 1, 2, 3}, beginning
# (0, 1, 2), (0, 1, 3), ... and ending (3, 2, 0), (3, 2, 1).

# Cartesian product of the string 'abc' with range(3).
pairs = itertools.product('abc', range(3))
print(list(pairs))
# Expected output:
# [('a', 0), ('a', 1), ('a', 2), ('b', 0), ('b', 1), ('b', 2),
#  ('c', 0), ('c', 1), ('c', 2)]
4823938 | <reponame>ChristopherMayes/xopt
import logging
from concurrent.futures import Executor
from typing import Dict, Optional, Callable
import torch
from botorch.utils.sampling import draw_sobol_samples
from ..data import gather_and_save_training_data, get_data_json
from ..generators.generator import BayesianGenerator
from ..utils import get_candidates, submit_candidates
from ...vocs_tools import get_bounds
def synch(
vocs: Dict,
evaluate: Callable,
n_initial_samples: int,
n_steps: int,
candidate_generator: BayesianGenerator,
executor: Executor,
output_path: str = "",
restart_file: Optional[str] = None,
initial_x: Optional[torch.Tensor] = None,
custom_model: Optional[Callable] = None,
tkwargs: Optional[Dict] = None,
logger: logging.Logger = None,
) -> tuple:
# generate initial samples if no initial samples are given
if restart_file is None:
if initial_x is None:
initial_x = draw_sobol_samples(
torch.tensor(get_bounds(vocs), **tkwargs), 1, n_initial_samples
)[0]
else:
initial_x = initial_x
# submit evaluation of initial samples
logger.info(f"submitting initial candidates")
initial_y = submit_candidates(initial_x, executor, vocs, evaluate, {})
data = gather_and_save_training_data(
list(initial_y), vocs, tkwargs, output_path=output_path
)
train_x, train_y, train_c, inputs, outputs = data
else:
data = get_data_json(restart_file, vocs, **tkwargs)
train_x = data["variables"]
train_y = data["objectives"]
train_c = data["constraints"]
inputs = data["inputs"]
outputs = data["outputs"]
# do optimization
logger.info("starting optimization loop")
for i in range(n_steps):
candidates = get_candidates(
train_x,
train_y,
vocs,
candidate_generator,
train_c=train_c,
custom_model=custom_model,
)
# observe candidates
logger.info(f"submitting candidates")
fut = submit_candidates(candidates, executor, vocs, evaluate, {})
data = gather_and_save_training_data(
list(fut),
vocs,
tkwargs,
train_x,
train_y,
train_c,
inputs,
outputs,
output_path=output_path,
)
train_x, train_y, train_c, inputs, outputs = data
return train_x, train_y, train_c, inputs, outputs
| StarcoderdataPython |
12864982 | # Copyright (c) 2017-2021, Lawrence Livermore National Security, LLC and
# other Shroud Project Developers.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (BSD-3-Clause)
#
########################################################################
"""
Generate a module for classes using PyBindGen
"""
import pybindgen
from pybindgen import (param, retval)
def generate(fp):
    """Write PyBindGen-generated C++ binding code to *fp*.

    fp: a writable file object (e.g. sys.stdout). Declares the module
    'classes' (including "classes.hpp"), then binds, in the C++
    namespace `classes`:
      - Class1: DIRECTION enum, int attribute m_flag, an int-taking and
        a default constructor, and Method1().
      - Singleton: exposed via its static getReference() accessor,
        returning a caller-owned reference.
    """
    mod = pybindgen.Module('classes')
    mod.add_include('"classes.hpp"')
    namespace = mod.add_cpp_namespace('classes')
    class1 = namespace.add_class('Class1')
    class1.add_enum('DIRECTION', ['UP', 'DOWN', 'LEFT', 'RIGHT'])
    # class1.add_function('AcceptEnum', None, [param('MyEnum_e', 'value')])
    class1.add_instance_attribute('m_flag', 'int')
    class1.add_constructor([param('int', 'flag')])
    class1.add_constructor([])
    class1.add_method('Method1', None, [])
    sclass = namespace.add_class("Singleton", is_singleton=True)
    sclass.add_method("getReference", retval("classes::Singleton&", caller_owns_return=True), [],
                      is_static=True)
    # mod.add_class('Class1',
    #               memory_policy=cppclass.ReferenceCountingMethodsPolicy(
    #                   incref_method='Ref',
    #                   decref_method='Unref',
    #                   peekref_method='PeekRef')
    #               )
    # mod.add_function('DoSomething', retval('Class1 *', caller_owns_return=False), [])
    mod.generate(fp)
if __name__ == '__main__':
    import sys
    # Emit the generated binding code to stdout.
    generate(sys.stdout)
| StarcoderdataPython |
6654659 | <gh_stars>10-100
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2012, <NAME> <<EMAIL>>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Plane wave solution and initializer.
"""
__all__ = ['PlaneWaveSolution', 'PlaneWaveAnchor', 'PlaneWaveHook']
import os
import math
import pickle
import numpy as np
from solvcon import anchor
from solvcon import hook
class PlaneWaveSolution(object):
    """Analytic plane-wave solution built from a material's Jacobians.

    Keyword arguments: 'wvec' (wave vector), 'ctr' (center point, same
    dimension as wvec), 'amp' (scalar amplitude), plus 'mtrl' and 'idx'
    consumed by _calc_eigen.
    """
    # FIXME: THIS GUY NEEDS UNIT TEST. Debugging in these classes is like pain
    # in the ass.
    def __init__(self, **kw):
        wvec = kw['wvec']
        ctr = kw['ctr']
        amp = kw['amp']
        assert len(wvec) == len(ctr)
        # calculate eigenvalues and eigenvectors.
        evl, evc = self._calc_eigen(**kw)
        # store data to self.
        # Amplitude vector: the selected eigenvector scaled so its
        # Euclidean norm equals amp.
        self.amp = evc * (amp / np.sqrt((evc**2).sum()))
        self.ctr = ctr
        self.wvec = wvec
        # Angular frequency = wave speed (eigenvalue) * |wvec|.
        self.afreq = evl * np.sqrt((wvec**2).sum())
        self.wsp = evl
    def _calc_eigen(self, *args, **kw):
        """Calculate and return a :py:class:`tuple` for eigenvalues and
        eigenvectors. This method needs to be subclassed.

        Uses kw['wvec'] (direction), kw['mtrl'] (material providing
        get_jacos()), and kw['idx'] (which sorted eigenpair to select).
        """
        wvec = kw['wvec']
        mtrl = kw['mtrl']
        idx = kw['idx']
        # Unit propagation direction.
        nml = wvec/np.sqrt((wvec**2).sum())
        # Directional Jacobian: sum of per-axis Jacobians weighted by nml.
        jacos = mtrl.get_jacos()
        jaco = jacos[0] * nml[0]
        for idm in range(1, len(nml)):
            jaco += jacos[idm] * nml[idm]
        evl, evc = np.linalg.eig(jaco)
        # Select the idx-th eigenpair in ascending eigenvalue order.
        srt = evl.argsort()
        evl = evl[srt[idx]].real
        evc = evc[:,srt[idx]].real
        # Fix the eigenvector's sign using its first component; the tiny
        # 1.e-200 offset presumably guards against division by zero --
        # TODO confirm.
        evc *= evc[0]/abs(evc[0]+1.e-200)
        return evl, evc
    def __call__(self, svr, asol, adsol):
        # Delegate evaluation of the plane wave (values and derivatives)
        # to the solver's algorithm object.
        svr.create_alg().calc_planewave(
            asol, adsol, self.amp, self.ctr, self.wvec, self.afreq)
class PlaneWaveAnchor(anchor.MeshAnchor):
    """Use :py:class:`PlaneWaveSolution` to calculate plane-wave solutions for
    :py:class:`VewaveSolver <.solver.VewaveSolver>`.
    """
    # FIXME: THIS GUY NEEDS UNIT TEST. The coupling with Hook isn't really
    # easy to debug.
    def __init__(self, svr, planewaves=None, **kw):
        assert None is not planewaves
        #: Sequence of :py:class:`PlaneWaveSolution` objects.
        self.planewaves = planewaves
        super(PlaneWaveAnchor, self).__init__(svr, **kw)
    def _calculate(self, asol):
        # Accumulate every plane wave's contribution into asol (values)
        # and self.adsol (derivatives).
        for pw in self.planewaves:
            pw(self.svr, asol, self.adsol)
    def provide(self):
        """Allocate the analytical/difference arrays and seed the solver.

        Fills svr.der['analytical'] with the plane-wave solution, copies
        it into svr.soln/svr.dsoln as initial condition, and records the
        (initially zero) numerical-vs-analytical difference.
        """
        ngstcell = self.svr.blk.ngstcell
        # Interior + ghost cell count.
        nacell = self.svr.blk.ncell + ngstcell
        # plane wave solution.
        asol = self.svr.der['analytical'] = np.empty(
            (nacell, self.svr.neq), dtype='float64')
        adsol = self.adsol = np.empty(
            (nacell, self.svr.neq, self.svr.blk.ndim),
            dtype='float64')
        asol.fill(0.0)
        self._calculate(asol)
        self.svr.soln[ngstcell:,:] = asol[ngstcell:,:]
        self.svr.dsoln[ngstcell:,:,:] = adsol[ngstcell:,:,:]
        # difference.
        diff = self.svr.der['difference'] = np.empty(
            (nacell, self.svr.neq), dtype='float64')
        diff[ngstcell:,:] = self.svr.soln[ngstcell:,:] - asol[ngstcell:,:]
    def postfull(self):
        """After each full time step, refresh the analytical solution and
        the numerical-vs-analytical difference for interior cells.
        """
        ngstcell = self.svr.ngstcell
        # plane wave solution.
        asol = self.svr.der['analytical']
        self._calculate(asol)
        # difference.
        diff = self.svr.der['difference']
        diff[ngstcell:,:] = self.svr.soln[ngstcell:,:] - asol[ngstcell:,:]
class PlaneWaveHook(hook.MeshHook):
    """Case-side companion of :py:class:`PlaneWaveAnchor`: collects the
    analytical and difference fields and reports Linf/L2 error norms."""
    # FIXME: THIS GUY NEEDS UNIT TEST. The coupling with Anchor isn't really
    # easy to debug.
    def __init__(self, svr, planewaves=None, **kw):
        """*planewaves* is required despite the keyword-argument form."""
        assert None is not planewaves
        #: Sequence of :py:class:`PlaneWaveSolution` objects.
        self.planewaves = planewaves
        #: A :py:class:`dict` holding the calculated norm.
        self.norm = dict()
        super(PlaneWaveHook, self).__init__(svr, **kw)
    def drop_anchor(self, svr):
        # Install the matching anchor with the same plane-wave list.
        svr.runanchors.append(
            PlaneWaveAnchor(svr, planewaves=self.planewaves)
        )
    def _calculate(self):
        """Collect the analytical/difference fields and compute per-equation
        Linf and L2 norms into self.norm."""
        neq = self.cse.execution.neq
        var = self.cse.execution.var  # NOTE(review): unused; candidate for removal
        asol = self._collect_interior(
            'analytical', inder=True, consider_ghost=True)
        diff = self._collect_interior(
            'difference', inder=True, consider_ghost=True)
        norm_Linf = np.empty(neq, dtype='float64')
        norm_L2 = np.empty(neq, dtype='float64')
        clvol = self.blk.clvol  # cell volumes weight the L2 norm
        for it in range(neq):
            norm_Linf[it] = np.abs(diff[:,it]).max()
            norm_L2[it] = np.sqrt((diff[:,it]**2*clvol).sum())
        self.norm['Linf'] = norm_Linf
        self.norm['L2'] = norm_L2
    def preloop(self):
        # Compute initial norms and log each wave's speed/frequency/period.
        self.postmarch()
        for ipw in range(len(self.planewaves)):
            pw = self.planewaves[ipw]
            self.info("planewave[%d]:\n" % ipw)
            self.info("  c = %g, omega = %g, T = %.15e\n" % (
                pw.wsp, pw.afreq, 2*np.pi/pw.afreq))
    def postmarch(self):
        # Recompute norms every `psteps` marching steps.
        psteps = self.psteps
        istep = self.cse.execution.step_current
        if istep%psteps == 0:
            self._calculate()
    def postloop(self):
        """Pickle the norms next to the case output and log the velocity
        components (first three equations) of both norms."""
        fname = '%s_norm.pickle' % self.cse.io.basefn
        fname = os.path.join(self.cse.io.basedir, fname)
        pickle.dump(self.norm, open(fname, 'wb'), -1)
        self.info('Linf norm in velocity:\n')
        self.info('  %e, %e, %e\n' % tuple(self.norm['Linf'][:3]))
        self.info('L2 norm in velocity:\n')
        self.info('  %e, %e, %e\n' % tuple(self.norm['L2'][:3]))
# vim: set ff=unix fenc=utf8 ft=python ai et sw=4 ts=4 tw=79:
| StarcoderdataPython |
1748496 | <filename>src/gui2.py
import sys
import re
import PyQt5.QtWidgets as qt
from pathlib import Path
from PyQt5 import QtCore
from PyQt5.QtGui import QPixmap
class PredictDXA(qt.QWidget):
    '''
    GUI front end to predict.py.

    Lets the user pick an input directory and prediction options, launches
    predict.py as a QProcess, streams its stdout into a text box and shows
    the first result image that the script reports.

    Bug fixes relative to the original:
    - ``isChecked`` is a method and must be called; the bare bound-method
      attribute is always truthy, so the checks were no-ops.
    - ``btnstate2`` compared against ``"This folder"`` while the radio
      button's label is ``"This folder only"``, so the single-folder mode
      (``dxa_sw = 1``) could never be selected.
    '''
    def __init__(self):
        super().__init__()
        self.init_ui()
        self.in_folder = '.'
        # Option switches forwarded to predict.py (0 = off, 1 = on):
        # leg_sw -> legs stretched, dxa_sw -> this-folder-only, rec_sw -> Rec.
        self.leg_sw = 0
        self.dxa_sw = 0
        self.rec_sw = 0
    def init_ui(self):
        '''
        Build all GUI elements and wire up their signals.
        '''
        self.setGeometry(300, 300, 1200, 300)
        self.setWindowTitle("Predict DXA")
        self.lbl1 = qt.QLabel('Select input directory', self)
        self.lbl1.setMinimumWidth(220)
        self.input_txt = qt.QLineEdit(self)
        self.btn1 = qt.QPushButton('Browse', self)
        self.btn1.setToolTip('Select input directory')
        self.btn1.clicked.connect(self.choose_dir)
        self.lbl2 = qt.QLabel('Mouse legs are...', self)
        # Radio group: legs stretched or not.
        self.stretch = qt.QButtonGroup()
        self.s1 = qt.QRadioButton("Not stretched")
        self.s1.setChecked(True)
        self.s1.clicked.connect(lambda: self.btnstate(self.s1))
        self.stretch.addButton(self.s1)
        self.s2 = qt.QRadioButton("Stretched")
        self.s2.clicked.connect(lambda: self.btnstate(self.s2))
        self.stretch.addButton(self.s2)
        # Radio group: which folder layout to process.
        self.fold = qt.QButtonGroup()
        self.s3 = qt.QRadioButton("DXA")
        self.s3.setChecked(True)
        self.s3.clicked.connect(lambda: self.btnstate2(self.s3))
        self.fold.addButton(self.s3)
        self.s4 = qt.QRadioButton("Rec")
        self.s4.clicked.connect(lambda: self.btnstate2(self.s4))
        self.fold.addButton(self.s4)
        self.s5 = qt.QRadioButton("This folder only")
        self.s5.clicked.connect(lambda: self.btnstate2(self.s5))
        self.fold.addButton(self.s5)
        self.btn_submit = qt.QPushButton('Submit', self)
        self.btn_submit.clicked.connect(lambda: self.call_program(self.in_folder,
                                                                  self.leg_sw,
                                                                  self.dxa_sw,
                                                                  self.rec_sw))
        self.btn_cancel = qt.QPushButton('Cancel', self)
        self.btn_cancel.setEnabled(False)
        self.btn_cancel.clicked.connect(self.end)
        self.lbl3 = qt.QLabel('Folders', self)
        self.output = qt.QTextEdit()
        self.img_label = qt.QLabel()
        self.img = qt.QLabel()
        self.img.setFixedHeight(300)
        # QProcess object for external app
        self.process = QtCore.QProcess(self)
        # QProcess emits `readyRead` when there is data to be read
        self.process.readyRead.connect(lambda: [self.data_ready(), self.set_img()])
        # Toggle Submit/Cancel availability while the subprocess runs.
        self.process.started.connect(lambda: [self.btn_submit.setEnabled(False),
                                              self.btn_cancel.setEnabled(True)])
        self.process.finished.connect(lambda: [self.btn_submit.setEnabled(True),
                                               self.btn_cancel.setEnabled(False)])
        self.create_grid_layout()
        window_layout = qt.QVBoxLayout()
        window_layout.addWidget(self.horizontal_groupbox)
        self.setLayout(window_layout)
    def create_grid_layout(self):
        '''
        Arrange the GUI elements in a grid inside a group box.
        '''
        self.horizontal_groupbox = qt.QGroupBox()
        grid = qt.QGridLayout()
        grid.setSpacing(10)
        grid.addWidget(self.lbl1, 1, 0)
        grid.addWidget(self.input_txt, 2, 0, 1, 3)
        grid.addWidget(self.btn1, 2, 3)
        grid.addWidget(self.lbl2, 4, 0, 1, 2)
        grid.addWidget(self.s1, 5, 0, 1, 2)
        grid.addWidget(self.s2, 6, 0, 1, 2)
        grid.addWidget(self.lbl3, 4, 2, 1, 2)
        grid.addWidget(self.s3, 5, 2, 1, 2)
        grid.addWidget(self.s4, 6, 2, 1, 2)
        grid.addWidget(self.s5, 7, 2, 1, 2)
        grid.addWidget(self.btn_submit, 9, 0)
        grid.addWidget(self.btn_cancel, 9, 3)
        grid.addWidget(self.output, 1, 4, 10, 10)
        grid.addWidget(self.img, 12, 12, 2, 2)
        grid.addWidget(self.img_label, 14, 12)
        self.horizontal_groupbox.setLayout(grid)
    def choose_dir(self):
        '''
        Open a directory picker (starting from the current entry, if any)
        and store the selection.
        '''
        current = str(self.input_txt.text())
        if current:
            input_dir = qt.QFileDialog.getExistingDirectory(None, 'Select a folder:', current)
        else:
            input_dir = qt.QFileDialog.getExistingDirectory(None, 'Select a folder:')
        self.input_txt.setText(input_dir)
        self.in_folder = input_dir
    def btnstate(self, b):
        '''
        Update self.leg_sw from the legs stretched/not-stretched radio
        buttons. (Fixed: isChecked() is now actually called.)
        '''
        if b.text() == "Not stretched":
            if b.isChecked():
                self.leg_sw = 0
        if b.text() == "Stretched":
            if b.isChecked():
                self.leg_sw = 1
    def btnstate2(self, b):
        '''
        Update self.dxa_sw / self.rec_sw from the folder-mode radio buttons.
        (Fixed: isChecked() is now called, and the label comparison matches
        the actual button text "This folder only".)
        '''
        if b.text() == "DXA":
            if b.isChecked():
                self.dxa_sw = 0
                self.rec_sw = 0
        if b.text() == "Rec":
            if b.isChecked():
                self.dxa_sw = 0
                self.rec_sw = 1
        if b.text() == "This folder only":
            if b.isChecked():
                self.dxa_sw = 1
                self.rec_sw = 0
    def data_ready(self):
        '''
        Read stdout from the running process and append it to the text box.
        '''
        cursor = self.output.textCursor()
        cursor.movePosition(cursor.End)
        cursor.insertText(str(self.process.readAll(), 'utf-8'))
        self.output.ensureCursorVisible()
    def call_program(self, in_folder, leg, dxa, rec):
        '''
        Build the predict.py command line from the option switches and start
        it as a QProcess. `start` takes the executable and arguments as one
        command string here.
        '''
        if leg:
            stretch = '-s '
        else:
            stretch = ''
        if dxa:
            fld = '-d '
        else:
            fld = ''
        if rec:
            dirname = '-r'
        else:
            dirname = ""
        # Quote the folder so paths with spaces survive the shell split.
        in_folder = '"' + in_folder + '" '
        srcloc = Path(__file__).resolve().parent
        cmd = 'python3.7 ' + str(srcloc/"predict.py") + ' -i ' + in_folder + stretch + fld + dirname
        print(cmd)
        print(in_folder)
        self.process.start(cmd)
        #self.process.start('bash runit.sh')
    def end(self):
        '''
        Terminate the running process.
        '''
        self.process.terminate()
    def set_img(self):
        '''
        Scan the accumulated stdout for the last "First image:" marker and
        display the .bmp path that follows it.
        '''
        txt = self.output.toPlainText()
        lst = find_all(txt)
        if lst:
            # +13 skips "First image:" (12 chars) plus the following space.
            i_start = lst[-1]+13
            i_end = txt.find(".bmp", i_start)+4
            img_path = txt[i_start:i_end]
            self.img.resize(300, 300)
            print(img_path)
            pixmap = QPixmap(img_path)
            self.img_label.setText("First image: " + img_path.split("/")[-1])
            self.img.setPixmap(pixmap.scaled(self.img.size(), QtCore.Qt.KeepAspectRatio))
def find_all(txt, substring="First image:"):
    """Return the start offsets of every non-overlapping match of
    *substring* (treated as a regular-expression pattern) in *txt*,
    in left-to-right order."""
    return [match.start() for match in re.finditer(substring, txt)]
if __name__ == '__main__':
    # Launch the Qt event loop with a single PredictDXA window.
    app = qt.QApplication(sys.argv)
    ex = PredictDXA()
    ex.show()
    sys.exit(app.exec_())
| StarcoderdataPython |
6658412 | <reponame>xianjunzhengbackup/Cloud-Native-Python
/usr/local/lib/python3.6/_collections_abc.py | StarcoderdataPython |
3420552 | # Copyright (c) 2020 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
from tk_toolchain.authentication import get_toolkit_user
# This test is meant to be called from the command-line and not by pytest.
if __name__ == "__main__":
    # This ensures that tk-toolchain was able to build credentials that are valid.
    # Hit the server just to make sure the credentials are all good.
    # Only the round-trip matters here: an invalid credential would raise,
    # so the return value of find_one is intentionally ignored.
    get_toolkit_user().create_sg_connection().find_one("HumanUser", [])
| StarcoderdataPython |
6562463 | # -*- coding: utf-8 -*-
"""
jctl

Command line tool to use Python-Jamf.
"""
pass  # placeholder: this module currently has no runtime behavior
class TreeNode(object):
    """A binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        # Children start out absent; callers attach them after construction.
        self.val = x
        self.left = None
        self.right = None
def sumOfLeftLeaves(root):
    """Return the sum of all left leaves of a binary tree.

    A left leaf is a childless node reached through its parent's `left`
    link; the root itself never counts as a left leaf.
    """
    def walk(node, came_from_left):
        # Empty subtree contributes nothing.
        if node is None:
            return 0
        # A leaf counts only when it hangs off a `left` link.
        if node.left is None and node.right is None:
            return node.val if came_from_left else 0
        # Internal node: accumulate from both subtrees.
        return walk(node.left, True) + walk(node.right, False)

    return walk(root, False)
3512725 | <reponame>dfunckt/django-connections<gh_stars>10-100
#!/usr/bin/env python
from os.path import dirname, join
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from connections import VERSION
def get_version(version):
    """Render a 5-tuple ``(major, minor, micro, stage, serial)`` as a
    version string, e.g. ``(1, 2, 0, 'alpha', 3)`` -> ``'1.2a3'`` and
    ``(1, 2, 3, 'final', 0)`` -> ``'1.2.3'``."""
    assert len(version) == 5
    assert version[3] in ('alpha', 'beta', 'rc', 'final')
    # Drop the micro part when it is zero.
    num_parts = 3 if version[2] else 2
    main_part = '.'.join(str(piece) for piece in version[:num_parts])
    suffix = ''
    if version[3] != 'final':
        abbrev = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}[version[3]]
        suffix = abbrev + str(version[4])
    return main_part + suffix
# The package README doubles as the long description shown on PyPI.
with open(join(dirname(__file__), 'README.rst')) as f:
    long_description = f.read()
setup(
    name='django-connections',
    description='Create, query and manage graphs of relationships between your Django models',
    version=get_version(VERSION),
    long_description=long_description,
    url='http://github.com/dfunckt/django-connections',
    author='<NAME>',
    author_email='<EMAIL>',
    maintainer='<NAME>',
    maintainer_email='<EMAIL>',
    license='MIT',
    zip_safe=False,
    packages=[
        'connections',
        'connections.templatetags',
    ],
    install_requires=[
        'Django >= 1.5',
    ],
    tests_require=[
        'nose',
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
    ],
)
| StarcoderdataPython |
3362153 | <reponame>yetingli/ReDoS-Benchmarks<gh_stars>1-10
# ReDoS (catastrophic backtracking) benchmark: times re.search against an
# attack string whose repeated segment grows with i. The nested quantifiers
# in the pattern make the match time grow exponentially with the input size.
# 225
# (<[^>]*?tag[^>]*?(?:identify_by)[^>]*>)((?:.*?(?:<[ \r\t]*tag[^>]*>?.*?(?:<.*?/.*?tag.*?>)?)*)*)(<[^>]*?/[^>]*?tag[^>]*?>)
# EXPONENT
# nums:4
# EXPONENT AttackString:"<tagidentify_by>"+"<tag</!tag</>"*2+"! _1_EOA(i or ii)"
import re
from time import perf_counter
regex = """(<[^>]*?tag[^>]*?(?:identify_by)[^>]*>)((?:.*?(?:<[ \r\t]*tag[^>]*>?.*?(?:<.*?/.*?tag.*?>)?)*)*)(<[^>]*?/[^>]*?tag[^>]*?>)"""
REGEX = re.compile(regex)
for i in range(0, 150000):
    # Grow the pathological middle section linearly with i.
    ATTACK = "<tagidentify_by>" + "<tag</!tag</>" * i * 1 + "! _1_EOA(i or ii)"
    LEN = len(ATTACK)
    BEGIN = perf_counter()
    m = REGEX.search(ATTACK)
    # m = REGEX.match(ATTACK)
    DURATION = perf_counter() - BEGIN
    print(f"{i *1}: took {DURATION} seconds!")
5010773 | """This module wraps the :class:`Socket` and :class:`Context` found in :mod:`pyzmq <zmq>` to be non blocking
"""
import zmq
from zmq import *
from zmq import devices
__all__ = zmq.__all__
import gevent
from gevent import select
from gevent.event import AsyncResult
from gevent.hub import get_hub
class GreenSocket(Socket):
"""Green version of :class:`zmq.core.socket.Socket`
The following methods are overridden:
* send
* send_multipart
* recv
* recv_multipart
To ensure that the ``zmq.NOBLOCK`` flag is set and that sending or recieving
is deferred to the hub if a ``zmq.EAGAIN`` (retry) error is raised.
The `__state_changed` method is triggered when the zmq.FD for the socket is
marked as readable and triggers the necessary read and write events (which
are waited for in the recv and send methods).
Some double underscore prefixes are used to minimize pollution of
:class:`zmq.core.socket.Socket`'s namespace.
"""
def __init__(self, context, socket_type):
self.__in_send_multipart = False
self.__in_recv_multipart = False
self.__setup_events()
def __del__(self):
self.close()
def close(self, linger=None):
super(GreenSocket, self).close(linger)
self.__cleanup_events()
def __cleanup_events(self):
# close the _state_event event, keeps the number of active file descriptors down
if getattr(self, '_state_event', None):
try:
self._state_event.stop()
except AttributeError, e:
# gevent<1.0 compat
self._state_event.cancel()
# if the socket has entered a close state resume any waiting greenlets
if hasattr(self, '__writable'):
self.__writable.set()
self.__readable.set()
def __setup_events(self):
self.__readable = AsyncResult()
self.__writable = AsyncResult()
try:
self._state_event = get_hub().loop.io(self.getsockopt(FD), 1) # read state watcher
self._state_event.start(self.__state_changed)
except AttributeError:
# for gevent<1.0 compatibility
from gevent.core import read_event
self._state_event = read_event(self.getsockopt(FD), self.__state_changed, persist=True)
def __state_changed(self, event=None, _evtype=None):
if self.closed:
self.__cleanup_events()
return
try:
events = super(GreenSocket, self).getsockopt(zmq.EVENTS)
except ZMQError, exc:
self.__writable.set_exception(exc)
self.__readable.set_exception(exc)
else:
if events & zmq.POLLOUT:
self.__writable.set()
if events & zmq.POLLIN:
self.__readable.set()
def _wait_write(self):
self.__writable = AsyncResult()
try:
self.__writable.get(timeout=1)
except gevent.Timeout:
self.__writable.set()
def _wait_read(self):
self.__readable = AsyncResult()
try:
self.__readable.get(timeout=1)
except gevent.Timeout:
self.__readable.set()
def send(self, data, flags=0, copy=True, track=False):
# if we're given the NOBLOCK flag act as normal and let the EAGAIN get raised
if flags & zmq.NOBLOCK:
try:
msg = super(GreenSocket, self).send(data, flags, copy, track)
finally:
if not self.__in_send_multipart:
self.__state_changed()
return msg
# ensure the zmq.NOBLOCK flag is part of flags
flags |= zmq.NOBLOCK
while True: # Attempt to complete this operation indefinitely, blocking the current greenlet
try:
# attempt the actual call
return super(GreenSocket, self).send(data, flags, copy, track)
except zmq.ZMQError, e:
# if the raised ZMQError is not EAGAIN, reraise
if e.errno != zmq.EAGAIN:
raise
# defer to the event loop until we're notified the socket is writable
self._wait_write()
def recv(self, flags=0, copy=True, track=False):
if flags & zmq.NOBLOCK:
try:
msg = super(GreenSocket, self).recv(flags, copy, track)
finally:
if not self.__in_recv_multipart:
self.__state_changed()
return msg
flags |= zmq.NOBLOCK
while True:
try:
return super(GreenSocket, self).recv(flags, copy, track)
except zmq.ZMQError, e:
if e.errno != zmq.EAGAIN:
if not self.__in_recv_multipart:
self.__state_changed()
raise
else:
if not self.__in_recv_multipart:
self.__state_changed()
return msg
self._wait_read()
def send_multipart(self, *args, **kwargs):
"""wrap send_multipart to prevent state_changed on each partial send"""
self.__in_send_multipart = True
try:
msg = super(GreenSocket, self).send_multipart(*args, **kwargs)
finally:
self.__in_send_multipart = False
self.__state_changed()
return msg
def recv_multipart(self, *args, **kwargs):
"""wrap recv_multipart to prevent state_changed on each partial recv"""
self.__in_recv_multipart = True
try:
msg = super(GreenSocket, self).recv_multipart(*args, **kwargs)
finally:
self.__in_recv_multipart = False
self.__state_changed()
return msg
class GreenContext(Context):
    """Replacement for :class:`zmq.core.context.Context`

    Ensures that the greened Socket above is used in calls to `socket`.
    """
    # Socket class instantiated by Context.socket(); the gevent-aware subclass.
    _socket_class = GreenSocket
| StarcoderdataPython |
1783729 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# vim: fenc=utf-8
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
#
"""
File name: foop.py
Version: 0.1
Author: dhilipsiva <<EMAIL>>
Date created: 2015-05-15
"""
__author__ = "dhilipsiva"
__status__ = "development"
"""
"""
class Foo(object):
    def baz(self):
        """
        Print the class tag "Foo" (used to demonstrate MRO below).
        """
        print "Foo"
class Bar(object):
    def baz(self):
        """
        Print the class tag "Bar" (used to demonstrate MRO below).
        """
        print "Bar"
class Blah(object):
    def baz(self):
        """
        Print the class tag "Blah" (used to demonstrate MRO below).
        """
        print "Blah"
class Baz(Blah, Foo, Bar):
    # MRO is Baz -> Blah -> Foo -> Bar, so baz() resolves to Blah.baz.
    pass
# Demonstrates method resolution order: prints "Blah" because Blah is the
# first base class listed in Baz's bases.
baz = Baz()
baz.baz()
| StarcoderdataPython |
1710032 | from django.conf.urls import url
from views import calendar
# URL routes for the calendar app: "all" maps to the AllEvents list view.
urlpatterns = [
    url(
        # ### Views of calendar ###
        r'^all$',
        calendar.AllEvents.as_view()
    )
]
| StarcoderdataPython |
12827823 | """
Unittests checking the initialization of the ``MockServer`` class and evaluation of the config passed to it.
"""
from unittest.mock import patch
import pytest
from server_double.server import Endpoint, MockServer
def test_port():
    """The server exposes the port it was configured with."""
    mock_server = MockServer(config={"port": 8083})
    assert 8083 == mock_server.port
@pytest.mark.parametrize(
    "url,config",
    [
        ("/endpoint", {"status_code": 200}),
        ("/resource", {"status_code": 204}),
        ("/foobar", {"status_code": 303}),
    ],
)
def test_single_endpoint(url, config):
    """A single configured endpoint is mounted as an Endpoint at its URL
    with the configured default status code."""
    with patch("server_double.server.cherrypy") as cherrypy_stub:
        MockServer(config={"endpoints": {url: config}})
        endpoint, mounted_path = cherrypy_stub.tree.mount.call_args[0]
        assert mounted_path == url
        assert isinstance(endpoint, Endpoint)
        assert endpoint.url == url
        assert endpoint.default_status == config["status_code"]
| StarcoderdataPython |
4900204 | <reponame>HaoranDennis/pandapower
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2019 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import pandas as pd
import pytest
import pandapower as pp
import pandapower.shortcircuit as sc
@pytest.fixture
def one_line_one_generator():
    """Small 10 kV test grid: two identical generators on bus 0, a line
    from bus 1 back to bus 0, a bus-bus switch tying bus 2 to bus 0, and
    an out-of-service 0.4 kV bus (index 3) whose results must stay NaN."""
    net = pp.create_empty_network()
    bus_gen = pp.create_bus(net, vn_kv=10.)
    bus_line = pp.create_bus(net, vn_kv=10.)
    bus_switch = pp.create_bus(net, vn_kv=10.)
    pp.create_bus(net, vn_kv=0.4, in_service=False)
    # Two identical generators feeding the fault.
    for _ in range(2):
        pp.create_gen(net, bus_gen, vn_kv=10.5, xdss_pu=0.2, rdss_pu=0.001,
                      cos_phi=0.8, p_mw=0.1, sn_mva=2.5)
    line_idx = pp.create_line_from_parameters(
        net, bus_line, bus_gen, length_km=1.0, max_i_ka=0.29,
        r_ohm_per_km=0.1548, x_ohm_per_km=0.0816814, c_nf_per_km=165)
    net.line.loc[line_idx, "endtemp_degree"] = 165
    pp.create_switch(net, bus_switch, bus_gen, et="b")
    return net
def test_max_gen(one_line_one_generator):
    """Maximum short-circuit currents at every in-service bus."""
    grid = one_line_one_generator
    sc.calc_sc(grid, case="max")
    ikss = grid.res_bus_sc.ikss_ka
    tol = 1e-7
    assert abs(ikss.at[0] - 1.5395815) < tol
    assert abs(ikss.at[2] - 1.5395815) < tol
    assert abs(ikss.at[1] - 1.5083952) < tol
    # Out-of-service bus gets no result.
    assert pd.isnull(ikss.at[3])
def test_min_gen(one_line_one_generator):
    """Minimum short-circuit currents at every in-service bus."""
    grid = one_line_one_generator
    sc.calc_sc(grid, case="min")
    ikss = grid.res_bus_sc.ikss_ka
    tol = 1e-7
    assert abs(ikss.at[0] - 1.3996195) < tol
    assert abs(ikss.at[2] - 1.3996195) < tol
    assert abs(ikss.at[1] - 1.3697407) < tol
    # Out-of-service bus gets no result.
    assert pd.isnull(ikss.at[3])
def test_max_gen_fault_impedance(one_line_one_generator):
    """Maximum short-circuit currents with an explicit fault impedance."""
    grid = one_line_one_generator
    sc.calc_sc(grid, case="max", r_fault_ohm=2, x_fault_ohm=10)
    ikss = grid.res_bus_sc.ikss_ka
    tol = 1e-7
    assert abs(ikss.at[0] - 0.4450868) < tol
    assert abs(ikss.at[1] - 0.4418823) < tol
    assert abs(ikss.at[2] - 0.4450868) < tol
    # Out-of-service bus gets no result.
    assert pd.isnull(ikss.at[3])
if __name__ == '__main__':
    # Allow running this test module directly through pytest.
    pytest.main(['test_gen.py'])
| StarcoderdataPython |
12846497 | <filename>util/vis_tool.py
import os.path as osp
import matplotlib.pyplot as plt
import numpy as np
import cv2
class VisualUtil:
    """Draws predicted (red) and ground-truth (blue) hand skeletons on a
    depth image and writes the result with OpenCV. The joint index and bone
    tables depend on which dataset the pose layout comes from."""
    def __init__(self, dataset):
        # Dataset name selecting the joint layout: 'nyu', 'icvl', 'msra',
        # or anything containing 'hands'.
        self.dataset = dataset
        # RED BGR
        self.color_pred = [(0,0,102), (0,0,179), (0,0,255), (77,77,255), (153,153,255)]
        # self.color_pred = ['#660000', '#b30000', '#ff0000', '#ff4d4d', '#ff9999']
        # BLUE BGR
        self.color_gt = [(102,0,0), (179,0,0), (255,0,0), (255,77,77), (255,153,153)]
        # self.color_gt = ['#000066', '#0000b3', '#0000ff', '#4d4dff', '#9999ff']
    def plot(self, img, path, jt_uvd_pred, jt_uvd_gt=None):
        """Render predictions (and optionally ground truth) over *img* and
        save the visualization to *path*."""
        uvd_pred = jt_uvd_pred.reshape(-1, 3)
        image = img.copy()
        # Rescale depth values into a displayable intensity range.
        # NOTE(review): assumes img is normalized to roughly [-1, 1] -- TODO confirm.
        image = (image.squeeze() + 1) * 100
        # Replicate the single channel to get a 3-channel BGR canvas.
        image = image[:, :, np.newaxis].repeat(3, axis=-1)
        self._plot_fingers(image, uvd_pred, self.color_pred)
        if isinstance(jt_uvd_gt, np.ndarray):
            uvd_gt = jt_uvd_gt.reshape(-1, 3)
            self._plot_fingers(image, uvd_gt, self.color_gt)
        cv2.imwrite(path, image)
    def _plot_fingers(self, img, jt_uvd, colors):
        """Draw joint circles and bone segments in-place on *img*, one color
        per finger group."""
        jt_idx, sketch = self._get_setting()
        for i in range(len(colors)):
            for idx in jt_idx[i]:
                cv2.circle(img, (int(jt_uvd[idx][0]), int(jt_uvd[idx][1])),
                        2, colors[i], -1)
            for (s, e) in sketch[i]:
                cv2.line(img, (int(jt_uvd[s][0]), int(jt_uvd[s][1])),
                        (int(jt_uvd[e][0]), int(jt_uvd[e][1])),
                        colors[i], 1)
        return
    def _get_setting(self):
        """Return (joint groups, bone list) for the configured dataset.

        NOTE(review): falls through with an implicit None for unknown
        dataset names, which would break _plot_fingers -- confirm callers
        only use the supported datasets.
        """
        if self.dataset == 'nyu':
            jt_idx = [[0,1], [2,3], [4,5], [6,7], [8,9,10,11,12,13]]
            sketch = [[(0, 1), (1, 13)],
                    [(2, 3), (3, 13)],
                    [(4, 5), (5, 13)],
                    [(6, 7), (7, 13)],
                    [(8, 9), (9, 10),(10, 13), (11, 13), (12, 13)]]
            return jt_idx, sketch
        elif 'hands' in self.dataset:
            jt_idx = [[1,6,7,8], [2,9,10,11], [3,12,13,14], [4,15,16,17], [5,18,19,20,0]]
            sketch = [[(0, 1), (1, 6), (6, 7), (7, 8)],
                    [(0, 2), (2, 9), (9, 10), (10, 11)],
                    [(0, 3), (3, 12), (12, 13), (13, 14)],
                    [(0, 4), (4, 15), (15, 16), (16, 17)],
                    [(0, 5), (5, 18), (18, 19), (19, 20)]]
            return jt_idx, sketch
        elif self.dataset == 'icvl':
            jt_idx = [[1,2,3], [4,5,6], [7,8,9], [10,11,12], [13,14,15, 0]]
            sketch = [[(0, 1), (1, 2), (2, 3)],
                    [(0, 4), (4, 5), (5, 6)],
                    [(0, 7), (7, 8), (8, 9)],
                    [(0, 10), (10, 11), (11, 12)],
                    [(0, 13), (13, 14), (14, 15)]]
            return jt_idx, sketch
        elif self.dataset == 'msra':
            jt_idx = [[1,2,3,4], [5,6,7,8], [9,10,11,12], [13,14,15,16], [17,18,19,20,0]]
            sketch = [[(0, 1), (1, 2), (2, 3), (3, 4)],
                    [(0, 5), (5, 6), (6, 7), (7, 8)],
                    [(0, 9), (9, 10), (10, 11), (11, 12)],
                    [(0, 13), (13, 14), (14, 15), (15, 16)],
                    [(0, 17), (17, 18), (18, 19), (19, 20)]]
            return jt_idx, sketch
1809211 | <reponame>yehonatanz/konst<gh_stars>0
def test_string():
    # Importing a name from konst.string yields that name itself as a str.
    from konst.string import A
    assert A == 'A'
def test_string_preserve_case():
    # The generated constant preserves the imported name's exact casing.
    from konst.string import mixedCase
    assert mixedCase == 'mixedCase'
| StarcoderdataPython |
6550784 | <gh_stars>1-10
"""
Progress Tab Serializers
"""
from datetime import datetime
from rest_framework import serializers
from rest_framework.reverse import reverse
from pytz import UTC
from lms.djangoapps.course_home_api.serializers import ReadOnlySerializer, VerifiedModeSerializer
class CourseGradeSerializer(ReadOnlySerializer):
    """
    Serializer for course grade
    """
    letter_grade = serializers.CharField()
    percent = serializers.FloatField()
    # Renamed from the model's 'passed' attribute for the API response.
    is_passing = serializers.BooleanField(source='passed')
class SubsectionScoresSerializer(ReadOnlySerializer):
    """
    Serializer for subsections in section_scores
    """
    assignment_type = serializers.CharField(source='format')
    block_key = serializers.SerializerMethodField()
    display_name = serializers.CharField()
    has_graded_assignment = serializers.BooleanField(source='graded')
    learner_has_access = serializers.SerializerMethodField()
    num_points_earned = serializers.FloatField(source='graded_total.earned')
    num_points_possible = serializers.FloatField(source='graded_total.possible')
    percent_graded = serializers.FloatField()
    problem_scores = serializers.SerializerMethodField()
    show_correctness = serializers.CharField()
    show_grades = serializers.SerializerMethodField()
    url = serializers.SerializerMethodField()

    def get_block_key(self, subsection):
        # Usage keys are not JSON-serializable; send the string form.
        return str(subsection.location)

    def get_problem_scores(self, subsection):
        """Problem scores for this subsection"""
        problem_scores = [
            {
                'earned': score.earned,
                'possible': score.possible,
            }
            for score in subsection.problem_scores.values()
        ]
        return problem_scores

    def get_url(self, subsection):
        """
        Returns the URL for the subsection while taking into account if the course team has
        marked the subsection's visibility as hide after due.
        """
        # Self-paced courses hide after the course end; instructor-paced
        # after the subsection due date.
        hide_url_date = subsection.end if subsection.self_paced else subsection.due
        if (not self.context['staff_access'] and subsection.hide_after_due and hide_url_date
                and datetime.now(UTC) > hide_url_date):
            return None
        relative_path = reverse('jump_to', args=[self.context['course_key'], subsection.location])
        request = self.context['request']
        return request.build_absolute_uri(relative_path)

    def get_show_grades(self, subsection):
        # Staff may see grades even when learners may not.
        return subsection.show_grades(self.context['staff_access'])

    def get_learner_has_access(self, subsection):
        # Access is denied when the block contains gated content.
        course_blocks = self.context['course_blocks']
        return not course_blocks.get_xblock_field(subsection.location, 'contains_gated_content', False)
class SectionScoresSerializer(ReadOnlySerializer):
    """
    Serializer for sections in section_scores
    """
    display_name = serializers.CharField()
    # The model attribute is 'sections'; exposed as 'subsections' in the API.
    subsections = SubsectionScoresSerializer(source='sections', many=True)
class GradingPolicySerializer(ReadOnlySerializer):
    """
    Serializer for grading policy
    """
    assignment_policies = serializers.SerializerMethodField()
    grade_range = serializers.DictField(source='GRADE_CUTOFFS')

    def get_assignment_policies(self, grading_policy):
        """Build one summary dict per assignment type in the grader config."""
        policies = []
        for assignment_policy in grading_policy['GRADER']:
            policies.append({
                'num_droppable': assignment_policy['drop_count'],
                'num_total': float(assignment_policy['min_count']),
                'short_label': assignment_policy.get('short_label', ''),
                'type': assignment_policy['type'],
                'weight': assignment_policy['weight'],
            })
        return policies
class CertificateDataSerializer(ReadOnlySerializer):
    """
    Serializer for certificate data
    """
    cert_status = serializers.CharField()
    cert_web_view_url = serializers.CharField()
    download_url = serializers.CharField()
    certificate_available_date = serializers.DateTimeField()
class VerificationDataSerializer(ReadOnlySerializer):
    """
    Serializer for verification data object
    """
    link = serializers.URLField()
    status = serializers.CharField()
    status_date = serializers.DateTimeField()
class ProgressTabSerializer(VerifiedModeSerializer):
    """
    Serializer for progress tab

    Aggregates the smaller serializers above into the complete payload
    returned by the progress-tab API endpoint.
    """
    access_expiration = serializers.DictField()
    certificate_data = CertificateDataSerializer()
    completion_summary = serializers.DictField()
    course_grade = CourseGradeSerializer()
    end = serializers.DateTimeField()
    enrollment_mode = serializers.CharField()
    grading_policy = GradingPolicySerializer()
    has_scheduled_content = serializers.BooleanField()
    section_scores = SectionScoresSerializer(many=True)
    studio_url = serializers.CharField()
    username = serializers.CharField()
    user_has_passing_grade = serializers.BooleanField()
    verification_data = VerificationDataSerializer()
| StarcoderdataPython |
3303002 | from frappe import _
def get_data():
    """Dashboard (linked-document) configuration for the Trip Order doctype:
    related doctypes are grouped under translated section labels."""
    transactions = [
        {
            'label': _('Fulfillment'),
            'items': ['Sales Invoice', 'Delivery Note']
        },
        {
            'label': _('Purchasing'),
            'items': ['Material Request', 'Purchase Order']
        },
        {
            'label': _('Projects'),
            'items': ['Project']
        },
        {
            'label': _('Manufacturing'),
            'items': ['Production Order']
        },
        {
            'label': _('Reference'),
            'items': ['Quotation', 'Subscription']
        },
        {
            'label': _('Payment'),
            'items': ['Payment Entry', 'Payment Request', 'Journal Entry']
        },
    ]
    return {
        'fieldname': 'trip_order',
        'transactions': transactions,
    }
| StarcoderdataPython |
5006 | """
Module for plotting analyses
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from copy import deepcopy
import pickle, json
import os
from matplotlib.offsetbox import AnchoredOffsetbox
# Py2/Py3 compatibility: ensure 'basestring' exists (alias of str on Py3).
try:
    basestring
except NameError:
    basestring = str
# Default color cycle (RGB fractions, 15 entries), repeated 3x so plots
# with many traces do not run out of colors.
colorList = [[0.42, 0.67, 0.84], [0.90, 0.76, 0.00], [0.42, 0.83, 0.59], [0.90, 0.32, 0.00], [0.34, 0.67, 0.67], [0.90, 0.59, 0.00], [0.42, 0.82, 0.83], [1.00, 0.85, 0.00], [0.33, 0.67, 0.47], [1.00, 0.38, 0.60], [0.57, 0.67, 0.33], [0.50, 0.20, 0.00], [0.71, 0.82, 0.41], [0.00, 0.20, 0.50], [0.70, 0.32, 0.10]] * 3
class MetaFigure:
"""A class which defines a figure object"""
def __init__(self, kind, sim=None, subplots=None, rcParams=None, autosize=0.35, **kwargs):
if not sim:
from .. import sim
self.sim = sim
self.kind = kind
# Make a copy of the current matplotlib rcParams and update them
self.orig_rcParams = deepcopy(mpl.rcParamsDefault)
if rcParams:
for rcParam in rcParams:
if rcParam in mpl.rcParams:
mpl.rcParams[rcParam] = rcParams[rcParam]
else:
print(rcParam, 'not found in matplotlib.rcParams')
self.rcParams = rcParams
else:
self.rcParams = self.orig_rcParams
# Set up any subplots
if not subplots:
nrows = 1
ncols = 1
elif type(subplots) == int:
nrows = subplots
ncols = 1
elif type(subplots) == list:
nrows = subplots[0]
ncols = subplots[1]
# Create figure
if 'figSize' in kwargs:
figSize = kwargs['figSize']
else:
figSize = self.rcParams['figure.figsize']
if 'dpi' in kwargs:
dpi = kwargs['dpi']
else:
dpi = self.rcParams['figure.dpi']
if autosize:
maxplots = np.max([nrows, ncols])
figSize0 = figSize[0] + (maxplots-1)*(figSize[0]*autosize)
figSize1 = figSize[1] + (maxplots-1)*(figSize[1]*autosize)
figSize = [figSize0, figSize1]
self.fig, self.ax = plt.subplots(nrows, ncols, figsize=figSize, dpi=dpi)
self.plotters = []
def saveFig(self, sim=None, fileName=None, fileDesc=None, fileType='png', fileDir=None, overwrite=True, **kwargs):
"""
'eps': 'Encapsulated Postscript',
'jpg': 'Joint Photographic Experts Group',
'jpeg': 'Joint Photographic Experts Group',
'pdf': 'Portable Document Format',
'pgf': 'PGF code for LaTeX',
'png': 'Portable Network Graphics',
'ps': 'Postscript',
'raw': 'Raw RGBA bitmap',
'rgba': 'Raw RGBA bitmap',
'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics',
'tif': 'Tagged Image File Format',
'tiff': 'Tagged Image File Format'
"""
if not sim:
from .. import sim
if fileDesc is not None:
fileDesc = '_' + str(fileDesc)
else:
fileDesc = '_' + self.kind
if fileType not in self.fig.canvas.get_supported_filetypes():
raise Exception('fileType not recognized in saveFig')
else:
fileExt = '.' + fileType
if not fileName or not isinstance(fileName, basestring):
fileName = self.sim.cfg.filename + fileDesc + fileExt
else:
if fileName.endswith(fileExt):
fileName = fileName.split(fileExt)[0] + fileDesc + fileExt
else:
fileName = fileName + fileDesc + fileExt
if fileDir is not None:
fileName = os.path.join(fileDir, fileName)
if not overwrite:
while os.path.isfile(fileName):
try:
fileNumStr = fileName.split(fileExt)[0].split('_')[-1]
fileNumStrNew = str(int(fileNumStr) + 1).zfill(2)
fileName = fileName.split('_' + fileNumStr)[0]
except:
fileNumStr = fileNumStrNew = '01'
fileName = fileName.split(fileExt)[0]
fileName = fileName.split(fileNumStr)[0] + '_' + fileNumStrNew + fileExt
self.fig.savefig(fileName)
self.fileName = fileName
return fileName
def showFig(self, **kwargs):
try:
self.fig.show(block=False)
except:
self.fig.show()
def addSuptitle(self, **kwargs):
    """Add a super title to the figure; kwargs are passed straight through to
    ``matplotlib.figure.Figure.suptitle`` (e.g. ``t``, ``fontsize``)."""
    self.fig.suptitle(**kwargs)
def finishFig(self, **kwargs):
    """Apply final figure-level options (suptitle, layout, saving, showing)
    and restore the original matplotlib rcParams."""
    suptitle = kwargs.get('suptitle')
    if suptitle:
        self.addSuptitle(**suptitle)
    # tight_layout is applied unless explicitly disabled via tightLayout=False
    if kwargs.get('tightLayout', True):
        plt.tight_layout()
    if kwargs.get('saveFig'):
        self.saveFig(**kwargs)
    if 'showFig' in kwargs:
        # An explicit falsy showFig closes the figure; an absent key leaves
        # the figure untouched.
        if kwargs['showFig']:
            self.showFig(**kwargs)
        else:
            plt.close(self.fig)
    # Reset the matplotlib rcParams to their original settings
    mpl.style.use(self.orig_rcParams)
class GeneralPlotter:
    """A class used for plotting into a Matplotlib axis attached to a MetaFigure."""
    def __init__(self, data, kind, axis=None, sim=None, rcParams=None, metafig=None, **kwargs):
        """
        Parameters
        ----------
        data : dict, str
            The data to plot, or the path of a file it can be loaded from.
        kind : str
            The kind of plot (used e.g. for default file naming).
        axis : matplotlib axis
            The axis to plot into. If axis is set to None, a new figure and
            axis are created and plotted into. If plotting into an existing
            axis, more options are available: xtwin, ytwin,
        rcParams : dict
            NOTE(review): accepted for interface compatibility but not used
            in this class — confirm whether it should be forwarded.
        """
        self.kind = kind
        # Load data from file when a path is given
        if isinstance(data, str):
            if os.path.isfile(data):
                self.data = self.loadData(data)
            else:
                raise Exception('In Plotter, if data is a string, it must be the path to a data file.')
        else:
            self.data = data
        if not sim:
            from .. import sim
        self.sim = sim
        self.axis = axis
        if metafig:
            self.metafig = metafig
        # If an axis is input, plot there; otherwise make a new figure and axis
        if self.axis is None:
            self.metafig = MetaFigure(kind=self.kind, **kwargs)
            self.fig = self.metafig.fig
            self.axis = self.metafig.ax
        else:
            self.fig = self.axis.figure
        # Attach plotter to its MetaFigure
        self.metafig.plotters.append(self)
    def loadData(self, fileName, fileDir=None, sim=None):
        """Load plot data from file, store it on the instance and return it.

        Bug fix: previously the ``sim`` argument was ignored (None was always
        passed through) and nothing was returned, so ``__init__``'s
        ``self.data = self.loadData(data)`` overwrote the loaded data with
        None.  The data is now returned and ``sim`` is forwarded.
        """
        from ..analysis import loadData
        self.data = loadData(fileName=fileName, fileDir=fileDir, sim=sim)
        return self.data
    def saveData(self, fileName=None, fileDesc=None, fileType=None, fileDir=None, sim=None, **kwargs):
        """Save this plotter's data via the analysis ``saveData`` helper."""
        from ..analysis import saveData as saveFigData
        saveFigData(self.data, fileName=fileName, fileDesc=fileDesc, fileType=fileType, fileDir=fileDir, sim=sim, **kwargs)
    def formatAxis(self, **kwargs):
        """Apply axis formatting options from kwargs: title, x/y labels,
        x/y limits and y-axis inversion."""
        if 'title' in kwargs:
            self.axis.set_title(kwargs['title'])
        if 'xlabel' in kwargs:
            self.axis.set_xlabel(kwargs['xlabel'])
        if 'ylabel' in kwargs:
            self.axis.set_ylabel(kwargs['ylabel'])
        if 'xlim' in kwargs:
            if kwargs['xlim'] is not None:
                self.axis.set_xlim(kwargs['xlim'])
        if 'ylim' in kwargs:
            if kwargs['ylim'] is not None:
                self.axis.set_ylim(kwargs['ylim'])
        if 'invert_yaxis' in kwargs:
            if kwargs['invert_yaxis'] is True:
                self.axis.invert_yaxis()
    def addLegend(self, handles=None, labels=None, **kwargs):
        """Add a legend to the axis.

        Any recognized Matplotlib legend parameters found directly in kwargs
        are applied; values inside a nested ``legendKwargs`` dict override
        them.  Handles/labels default to those already on the axis.
        """
        legendParams = ['loc', 'bbox_to_anchor', 'fontsize', 'numpoints', 'scatterpoints', 'scatteryoffsets', 'markerscale', 'markerfirst', 'frameon', 'fancybox', 'shadow', 'framealpha', 'facecolor', 'edgecolor', 'mode', 'bbox_transform', 'title', 'title_fontsize', 'borderpad', 'labelspacing', 'handlelength', 'handletextpad', 'borderaxespad', 'columnspacing', 'handler_map']
        # Check for and apply any legend parameters in the kwargs
        legendKwargs = {}
        for kwarg in kwargs:
            if kwarg in legendParams:
                legendKwargs[kwarg] = kwargs[kwarg]
        # If 'legendKwargs' is found in kwargs, use those values instead of the defaults
        if 'legendKwargs' in kwargs:
            legendKwargs_new = kwargs['legendKwargs']
            for key in legendKwargs_new:
                if key in legendParams:
                    legendKwargs[key] = legendKwargs_new[key]
        cur_handles, cur_labels = self.axis.get_legend_handles_labels()
        if not handles:
            handles = cur_handles
        if not labels:
            labels = cur_labels
        self.axis.legend(handles, labels, **legendKwargs)
    def addScalebar(self, matchx=True, matchy=True, hidex=True, hidey=True, unitsx=None, unitsy=None, scalex=1.0, scaley=1.0, xmax=None, ymax=None, space=None, **kwargs):
        """Attach a scale bar to the axis (see ``add_scalebar``)."""
        add_scalebar(self.axis, matchx=matchx, matchy=matchy, hidex=hidex, hidey=hidey, unitsx=unitsx, unitsy=unitsy, scalex=scalex, scaley=scaley, xmax=xmax, ymax=ymax, space=space, **kwargs)
    def addColorbar(self, **kwargs):
        """Attach a colorbar for the first image drawn on the axis."""
        plt.colorbar(mappable=self.axis.get_images()[0], ax=self.axis, **kwargs)
    def finishAxis(self, **kwargs):
        """Format the axis, then handle per-axis options (saving data, dpi,
        figure size, legend, scalebar, colorbar, grid) and finish the parent
        figure when this is its only axis."""
        self.formatAxis(**kwargs)
        if 'saveData' in kwargs:
            if kwargs['saveData']:
                self.saveData(**kwargs)
        if 'dpi' in kwargs:
            if kwargs['dpi']:
                self.fig.set_dpi(kwargs['dpi'])
        if 'figSize' in kwargs:
            if kwargs['figSize']:
                self.fig.set_size_inches(kwargs['figSize'])
        if 'legend' in kwargs:
            if kwargs['legend'] is True:
                self.addLegend(**kwargs)
            elif type(kwargs['legend']) == dict:
                self.addLegend(**kwargs['legend'])
        if 'scalebar' in kwargs:
            if kwargs['scalebar'] is True:
                self.addScalebar()
            elif type(kwargs['scalebar']) == dict:
                self.addScalebar(**kwargs['scalebar'])
        if 'colorbar' in kwargs:
            if kwargs['colorbar'] is True:
                self.addColorbar()
            elif type(kwargs['colorbar']) == dict:
                self.addColorbar(**kwargs['colorbar'])
        if 'grid' in kwargs:
            self.axis.minorticks_on()
            if kwargs['grid'] is True:
                self.axis.grid()
            elif type(kwargs['grid']) == dict:
                self.axis.grid(**kwargs['grid'])
        # If this is the only axis on the figure, finish the figure
        # NOTE(review): plt.subplots grids return numpy arrays, not lists —
        # confirm this check really distinguishes single- vs multi-axis figures.
        if type(self.metafig.ax) != list:
            self.metafig.finishFig(**kwargs)
        # Reset the matplotlib rcParams to their original settings
        mpl.style.use(self.metafig.orig_rcParams)
class ScatterPlotter(GeneralPlotter):
    """A class used for scatter plotting"""
    def __init__(self, data, axis=None, **kwargs):
        """Set up a scatter plot; plotting parameters are read from ``data``."""
        # Bug fix: GeneralPlotter.__init__ requires a ``kind`` argument;
        # supply it here unless the caller already passed one in kwargs.
        kwargs.setdefault('kind', 'scatter')
        super().__init__(data=data, axis=axis, **kwargs)
        self.kind = 'scatter'
        self.x = data.get('x')
        self.y = data.get('y')
        self.s = data.get('s')
        self.c = data.get('c')
        self.marker = data.get('marker')
        self.linewidth = data.get('linewidth')
        self.cmap = data.get('cmap')
        self.norm = data.get('norm')
        self.alpha = data.get('alpha')
        self.linewidths = data.get('linewidths')
    def plot(self, **kwargs):
        """Draw the scatter plot, apply finishing options and return the figure."""
        scatterPlot = self.axis.scatter(x=self.x, y=self.y, s=self.s, c=self.c, marker=self.marker, linewidth=self.linewidth, cmap=self.cmap, norm=self.norm, alpha=self.alpha, linewidths=self.linewidths)
        self.finishAxis(**kwargs)
        return self.fig
class LinePlotter(GeneralPlotter):
    """A class used for plotting one line per subplot"""
    def __init__(self, data, axis=None, options=None, **kwargs):
        """Set up a single-line plot; plotting parameters are read from ``data``.

        ``options`` is accepted for interface compatibility but unused; its
        default was changed from a mutable ``{}`` to None.
        """
        # Bug fix: GeneralPlotter.__init__ requires a ``kind`` argument.
        kwargs.setdefault('kind', 'line')
        super().__init__(data=data, axis=axis, **kwargs)
        self.kind = 'line'
        self.x = np.array(data.get('x'))
        self.y = np.array(data.get('y'))
        self.color = data.get('color')
        self.marker = data.get('marker')
        self.markersize = data.get('markersize')
        self.linewidth = data.get('linewidth')
        self.alpha = data.get('alpha')
    def plot(self, **kwargs):
        """Draw the line, apply finishing options and return the figure."""
        linePlot = self.axis.plot(self.x, self.y, color=self.color, marker=self.marker, markersize=self.markersize, linewidth=self.linewidth, alpha=self.alpha)
        self.finishAxis(**kwargs)
        return self.fig
class LinesPlotter(GeneralPlotter):
    """A class used for plotting multiple lines on the same axis"""
    def __init__(self, data, axis=None, options=None, **kwargs):
        """Set up a multi-line plot; plotting parameters are read from ``data``.

        ``options`` is accepted for interface compatibility but unused; its
        default was changed from a mutable ``{}`` to None.
        """
        # Bug fix: GeneralPlotter.__init__ requires a ``kind`` argument.
        kwargs.setdefault('kind', 'lines')
        super().__init__(data=data, axis=axis, **kwargs)
        self.kind = 'lines'
        self.x = np.array(data.get('x'))
        self.y = np.array(data.get('y'))
        self.color = data.get('color')
        self.marker = data.get('marker')
        self.markersize = data.get('markersize')
        self.linewidth = data.get('linewidth')
        self.alpha = data.get('alpha')
        self.label = data.get('label')
    def _broadcast(self, value, numLines):
        # Expand a scalar styling value into a per-line list; lists are
        # assumed to already contain one entry per line.
        if type(value) != list:
            return [value for line in range(numLines)]
        return value
    def plot(self, **kwargs):
        """Plot each row of ``y`` against ``x`` and return the figure."""
        numLines = len(self.y)
        colors = self._broadcast(self.color, numLines)
        markers = self._broadcast(self.marker, numLines)
        markersizes = self._broadcast(self.markersize, numLines)
        linewidths = self._broadcast(self.linewidth, numLines)
        alphas = self._broadcast(self.alpha, numLines)
        # Bug fix: a scalar (e.g. string) label is now replicated per line
        # like the other styling options; previously indexing a string label
        # yielded single characters.
        labels = self._broadcast(self.label, numLines)
        for index, line in enumerate(self.y):
            self.axis.plot(
                self.x,
                self.y[index],
                color=colors[index],
                marker=markers[index],
                markersize=markersizes[index],
                linewidth=linewidths[index],
                alpha=alphas[index],
                label=labels[index],
            )
        self.finishAxis(**kwargs)
        return self.fig
class HistPlotter(GeneralPlotter):
    """A class used for histogram plotting"""
    def __init__(self, data, axis=None, options=None, **kwargs):
        """Set up a histogram; parameters mirror ``matplotlib.axes.Axes.hist``.

        ``options`` is accepted for interface compatibility but unused; its
        default was changed from a mutable ``{}`` to None.
        """
        # Bug fix: GeneralPlotter.__init__ requires a ``kind`` argument.
        kwargs.setdefault('kind', 'histogram')
        super().__init__(data=data, axis=axis, **kwargs)
        self.kind = 'histogram'
        self.x = data.get('x')
        self.bins = data.get('bins', None)
        self.range = data.get('range', None)
        self.density = data.get('density', False)
        self.weights = data.get('weights', None)
        self.cumulative = data.get('cumulative', False)
        self.bottom = data.get('bottom', None)
        self.histtype = data.get('histtype', 'bar')
        self.align = data.get('align', 'mid')
        self.orientation = data.get('orientation', 'vertical')
        self.rwidth = data.get('rwidth', None)
        self.log = data.get('log', False)
        self.color = data.get('color', None)
        self.alpha = data.get('alpha', None)
        self.label = data.get('label', None)
        self.stacked = data.get('stacked', False)
        # NOTE(review): this overwrites self.data set by the parent with the
        # 'data' entry forwarded to hist() — confirm intended.
        self.data = data.get('data', None)
    def plot(self, **kwargs):
        """Draw the histogram, apply finishing options and return the figure."""
        histPlot = self.axis.hist(self.x, bins=self.bins, range=self.range, density=self.density, weights=self.weights, cumulative=self.cumulative, bottom=self.bottom, histtype=self.histtype, align=self.align, orientation=self.orientation, rwidth=self.rwidth, log=self.log, color=self.color, alpha=self.alpha, label=self.label, stacked=self.stacked, data=self.data)
        self.finishAxis(**kwargs)
        return self.fig
class ImagePlotter(GeneralPlotter):
    """A class used for image plotting using plt.imshow"""
    def __init__(self, data, axis=None, options=None, **kwargs):
        """Set up an image plot; parameters mirror ``matplotlib.axes.Axes.imshow``.

        ``options`` is accepted for interface compatibility but unused; its
        default was changed from a mutable ``{}`` to None.
        """
        # Bug fix: GeneralPlotter.__init__ requires a ``kind`` argument.
        kwargs.setdefault('kind', 'image')
        super().__init__(data=data, axis=axis, **kwargs)
        self.kind = 'image'
        self.X = data.get('X')
        self.cmap = data.get('cmap', None)
        self.norm = data.get('norm', None)
        # Duplicate assignments of aspect/interpolation removed (they were
        # read twice with identical defaults).
        self.aspect = data.get('aspect', None)
        self.interpolation = data.get('interpolation', None)
        self.alpha = data.get('alpha', None)
        self.vmin = data.get('vmin', None)
        self.vmax = data.get('vmax', None)
        self.origin = data.get('origin', None)
        self.extent = data.get('extent', None)
        self.filternorm = data.get('filternorm', True)
        self.filterrad = data.get('filterrad', 4.0)
        self.resample = data.get('resample', None)
        self.url = data.get('url', None)
        # NOTE(review): overwrites self.data set by the parent with the
        # 'data' entry forwarded to imshow() — confirm intended.
        self.data = data.get('data', None)
    def plot(self, **kwargs):
        """Draw the image, apply finishing options and return the figure."""
        imagePlot = self.axis.imshow(self.X, cmap=self.cmap, norm=self.norm, aspect=self.aspect, interpolation=self.interpolation, alpha=self.alpha, vmin=self.vmin, vmax=self.vmax, origin=self.origin, extent=self.extent, filternorm=self.filternorm, filterrad=self.filterrad, resample=self.resample, url=self.url, data=self.data)
        self.finishAxis(**kwargs)
        return self.fig
class AnchoredScaleBar(AnchoredOffsetbox):
    """
    A class used for adding scale bars to plots
    """
    def __init__(self, axis, sizex=0, sizey=0, labelx=None, labely=None, loc=4, pad=0.1, borderpad=0.1, sep=2, prop=None, barcolor="black", barwidth=None, **kwargs):
        """
        Draw a horizontal and/or vertical bar with the size in data coordinate
        of the give axes. A label will be drawn underneath (center-aligned).
        - transform : the coordinate frame (typically axes.transData)
        - sizex,sizey : width of x,y bar, in data units. 0 to omit
        - labelx,labely : labels for x,y bars; None to omit
        - loc : position in containing axes
        - pad, borderpad : padding, in fraction of the legend font size (or prop)
        - sep : separation between labels and bars in points.
        - **kwargs : additional arguments passed to base class constructor
        """
        from matplotlib.patches import Rectangle
        from matplotlib.offsetbox import AuxTransformBox, VPacker, HPacker, TextArea, DrawingArea
        # Container that draws its children in the axis' data coordinates
        bars = AuxTransformBox(axis.transData)
        if sizex:
            # Negate the size on an inverted axis so the bar still extends in
            # the expected visual direction.
            if axis.xaxis_inverted():
                sizex = -sizex
            bars.add_artist(Rectangle((0,0), sizex, 0, ec=barcolor, lw=barwidth, fc="none"))
        if sizey:
            if axis.yaxis_inverted():
                sizey = -sizey
            bars.add_artist(Rectangle((0,0), 0, sizey, ec=barcolor, lw=barwidth, fc="none"))
        # Stack the labels with their bars (x label below, y label to the left)
        if sizex and labelx:
            self.xlabel = TextArea(labelx)
            bars = VPacker(children=[bars, self.xlabel], align="center", pad=0, sep=sep)
        if sizey and labely:
            self.ylabel = TextArea(labely)
            bars = HPacker(children=[self.ylabel, bars], align="center", pad=0, sep=sep)
        AnchoredOffsetbox.__init__(self, loc, pad=pad, borderpad=borderpad, child=bars, prop=prop, frameon=False, **kwargs)
def add_scalebar(axis, matchx=True, matchy=True, hidex=True, hidey=True, unitsx=None, unitsy=None, scalex=1.0, scaley=1.0, xmax=None, ymax=None, space=None, **kwargs):
    """
    Add scalebars to axes
    Adds a set of scale bars to *ax*, matching the size to the ticks of the
    plot and optionally hiding the x and y axes
    - axis : the axis to attach ticks to
    - matchx,matchy : if True, set size of scale bars to spacing between
      ticks, if False, set size using sizex and sizey params
    - hidex,hidey : if True, hide x-axis and y-axis of parent
    - **kwargs : additional arguments passed to AnchoredScaleBars
    Returns created scalebar object
    """
    def get_tick_size(subaxis):
        # Spacing between the first two major ticks, or None when there are
        # fewer than two ticks to measure.
        tick_size = None
        tick_locs = subaxis.get_majorticklocs()
        if len(tick_locs) > 1:
            tick_size = np.abs(tick_locs[1] - tick_locs[0])
        return tick_size
    # Robustness fix: default both sizes to 0 (a zero size omits that bar) so
    # they are always bound, even when matchx/matchy are False and no
    # explicit size is supplied, or when an axis has too few ticks.
    sizex = 0
    sizey = 0
    if matchx:
        sizex = get_tick_size(axis.xaxis) or 0
    if matchy:
        sizey = get_tick_size(axis.yaxis) or 0
    # Explicit sizes in kwargs override the tick-derived ones
    if 'sizex' in kwargs:
        sizex = kwargs['sizex']
    if 'sizey' in kwargs:
        sizey = kwargs['sizey']
    def autosize(value, maxvalue, scale, n=1, m=10):
        # Shrink 'value' below 'maxvalue', preferring round numbers; falls
        # back to dividing by 10 when rounding fails (e.g. log10 of 0).
        round_to_n = lambda value, n, m: int(np.ceil(round(value, -int(np.floor(np.log10(abs(value)))) + (n - 1)) / m)) * m
        while value > maxvalue:
            try:
                value = round_to_n(0.8 * maxvalue * scale, n, m) / scale
            except Exception:
                value /= 10.0
            m /= 10.0
        return value
    if ymax is not None and sizey > ymax:
        sizey = autosize(sizey, ymax, scaley)
    if xmax is not None and sizex > xmax:
        sizex = autosize(sizex, xmax, scalex)
    kwargs['sizex'] = sizex
    kwargs['sizey'] = sizey
    if unitsx is None:
        unitsx = ''
    if unitsy is None:
        unitsy = ''
    if 'labelx' not in kwargs or kwargs['labelx'] is None:
        kwargs['labelx'] = '%.3g %s' % (kwargs['sizex'] * scalex, unitsx)
    if 'labely' not in kwargs or kwargs['labely'] is None:
        kwargs['labely'] = '%.3g %s' % (kwargs['sizey'] * scaley, unitsy)
    # add space for scalebar
    if space is not None:
        ylim0, ylim1 = axis.get_ylim()
        ylim = (ylim0 - space, ylim1)
        if ylim0 > ylim1:  # if y axis is inverted
            ylim = (ylim0 + space, ylim1)
        axis.set_ylim(ylim)
    scalebar = AnchoredScaleBar(axis, **kwargs)
    axis.add_artist(scalebar)
    if hidex:
        axis.xaxis.set_visible(False)
    if hidey:
        axis.yaxis.set_visible(False)
    if hidex and hidey:
        axis.set_frame_on(False)
    return scalebar
| StarcoderdataPython |
3402659 | <filename>sarif_om/_logical_location.py
# This file was generated by jschema_to_python version 1.2.3.
import attr
@attr.s
class LogicalLocation(object):
    """A logical location of a construct that produced a result."""
    # Each attribute's metadata maps it to its SARIF schema property name.
    decorated_name = attr.ib(default=None, metadata={"schema_property_name": "decoratedName"})
    fully_qualified_name = attr.ib(default=None, metadata={"schema_property_name": "fullyQualifiedName"})
    # -1 is the SARIF convention for an unset index.
    index = attr.ib(default=-1, metadata={"schema_property_name": "index"})
    kind = attr.ib(default=None, metadata={"schema_property_name": "kind"})
    name = attr.ib(default=None, metadata={"schema_property_name": "name"})
    parent_index = attr.ib(default=-1, metadata={"schema_property_name": "parentIndex"})
    properties = attr.ib(default=None, metadata={"schema_property_name": "properties"})
| StarcoderdataPython |
6474124 | <gh_stars>0
# -*- encoding: utf-8 -*-
"""
MIT License
Copyright (c) 2019 - present AppSeed.us
"""
from django.db import models
from django.contrib.auth.models import User
from django.contrib.postgres.fields import ArrayField
# Create your models here.
class Users(models.Model):
    # Placeholder model with no explicit fields; rows carry only the
    # implicit auto-generated primary key.
    pass
class Buildings(models.Model):
    # Human-readable building name.
    name = models.CharField(max_length=200)
    def get_building_name(self):
        """Return the building's name."""
        return self.name
    def get_building_id(self):
        """Return the building's primary key."""
        return self.id
    def __str__(self):
        return self.name
class Floors(models.Model):
    # Parent building; PROTECT blocks deleting a building that still has floors.
    building = models.ForeignKey(Buildings, on_delete=models.PROTECT)
    # Floor number stored as a string (e.g. "1", "12").
    number = models.CharField(max_length=2)
    def get_building_id(self):
        """Return the related Buildings instance.

        NOTE(review): despite the name, this returns the Buildings object,
        not its id — confirm intended behavior with callers.
        """
        return self.building
    def get_floor_number(self):
        """Return the floor number (string)."""
        return self.number
    def get_floor_id(self):
        """Return this floor's primary key."""
        return self.id
class Rooms(models.Model):
    # Floor this room is on; PROTECT blocks deleting a floor that has rooms.
    floor = models.ForeignKey(Floors, on_delete=models.PROTECT)
    # Room number stored as a string (e.g. "101").
    room_number = models.CharField(max_length=4)
    # Occupancy limits; nullable while unknown.
    max_occupancy = models.IntegerField(null=True)
    max_pandemic_occupancy = models.IntegerField(null=True)
    # Link to the room's floor-plan image.
    blueprint = models.URLField(null=True)
    def get_floor_id(self):
        """Return the related Floors instance.

        NOTE(review): despite the name, this returns the Floors object,
        not its id — confirm intended behavior with callers.
        """
        return self.floor
    def get_room_number(self):
        """Return the room number (string)."""
        return self.room_number
    def get_room_id(self):
        """Return this room's primary key."""
        return self.id
    def get_max_occupancy(self):
        """Return the normal maximum occupancy (may be None)."""
        return self.max_occupancy
    def get_max_pandemic_occupancy(self):
        """Return the reduced pandemic-mode occupancy (may be None)."""
        return self.max_pandemic_occupancy
    def get_blueprint(self):
        """Return the blueprint URL (may be None)."""
        return self.blueprint
class Heatmaps(models.Model):
    # Owner of this heatmap; PROTECT blocks deleting a referenced user.
    user = models.ForeignKey(Users, on_delete=models.PROTECT)
    # 2-D grid of probabilities, each stored with 10 decimal places.
    probabilities = ArrayField(
        ArrayField(models.DecimalField(max_digits=11, decimal_places=10))
    )
    room = models.ForeignKey(Rooms, on_delete=models.PROTECT)
    def get_user(self):
        """Return the related Users instance."""
        return self.user
    def get_probabilities(self):
        """Return the 2-D probability grid."""
        return self.probabilities
    def get_room(self):
        """Return the related Rooms instance."""
        return self.room
| StarcoderdataPython |
8108359 | <reponame>allenalvin333/Hackerrank_Interview
# Udaan Preliminary Test
# In-memory appointment book driven by commands on stdin.
# database maps user name -> list of event records of the form
# [event_name, start, duration, n_users, user1, user2, ...].
database = {} # User is key, appointment is value
for T in range(int(input())):
    l = list(input().split())
    if(l[0]=="add-user"): # Input Command Taken
        # Register a new user; fails if the name is already taken.
        if(l[1] in database): print("failure")
        else:
            print("success")
            database[l[1]]=[]
    elif(l[0]=="create-event"): # Input Command Taken
        # l = [cmd, event_name, start, duration, n_users, users...]
        b = 0
        for z in range(int(l[4])):
            if(b==1): break
            # Convert start/duration to ints (idempotently re-assigned each pass).
            l[2:4] = list(map(int,l[2:4]))
            for i in range(len(database[l[5+z]])):
                k1,k2 = database[l[5+z]][i][1],database[l[5+z]][i][2]
                if(l[1]==database[l[5+z]][i][0]):
                    # Overlap test between the existing booking [k1, k1+k2)
                    # and the requested slot [start, start+duration).
                    if(any(x in range(k1,k1+k2) for x in range(l[2],l[2]+l[3]))):
                        print("failure")
                        b = 1
        else:
            # for-else: runs when the loop above finished without break,
            # i.e. no conflict forced an early exit; book the event for all.
            # NOTE(review): a conflict found on the *last* user sets b but
            # never triggers the break, so this branch still runs and prints
            # "success" after "failure" — confirm intended.
            for z in range(int(l[4])):
                database[l[5+z]].append(l[1:])
            print("success")
    elif(l[0]=="show-events"): # Input Command Taken
        # Print "start-end" plus attendee list for event l[1] of user l[2].
        for z in database[l[2]]:
            if(z[0]==l[1]):
                print("{}-{}".format(z[1],z[1]+z[2]),end=" ")
                print(*z[4:])
    elif(l[0]=="suggest-slot"): # Input Command Taken
        # m collects the minutes in [l[2], l[3]) still free for every user in
        # l[6:] with respect to event l[1].
        m = list(range(int(l[2]),int(l[3])))
        for z in l[6:]:
            for y in database[z]:
                if(y[0]==l[1]):
                    for a in range(y[1],y[1]+y[2]):
                        if a in m: m.remove(a)
        # First start time with l[4] consecutive free minutes, else "none".
        for ele in m:
            nr=0
            for q in range(ele,ele+int(l[4])):
                if q not in m: nr=1
            if(nr==0):
                print(ele)
                break
        else: print("none")
9667880 | <reponame>RalphWalters/thredo
# Example 3
#
# Cancelling a thread
import thredo
def hello(sec):
    """Sleep for ``sec`` seconds on a thredo thread, reporting whether the
    sleep completed or the thread was cancelled mid-sleep."""
    print("Yawn")
    try:
        thredo.sleep(sec)
        print("Awake")
    except thredo.ThreadCancelled:
        # thredo delivers cancellation as an exception raised at the
        # blocking call.
        print("Cancelled!")
def main():
    """Spawn a long-sleeping thread, wait 5 seconds, then cancel it."""
    t = thredo.spawn(hello, 100)
    thredo.sleep(5)
    # cancel() interrupts the spawned thread's blocking sleep, raising
    # ThreadCancelled inside hello().
    t.cancel()
    print("Goodbye")
thredo.run(main)
| StarcoderdataPython |
8108960 | <reponame>liquidinstruments/pymoku
import math
import logging
import warnings
from pymoku._instrument import to_reg_unsigned
from pymoku._instrument import from_reg_unsigned
from pymoku._instrument import to_reg_signed
from pymoku._instrument import from_reg_signed
from pymoku._instrument import deprecated
from pymoku._instrument import MokuInstrument
from pymoku._instrument import needs_commit
from pymoku._instrument import ValueOutOfRangeException
from pymoku._instrument import DAC_SMP_RATE
from pymoku import _utils
from pymoku._trigger import Trigger
from pymoku._sweep_generator import SweepGenerator
warnings.simplefilter('always', DeprecationWarning)
log = logging.getLogger(__name__)
# Base addresses of the per-channel modulation and waveform register banks.
REG_BASE_MOD_0 = 43
REG_BASE_MOD_1 = 60
REG_BASE_WAV_0 = 80
REG_BASE_WAV_1 = 104
# Gate threshold registers (low/high words per channel).
REG_GATETHRESH_L_CH1 = 76
REG_GATETHRESH_H_CH1 = 77
REG_GATETHRESH_L_CH2 = 78
REG_GATETHRESH_H_CH2 = 79
# Waveform type selectors.
_WG_WAVE_SINE = 0
_WG_WAVE_SQUARE = 1
# Modulation type flags.
_WG_MOD_NONE = 0
_WG_MOD_AMPL = 1
_WG_MOD_FREQ = 2
_WG_MOD_PHASE = 4
# Modulation source selectors.
_WG_MODSOURCE_INT = 0
_WG_MODSOURCE_ADC = 1
_WG_MODSOURCE_DAC = 2
# Fixed-point scale factors between physical units and register units.
_WG_FREQSCALE = 1.0e9 / 2**64
_WG_FREQSCALE_SQR = 1.0e9 / 2**48
_WG_PERIODSCALE_SQR = 2**48 - 1
_WG_RISESCALE = 2**24
_WG_MAX_RISE = 1.0 / (2 ** 39 - 1)
_WG_TIMESCALE = 1.0 / (2**32 - 1) # Doesn't wrap
# Modulation limits: maximum frequency and full-scale (100%) depth in bits.
_WG_MOD_FREQ_MAX = 62.5e6
_WG_MOD_DEPTH_MAX = 2.0 ** 31 - 1 # 100% modulation depth in bits
# Trigger source selectors.
_WG_TRIG_ADC1 = 0
_WG_TRIG_ADC2 = 1
_WG_TRIG_DAC1 = 2
_WG_TRIG_DAC2 = 3
_WG_TRIG_EXT = 4
_WG_TRIG_INTER = 5
# Modulation input selectors.
_WG_MOD_ADC1 = 0
_WG_MOD_ADC2 = 1
_WG_MOD_DAC1 = 2
_WG_MOD_DAC2 = 3
_WG_MOD_INTER = 4
_WG_MOD_GATE = 5
# Gate source selectors.
_WG_GATE_ADC = 0
_WG_GATE_DAC = 1
_WG_GATE_SWEEP = 2
_WG_GATE_EXT = 3
# Trigger operating modes.
_WG_TRIG_MODE_OFF = 0
_WG_TRIG_MODE_GATE = 1
_WG_TRIG_MODE_START = 2
_WG_TRIG_MODE_NCYCLE = 3
_WG_TRIG_MODE_SWEEP = 4
# Trigger level limits (Volts) for ADC and DAC sources.
_WG_TRIGLVL_ADC_MAX = 5.0
_WG_TRIGLVL_ADC_MIN = -5.0
_WG_TRIGLVL_DAC_MAX = 1.0
_WG_TRIGLVL_DAC_MIN = -1.0
class BasicWaveformGenerator(MokuInstrument):
    """
    .. automethod:: pymoku.instruments.WaveformGenerator.__init__
    """
    def __init__(self):
        """ Create a new WaveformGenerator instance, ready to be attached to a
        Moku."""
        super(BasicWaveformGenerator, self).__init__()
        self._register_accessors(_wavegen_reg_handlers)
        # Instrument id/name as known to the Moku deployment machinery.
        self.id = 4
        self.type = "signal_generator"
        # One phase-accumulating sweep generator per output channel, mapped
        # onto the per-channel waveform register banks.
        self._sweep1 = SweepGenerator(self, REG_BASE_WAV_0 + 3)
        self._sweep2 = SweepGenerator(self, REG_BASE_WAV_1 + 3)
        self.enable_reset_ch1 = False
        self.enable_reset_ch2 = False
    @needs_commit
    def set_defaults(self):
        """ Restore a default configuration: outputs enabled at zero
        amplitude, sweep generators idle, 50R DC-coupled frontends."""
        super(BasicWaveformGenerator, self).set_defaults()
        self.enable_ch1 = True
        self.enable_ch2 = True
        self.out1_amplitude = 0
        self.out2_amplitude = 0
        self.adc1_statuslight = False
        self.adc2_statuslight = False
        # Init channel sweep gens:
        self._set_sweepgenerator(self._sweep1, 0, 0, 0, 0, 0, 0, 0)
        self._set_sweepgenerator(self._sweep2, 0, 0, 0, 0, 0, 0, 0)
        # Disable inputs on hardware that supports it
        # NOTE(review): the comment above says "disable" but both flags are
        # set True — confirm intent.
        self.en_in_ch1 = True
        self.en_in_ch2 = True
        # Configure front end:
        self._set_frontend(channel=1, fiftyr=True, atten=False, ac=False)
        self._set_frontend(channel=2, fiftyr=True, atten=False, ac=False)
    def _set_sweepgenerator(self, sweepgen, waveform=None, waitfortrig=None,
                            frequency=None, offset=None, logsweep=None,
                            duration=None, holdlast=None):
        """ Configure one channel's sweep generator.

        Parameters passed as None leave the corresponding register untouched
        (apart from the fixed fields written below).  ``offset`` is a phase
        in degrees, ``frequency`` in Hz, ``duration`` in seconds.
        """
        # Fixed configuration: mode 2, free-running to the top of the range.
        sweepgen.waveform = 2
        sweepgen.stop = (2**64 - 1)
        sweepgen.direction = 0
        if waitfortrig is not None:
            sweepgen.waitfortrig = waitfortrig
        if offset is not None:
            # Degrees mapped onto the full 64-bit phase accumulator range.
            sweepgen.start = offset / 360.0 * (2**64 - 1)
        if frequency is not None:
            sweepgen.step = frequency / _WG_FREQSCALE
        if duration is not None:
            # Seconds converted to 125 MHz clock cycles.
            sweepgen.duration = duration * 125.0e6
        if logsweep is not None:
            sweepgen.logsweep = logsweep
        if holdlast is not None:
            sweepgen.holdlast = holdlast
    @needs_commit
    def gen_sinewave(self, ch, amplitude, frequency, offset=0, phase=0.0):
        """ Generate a Sine Wave with the given parameters on the given
        channel.
        :type ch: int; {1,2}
        :param ch: Channel on which to generate the wave
        :type amplitude: float, [0.0,2.0] Vpp
        :param amplitude: Waveform peak-to-peak amplitude
        :type frequency: float, [0,250e6] Hz
        :param frequency: Frequency of the wave
        :type offset: float, [-1.0,1.0] Volts
        :param offset: DC offset applied to the waveform
        :type phase: float, [0-360] degrees
        :param phase: Phase offset of the wave
        """
        _utils.check_parameter_valid('set', ch, [1, 2], 'output channel')
        _utils.check_parameter_valid(
            'range', amplitude, [0.0, 2.0], 'sinewave amplitude', 'Volts')
        _utils.check_parameter_valid(
            'range', frequency, [0, 250e6], 'sinewave frequency', 'Hz')
        _utils.check_parameter_valid(
            'range', offset, [-1.0, 1.0], 'sinewave offset', 'Volts')
        _utils.check_parameter_valid(
            'range', phase, [0, 360], 'sinewave phase', 'degrees')
        # Ensure offset does not cause signal to exceed allowable 2.0Vpp range
        upper_voltage = offset + (amplitude / 2.0)
        lower_voltage = offset - (amplitude / 2.0)
        if (upper_voltage > 1.0) or (lower_voltage < -1.0):
            raise ValueOutOfRangeException(
                "Sinewave offset limited by amplitude (max output "
                "range 2.0Vpp).")
        if ch == 1:
            self.enable_ch1 = True
            self._set_sweepgenerator(
                sweepgen=self._sweep1, frequency=frequency, offset=phase)
            self.amplitude_ch1 = amplitude
            self.offset_ch1 = offset
            self.waveform_type_ch1 = _WG_WAVE_SINE
            # Phase-delay compensation: 11 cycles at the 125 MS/s update rate,
            # expressed as a fraction of one period in 32-bit phase units.
            self.phase_dly_ch1 = (11 * frequency / 125e6) % 1 * 2**32
        elif ch == 2:
            self.enable_ch2 = True
            self._set_sweepgenerator(
                sweepgen=self._sweep2, frequency=frequency, offset=phase)
            self.amplitude_ch2 = amplitude
            self.offset_ch2 = offset
            self.waveform_type_ch2 = _WG_WAVE_SINE
            self.phase_dly_ch2 = (11 * frequency / 125e6) % 1 * 2**32
    @needs_commit
    def gen_squarewave(self, ch, amplitude, frequency, offset=0.0,
                       duty=0.5, risetime=0.0, falltime=0.0, phase=0.0):
        """ Generate a Square Wave with given parameters on the given channel.
        :type ch: int; {1,2}
        :param ch: Channel on which to generate the wave
        :type amplitude: float, [0, 2.0] volts
        :param amplitude: Waveform peak-to-peak amplitude
        :type frequency: float, [0, 100e6] hertz
        :param frequency: Frequency of the wave
        :type offset: float, [-1.0, 1.0] volts
        :param offset: DC offset applied to the waveform
        :type duty: float, [0, 1.0]
        :param duty: Fractional duty cycle
        :type risetime: float, [0, 1.0]
        :param risetime: Fraction of a cycle taken for the waveform to rise
        :type falltime: float [0, 1.0]
        :param falltime: Fraction of a cycle taken for the waveform to fall
        :type phase: float, degrees 0-360
        :param phase: Phase offset of the wave
        """
        _utils.check_parameter_valid('set', ch, [1, 2], 'output channel')
        _utils.check_parameter_valid(
            'range', amplitude, [0.0, 2.0], 'squarewave amplitude', 'Volts')
        _utils.check_parameter_valid(
            'range', frequency, [0, 100e6], 'squarewave frequency', 'Hz')
        _utils.check_parameter_valid(
            'range', offset, [-1.0, 1.0], 'squarewave offset', 'Volts')
        _utils.check_parameter_valid(
            'range', duty, [0, 1.0], 'squarewave duty', 'cycles')
        _utils.check_parameter_valid(
            'range', risetime, [0, 1.0], 'squarewave risetime', 'cycles')
        _utils.check_parameter_valid(
            'range', falltime, [0, 1.0], 'squarewave falltime', 'cycles')
        _utils.check_parameter_valid(
            'range', phase, [0, 360], 'squarewave phase', 'degrees')
        # Ensure offset does not cause signal to exceed allowable 2.0Vpp range
        upper_voltage = offset + (amplitude / 2.0)
        lower_voltage = offset - (amplitude / 2.0)
        if (upper_voltage > 1.0) or (lower_voltage < -1.0):
            raise ValueOutOfRangeException(
                "Squarewave offset limited by amplitude (max output "
                "range 2.0Vpp).")
        frequency = float(frequency)
        if duty < risetime:
            raise ValueOutOfRangeException(
                "Squarewave duty too small for given rise time.")
        elif duty + falltime > 1:
            raise ValueOutOfRangeException(
                "Squarewave duty and fall time too big.")
        # ensure duty cycle and fall/rise time combinations don't overflow
        if frequency != 0:
            # Minimum edge time of 4 ns expressed as a fraction of one period.
            minedgetime = 4.0e-9 * frequency
            if risetime < minedgetime:
                risetime = minedgetime
                log.warning(
                    "WARNING: Risetime restricted to minimum value of 4 ns.")
            if falltime < minedgetime:
                falltime = minedgetime
                log.warning(
                    "WARNING: Falltime restricted to minimum value of 4 ns.")
            if duty < minedgetime:
                duty = minedgetime
                log.warning("WARNING: Duty cycle restricted to %s" % duty)
            if duty > 1 - minedgetime:
                duty = 1 - minedgetime
                log.warning("WARNING: Duty cycle restricted to %s" % duty)
            if risetime > 1 - minedgetime:
                risetime = 1 - minedgetime
                log.warning("WARNING: Risetime restricted to maximum value.")
            if falltime > 1 - minedgetime:
                falltime = 1 - minedgetime
                log.warning("WARNING: Falltime restricted to maximum value.")
        else:
            # DC case: use the smallest representable rise/fall fractions.
            falltime = _WG_MAX_RISE
            risetime = _WG_MAX_RISE
        # Set rise/fall rate and t0, t1 and t2
        t0 = risetime
        t1 = duty
        t2 = duty + falltime
        phase_dly = 0
        if ch == 1:
            self.waveform_type_ch1 = _WG_WAVE_SQUARE
            self.enable_ch1 = True
            self._set_sweepgenerator(sweepgen=self._sweep1,
                                     frequency=frequency,
                                     offset=phase,
                                     holdlast=0)
            self.amplitude_ch1 = amplitude
            self.offset_ch1 = offset
            # This is overdefined, but saves the FPGA doing a tricky division
            self.t0_ch1 = t0
            self.t1_ch1 = t1
            self.t2_ch1 = t2
            self.riserate_ch1 = risetime
            self.fallrate_ch1 = -falltime
            self.phase_dly_ch1 = phase_dly
        elif ch == 2:
            self.waveform_type_ch2 = _WG_WAVE_SQUARE
            self.enable_ch2 = True
            self._set_sweepgenerator(sweepgen=self._sweep2,
                                     frequency=frequency,
                                     offset=phase,
                                     holdlast=0)
            self.amplitude_ch2 = amplitude
            self.offset_ch2 = offset
            self.t0_ch2 = t0
            self.t1_ch2 = t1
            self.t2_ch2 = t2
            self.riserate_ch2 = risetime
            self.fallrate_ch2 = -falltime
            self.phase_dly_ch2 = phase_dly
    @needs_commit
    def gen_rampwave(
            self, ch, amplitude, frequency, offset=0, symmetry=0.5, phase=0.0):
        """ Generate a Ramp with the given parameters on the given channel.
        This is a wrapper around the Square Wave generator,
        using the *riserate* and *fallrate* parameters to form the ramp.
        :type ch: int; {1,2}
        :param ch: Channel on which to generate the wave
        :type amplitude: float, [0, 2.0] volts
        :param amplitude: Waveform peak-to-peak amplitude
        :type frequency: float, [0, 100e6] hertz
        :param frequency: Frequency of the wave
        :type offset: float, [-1.0, 1.0] volts
        :param offset: DC offset applied to the waveform
        :type symmetry: float, [0, 1.0]
        :param symmetry: Fraction of the cycle rising.
        :type phase: float, degrees [0, 360]
        :param phase: Phase offset of the wave
        """
        _utils.check_parameter_valid('set', ch, [1, 2], 'output channel')
        _utils.check_parameter_valid(
            'range', amplitude, [0.0, 2.0], 'rampwave amplitude', 'Volts')
        _utils.check_parameter_valid(
            'range', frequency, [0, 100e6], 'rampwave frequency', 'Hz')
        _utils.check_parameter_valid(
            'range', offset, [-1.0, 1.0], 'rampwave offset', 'cycles')
        _utils.check_parameter_valid(
            'range', symmetry, [0, 1.0], 'rampwave symmetry', 'fraction')
        _utils.check_parameter_valid(
            'range', phase, [0, 360], 'rampwave phase', 'degrees')
        # Ensure offset does not cause signal to exceed allowable 2.0Vpp range
        upper_voltage = offset + (amplitude / 2.0)
        lower_voltage = offset - (amplitude / 2.0)
        if (upper_voltage > 1.0) or (lower_voltage < -1.0):
            raise ValueOutOfRangeException(
                "Rampwave offset limited by amplitude "
                "(max output range 2.0Vpp).")
        # Delegate to the square wave generator: the ramp is a square wave
        # whose rise/fall occupy the whole cycle.
        self.gen_squarewave(ch, amplitude, frequency,
                            offset=offset, duty=symmetry,
                            risetime=symmetry,
                            falltime=1 - symmetry,
                            phase=phase)
    @needs_commit
    def sync_phase(self):
        """ Synchronize the phase of both output channels.
        The phase of both channels is reset to their respective phase offset
        values.
        """
        self.enable_reset_ch1 = True
        self.enable_reset_ch2 = True
    @needs_commit
    def gen_off(self, ch=None):
        """ Turn Waveform Generator output(s) off.
        The channel will be turned on when configuring the waveform type but
        can be turned off using this function. If *ch* is None (the default),
        both channels will be turned off, otherwise just the one specified by
        the argument.
        :type ch: int; {1,2} or None
        :param ch: Channel to turn off, or both.
        """
        _utils.check_parameter_valid(
            'set', ch, [1, 2], 'output channel', allow_none=True)
        if ch is None or ch == 1:
            self.enable_ch1 = False
        if ch is None or ch == 2:
            self.enable_ch2 = False
class WaveformGenerator(BasicWaveformGenerator):
""" Waveform Generator instrument object.
To run a new Waveform Generator instrument, this should be instantiated
and deployed via a connected :any:`Moku` object using :any:
`deploy_instrument`. Alternatively, a pre-configured instrument object can
be obtained by discovering an already running Waveform Generator
instrument on a Moku:Lab device via
:any:`discover_instrument`.
.. automethod:: pymoku.instruments.WaveformGenerator.__init__
.. attribute:: type
:annotation: = "signal_generator"
Name of this instrument.
"""
def __init__(self):
""" Create a new WaveformGenerator instance, ready to be attached to a
Moku."""
super(WaveformGenerator, self).__init__()
self._register_accessors(_wavegen_mod_reg_handlers)
# Define any (non-register-mapped) properties that are used when
# committing as a commit is called when the instrument is set running
self.trig_volts_ch1 = 0.0
self.trig_volts_ch2 = 0.0
self._trigger1 = Trigger(self, 28)
self._trigger2 = Trigger(self, 45)
self._sweepmod1 = SweepGenerator(self, 34)
self._sweepmod2 = SweepGenerator(self, 51)
@needs_commit
def set_defaults(self):
super(WaveformGenerator, self).set_defaults()
self._init_trig_modulation(1)
self._init_trig_modulation(2)
self.phasedly_en_ch1 = 1
self.phasedly_en_ch2 = 1
self.sine_trigdly_ch1 = 0
self.sine_trigdly_ch2 = 0
def _init_trig_modulation(self, ch):
# initialise the state of all modules used in modulation/trigger/sweep
# modes
if ch == 1:
# Set AM/FM/PM and sweep enable to zero:
self.amod_enable_ch1 = False
self.fmod_enable_ch1 = False
self.pmod_enable_ch1 = False
self.sweep_enable_ch1 = False
# Default trigger module values:
self._trigger1.trigtype = 0
self._trigger1.edge = 0
self._trigger1.pulsetype = 0
self._trigger1.hysteresis = 0
self._trigger1.timer = 0
self._trigger1.holdoff = 0
self._trigger1.auto_holdoff = 0
self._trigger1.ntrigger = 0
self._trigger1.ntrigger_mode = 0
self._trigger1.level = 0
self._trigger1.duration = 0
# Default modulating sweep generator values:
self._sweepmod1.waveform = 0
self._sweepmod1.waitfortrig = 0
self._sweepmod1.holdlast = 0
self._sweepmod1.direction = 0
self._sweepmod1.logsweep = 0
self._sweepmod1.start = 0
self._sweepmod1.stop = 0
self._sweepmod1.step = 0
self._sweepmod1.duration = 0
# Trigger/modulation/gate source/threshold default values:
self.trig_source_ch1 = _WG_TRIG_ADC1
self.mod_source_ch1 = _WG_MOD_ADC1
self.gate_thresh_ch1 = 0
self.mod_depth_ch1 = 0
# Default waveform sweep generator values that are touched in
# modulation/trigger/sweep modes:
self._sweep1.waitfortrig = 0
self._sweep1.duration = 0
self._sweep1.holdlast = 0
# Gated mode flag used to toggle amplitude division by 2 on
# the FPGA
self.gate_mode_ch1 = 0
# Trigger mode flag to enable calibration calculations in
# _update_dependent_regs function
self.trig_sweep_mode_ch1 = 0
# Phase delay flag, trig delay flag
self.phasedly_en_ch1 = 1
self.sine_trigdly_ch1 = 0
else:
# Set AM/FM/PM and sweep enable to zero:
self.amod_enable_ch2 = False
self.fmod_enable_ch2 = False
self.pmod_enable_ch2 = False
self.sweep_enable_ch2 = False
# Default trigger module values:
self._trigger2.trigtype = 0
self._trigger2.edge = 0
self._trigger2.pulsetype = 0
self._trigger2.hysteresis = 0
self._trigger2.timer = 0
self._trigger2.holdoff = 0
self._trigger2.auto_holdoff = 0
self._trigger2.ntrigger = 0
self._trigger2.ntrigger_mode = 0
self._trigger2.level = 0
self._trigger2.duration = 0
# Default modulating sweep generator values:
self._sweepmod2.waveform = 0
self._sweepmod2.waitfortrig = 0
self._sweepmod2.holdlast = 0
self._sweepmod2.direction = 0
self._sweepmod2.logsweep = 0
self._sweepmod2.start = 0
self._sweepmod2.stop = 0
self._sweepmod2.step = 0
self._sweepmod2.duration = 0
# Trigger/modulation/gate source/threshold default values:
self.trig_source_ch2 = _WG_TRIG_ADC2
self.mod_source_ch2 = _WG_MOD_ADC2
self.gate_thresh_ch2 = 0
self.mod_depth_ch2 = 0
# Default waveform sweep generator values that are touched in
# modulation/trigger/sweep modes:
self._sweep2.waitfortrig = 0
self._sweep2.duration = 0
self._sweep2.holdlast = 0
# Gated mode flag used to toggle amplitude division by 2 on
# the FPGA
self.gate_mode_ch2 = 0
# Trigger mode flag to enable calibration calculations in
# _update_dependent_regs function
self.trig_sweep_mode_ch2 = 0
# Phase delay flag, trig delay flag
self.phasedly_en_ch2 = 1
self.sine_trigdly_ch2 = 0
@needs_commit
@deprecated(category='param',
message="'in' and 'out' trigger sources have been deprecated."
" Use 'adc1', 'adc2', 'dac1' or 'dac2' instead.")
def set_trigger(self, ch, mode, ncycles=1, sweep_start_freq=None,
sweep_end_freq=0, sweep_duration=1.0e-3,
trigger_source='adc1', trigger_threshold=0.0,
internal_trig_period=1.0, internal_trig_high=0.5):
""" Configure gated, start, ncycle or sweep trigger mode on target
channel.
The trigger event can come from an ADC input channel, the opposite
generated waveform, the external trigger input (for hardware that
supports that) or a internally-generated clock of configurable
period.
The trigger event can be used in several different ways:
- *gated*: The output waveform is only generated while the trigger is
asserted
- *start*: The output waveform is enabled once the trigger event fires
- *ncycle*: The output waveform starts at a trigger event and
completes the given number of cycles, before turning off and
re-arming
- *sweep*: The trigger event starts the waveform generation at the
*sweep_start_freq*, before automatically sweeping the
frequency to *sweep_end_freq* over the course of *sweep_duration*
seconds.
:type ch: int
:param ch: target channel.
:type mode: string, {'gated', 'start', 'ncycle', 'sweep', 'off'}
:param mode: Select the mode in which the trigger is operated.
:type ncycles: int, [1, 1e6]
:param ncycles: integer number of signal repetitions in ncycle mode.
:type sweep_start_freq: float, [0.0,250.0e6], hertz
:param sweep_start_freq: starting sweep frequency, set to current
waveform frequency if not specified. Value range may vary for
different waveforms.
:type sweep_end_freq: float, [0.0,250.0e6], hertz
:param sweep_end_freq: finishing sweep frequency. Value range may vary
for different waveforms.
:type sweep_duration: float, [1.0e-3,1000.0], seconds
:param sweep_duration: sweep duration in seconds.
:type trigger_source: string {'adc1','adc2', 'dac1', 'dac2',
'external', 'internal', 'in', 'out'}
:param trigger_source: defines which source should be used as
triggering signal. In and out sources are deprecated.
:type trigger_threshold: float, [-5, 5], volts
:param trigger_threshold: The threshold value range dependes on the
source and the attenution used. Values ranges might be less for
different settings.
:type internal_trig_period: float, [0,1e11], seconds
:param internal_trig_period: period of the internal trigger clock,
if used.
:type internal_trig_high: float, [0,1e11], seconds
:param internal_trig_high: High time of the internal trigger clock,
if used. Must be less than the internal trigger period.
"""
_utils.check_parameter_valid('set', ch, [1, 2], 'output channel')
_utils.check_parameter_valid(
'set', mode, ['gated', 'start', 'ncycle', 'sweep'], 'trigger mode')
_utils.check_parameter_valid(
'set', trigger_source, ['adc1',
'adc2',
'dac1',
'dac2',
'external',
'internal',
'in',
'out'], 'trigger source')
_utils.check_parameter_valid('range', ncycles, [1, 1e6], 'ncycles')
_utils.check_parameter_valid(
'range', sweep_duration, [0.001, 1000.0],
'sweep duration', 'seconds')
_utils.check_parameter_valid(
'range', internal_trig_period, [100.0e-9, 1000.0],
'internal trigger period', 'seconds')
_utils.check_parameter_valid(
'range', internal_trig_high, [10.0e-9, 1000.0],
'internal trigger high time', 'seconds')
if trigger_source in ['in', 'out']:
warnings.warn(
message="'in' and 'out' trigger sources have been deprecated. "
"Use 'adc1', 'adc2', 'dac1' or 'dac2' instead.",
category=DeprecationWarning,
stacklevel=1
)
# 'in' and 'out' trigger sources are deprecated sources.
# Convert to adc/dac source type:
if ch == 1:
if trigger_source == 'in':
trigger_source = 'adc1'
elif trigger_source == 'out':
trigger_source = 'dac2'
if ch == 2:
if trigger_source == 'in':
trigger_source = 'adc2'
elif trigger_source == 'out':
trigger_source = 'dac1'
# Can't use current channel as trigger mode source:
if ch == 1 and trigger_source == 'dac1':
raise ValueOutOfRangeException(
"dac1 cannot be used as the trigger source for trigger "
"mode on channel 1.")
elif ch == 2 and trigger_source == 'dac2':
raise ValueOutOfRangeException(
"dac2 cannot be used as the trigger source for trigger "
"mode on channel 2.")
# Can't use modulation with trigger/sweep modes
self.set_modulate_trig_off(ch)
# Configure trigger and source settings:
if ch == 1:
_WG_TRIG_ADC = _WG_TRIG_ADC2
_WG_TRIG_DAC = _WG_TRIG_DAC1
else:
_WG_TRIG_ADC = _WG_TRIG_ADC1
_WG_TRIG_DAC = _WG_TRIG_DAC2
_str_to_trigger_source = {
'adc1': _WG_TRIG_ADC1,
'adc2': _WG_TRIG_ADC2,
'dac1': _WG_TRIG_DAC1,
'dac2': _WG_TRIG_DAC2,
'external': _WG_TRIG_EXT,
'internal': _WG_TRIG_INTER
}
trigger_source = _utils.str_to_val(_str_to_trigger_source,
trigger_source,
'trigger source')
if trigger_source is _WG_TRIG_ADC:
_utils.check_parameter_valid('range', trigger_threshold,
[_WG_TRIGLVL_ADC_MIN,
_WG_TRIGLVL_ADC_MAX],
'trigger threshold', 'Volts')
elif trigger_source is _WG_TRIG_DAC:
_utils.check_parameter_valid('range', trigger_threshold,
[_WG_TRIGLVL_DAC_MIN,
_WG_TRIGLVL_DAC_MAX],
'trigger threshold', 'Volts')
# The internal trigger's duty cycle is only used in gated burst mode.
# Duty cycle is limited such that the duty period is not
# less than 8 ns and not greater than the trigger period minus 8 ns.
if internal_trig_high > internal_trig_period:
raise ValueOutOfRangeException(
"Internal trigger high must be less"
" than or equal to the internal trigger period.")
if (internal_trig_period - internal_trig_high) <= 8.0e-9:
internal_trig_high = internal_trig_period - 10.0e-9
if ch == 1:
self._trigger1.trigtype = 0
self._trigger1.edge = 0
self.trig_sweep_mode_ch1 = 1
elif ch == 2:
self._trigger1.trigtype = 0
self._trigger1.edge = 0
self.trig_sweep_mode_ch2 = 1
# Configure trigger mode settings:
_str_to_trigger_mode = {
'gated': _WG_TRIG_MODE_GATE,
'start': _WG_TRIG_MODE_START,
'ncycle': _WG_TRIG_MODE_NCYCLE,
'sweep': _WG_TRIG_MODE_SWEEP
}
mode = _utils.str_to_val(_str_to_trigger_mode, mode, 'trigger mode')
# set status light register
if ch == 1:
self.adc1_statuslight = True if (
trigger_source == _WG_TRIG_ADC1) else False
else:
self.adc2_statuslight = True if (
trigger_source == _WG_TRIG_ADC2) else False
if sweep_start_freq is None or mode != _WG_TRIG_MODE_SWEEP:
channel_frequency = (self._sweep1.step * _WG_FREQSCALE) \
if ch == 1 else (self._sweep2.step * _WG_FREQSCALE)
else:
channel_frequency = sweep_start_freq
waveform = self.waveform_type_ch1 if ch == 1 else \
self.waveform_type_ch2
# if waveform is a sinewave certain ranges do change
if waveform == _WG_WAVE_SINE:
_utils.check_parameter_valid('range',
sweep_end_freq,
[0.0, 250.0e6],
'sweep finishing frequency',
'frequency')
_utils.check_parameter_valid('range',
channel_frequency,
[0.0, 250.0e6],
'sweep starting frequency',
'frequency')
else:
_utils.check_parameter_valid('range',
sweep_end_freq,
[0.0, 100.0e6],
'sweep finishing frequency',
'frequency')
_utils.check_parameter_valid('range',
channel_frequency,
[0.0, 100.0e6],
'sweep starting frequency',
'frequency')
# minimum frequency deviation in sweep mode is 1 mHz
if abs(channel_frequency - sweep_end_freq) < 1.0e-3:
raise ValueOutOfRangeException(
"Frequency deviation in sweep mode is restricted to values "
"greater than 1 mHz.")
if mode == _WG_TRIG_MODE_GATE:
self._set_trigger_gated(ch, waveform, trigger_source,
trigger_threshold, internal_trig_period,
internal_trig_high)
elif mode == _WG_TRIG_MODE_START:
self._set_trigger_start(ch, trigger_source, trigger_threshold)
elif mode == _WG_TRIG_MODE_NCYCLE:
self._set_trigger_ncycle(ch, channel_frequency, ncycles,
trigger_threshold, trigger_source,
internal_trig_period)
elif mode == _WG_TRIG_MODE_SWEEP:
self._set_trigger_sweep(ch, waveform, trigger_source,
sweep_end_freq, channel_frequency,
sweep_duration, trigger_threshold)
    def _set_trigger_gated(self, ch, waveform, trigger_source,
                           trigger_threshold, internal_trig_period,
                           internal_trig_high):
        # Configure gated-burst mode on channel *ch*: the output runs only
        # while the gate source is asserted.  Gating is implemented by
        # driving the amplitude-modulation path at full depth from the gate
        # signal.
        # Threshold calculations. Calibration is applied in
        # _update_dependent_regs
        if trigger_source == _WG_TRIG_EXT:
            trigger_threshold = 0
        elif trigger_source == _WG_TRIG_INTER:
            # Internal trigger: encode the high-time fraction
            # (internal_trig_high / internal_trig_period) as a signed
            # 48-bit level, and drive the modulating sweep generator at
            # the requested clock rate.
            trigger_threshold = -2 ** 47 + (
                1.0 - internal_trig_high / internal_trig_period) * (
                2 ** 48 - 1)
            if ch == 1:
                self._sweepmod1.step = 1 / internal_trig_period / _WG_FREQSCALE
                self._sweepmod1.waveform = 2
                self._sweepmod1.direction = 1
            else:
                self._sweepmod2.step = 1 / internal_trig_period / _WG_FREQSCALE
                self._sweepmod2.waveform = 2
                self._sweepmod2.direction = 1
        if ch == 1:
            self.amod_enable_ch1 = True
            self.mod_source_ch1 = _WG_MOD_GATE
            self.mod_depth_uncalibrated_ch1 = 1.0
            self._sweep1.waitfortrig = 0
            self.trig_source_ch1 = trigger_source
            self.gate_thresh_uncalibrated_ch1 = trigger_threshold
            self.gate_mode_ch1 = 1
        elif ch == 2:
            self.amod_enable_ch2 = True
            self.mod_source_ch2 = _WG_MOD_GATE
            self.mod_depth_uncalibrated_ch2 = 1.0
            self._sweep2.waitfortrig = 0
            self.trig_source_ch2 = trigger_source
            self.gate_thresh_uncalibrated_ch2 = trigger_threshold
            self.gate_mode_ch2 = 1
    def _set_trigger_start(self, ch, trigger_source, trigger_threshold):
        # Configure start-burst mode on channel *ch*: output begins (and
        # stays on) once the trigger event fires.
        # Internal trigger source cannot be used for burst start mode:
        if trigger_source == _WG_TRIG_INTER:
            raise ValueOutOfRangeException("The internal trigger source cannot"
                                           " be used in start burst mode.")
        # Calculate threshold level and configure modulating sweep generator.
        # Calibration is added to threshold in _set_dependent_regs.
        if trigger_source == _WG_TRIG_EXT:
            trigger_threshold = 0
            if ch == 1:
                self._sweepmod1.direction = 1
            elif ch == 2:
                self._sweepmod2.direction = 1
        if ch == 1:
            self.trigger_threshold_uncalibrated_ch1 = trigger_threshold
            self.trig_source_ch1 = trigger_source
            # Wait for a trigger with unlimited run duration (0).
            self._sweep1.waitfortrig = 1
            self._sweep1.duration = 0
            self.enable_reset_ch1 = True
            # Phase delay is disabled; sine waveforms use the dedicated
            # sine trigger-delay path instead.
            self.phasedly_en_ch1 = 0
            self.sine_trigdly_ch1 = 1 if self.waveform_type_ch1 == \
                _WG_WAVE_SINE else 0
        elif ch == 2:
            self.trigger_threshold_uncalibrated_ch2 = trigger_threshold
            self.trig_source_ch2 = trigger_source
            self._sweep2.waitfortrig = 1
            self._sweep2.duration = 0
            self.enable_reset_ch2 = True
            self.phasedly_en_ch2 = 0
            self.sine_trigdly_ch2 = 1 if self.waveform_type_ch2 == \
                _WG_WAVE_SINE else 0
    def _set_trigger_ncycle(self, ch, channel_frequency, ncycles,
                            trigger_threshold, trigger_source,
                            internal_trig_period):
        # Configure N-cycle burst mode on channel *ch*: each trigger event
        # produces *ncycles* periods of the waveform, then the output
        # re-arms.
        # Waveform frequencies are restricted to <= 10 MHz in Ncycle burst
        # mode:
        if channel_frequency > 10.0e6:
            raise ValueOutOfRangeException(
                "Waveform frequencies are restricted to 10 MHz or less in"
                " Ncycle burst mode.")
        # Calculate threshold level and configure modulating sweep generator.
        # Calibration is added to threshold in _set_dependent_regs.
        if trigger_source == _WG_TRIG_EXT:
            trigger_threshold = 0
        elif trigger_source == _WG_TRIG_INTER:
            trigger_threshold = 0
            # Internal trigger: run the modulating sweep generator as a
            # free-running clock at 1/internal_trig_period Hz.
            if ch == 1:
                self._set_sweepgenerator(sweepgen=self._sweepmod1, waveform=2,
                                         waitfortrig=0,
                                         frequency=1.0 / internal_trig_period,
                                         offset=0, logsweep=0, duration=0,
                                         holdlast=0)
                self._sweepmod1.direction = 1
            elif ch == 2:
                self._set_sweepgenerator(sweepgen=self._sweepmod2, waveform=2,
                                         waitfortrig=0,
                                         frequency=1.0 / internal_trig_period,
                                         offset=0,
                                         logsweep=0,
                                         duration=0,
                                         holdlast=0)
                self._sweepmod2.direction = 1
        # ensure combination of signal frequency and Ncycles doesn't cause
        # 64 bit register overflow:
        # (burst length in 125 MHz FPGA clock ticks; 0 when frequency is 0)
        FPGA_cycles = (math.floor(
            125e6 / channel_frequency * ncycles) - 1) if \
            channel_frequency != 0.0 else 0
        if FPGA_cycles > 2**63 - 1:
            raise ValueOutOfRangeException("NCycle Register Overflow")
        if ch == 1:
            self.trigger_threshold_uncalibrated_ch1 = trigger_threshold
            self.trig_source_ch1 = trigger_source
            self._sweep1.waitfortrig = 1
            self._sweep1.duration = FPGA_cycles
            self._sweep1.holdlast = 0
            self.enable_reset_ch1 = True
            # Phase delay is disabled; sine waveforms use the dedicated
            # sine trigger-delay path instead.
            self.phasedly_en_ch1 = 0
            self.sine_trigdly_ch1 = 1 if \
                self.waveform_type_ch1 == _WG_WAVE_SINE else 0
        elif ch == 2:
            self.trigger_threshold_uncalibrated_ch2 = trigger_threshold
            self.trig_source_ch2 = trigger_source
            self._sweep2.waitfortrig = 1
            self._sweep2.duration = FPGA_cycles
            self._sweep2.holdlast = 0
            self.enable_reset_ch2 = True
            self.phasedly_en_ch2 = 0
            self.sine_trigdly_ch2 = 1 if \
                self.waveform_type_ch2 == _WG_WAVE_SINE else 0
    def _set_trigger_sweep(self, ch, waveform, trigger_source, sweep_end_freq,
                           channel_frequency, sweep_duration,
                           trigger_threshold):
        # Configure sweep mode on channel *ch*: on trigger, the output
        # frequency ramps from channel_frequency to sweep_end_freq over
        # sweep_duration seconds.
        # Calculate threshold level and enable/disable continuous sweep.
        # Calibration is added to threshold in _set_dependent_regs.
        if trigger_source == _WG_TRIG_EXT:
            trigger_threshold = 0
            mod_continuous_sweep = 1
        elif trigger_source == _WG_TRIG_INTER:
            trigger_threshold = 1
            mod_continuous_sweep = 0
        else:
            mod_continuous_sweep = 1
        # calculate sweep parameters:
        mod_start_freq = 0
        range_shift = 0
        # Frequency ramp rate (Hz/s) converted to the sweep generator's
        # fixed-point step; range_shift scales step/stop into the usable
        # 64-bit range (clamped to 63).
        deltafreq_persecond = (sweep_end_freq - channel_frequency) / (
            sweep_duration)
        mod_step = abs(2.0**64 / 1e18 * deltafreq_persecond)
        mod_duration_FPGAcycles = math.floor(sweep_duration * 125e6)
        mod_stop_freq = mod_step * 1e9 * sweep_duration
        range_shift = min(
            math.floor(abs(math.log(max(mod_step / 2.0**64,
                                        mod_stop_freq / 2.0**64), 2))), 63)
        mod_step *= 2**range_shift
        mod_stop_freq *= 2**range_shift
        # check if reverse sweep:
        if (sweep_end_freq - channel_frequency) < 0:
            mod_direction = 1
        else:
            mod_direction = 0
        if ch == 1:
            self._set_sweepgenerator(sweepgen=self._sweep1,
                                     frequency=channel_frequency,
                                     waitfortrig=0)
            self._sweepmod1.waitfortrig = mod_continuous_sweep
            self._sweepmod1.start = mod_start_freq
            self._sweepmod1.stop = mod_stop_freq
            self._sweepmod1.step = mod_step
            self._sweepmod1.duration = mod_duration_FPGAcycles
            self._sweepmod1.direction = 0
            self.reverse_sweep_ch1 = mod_direction
            self._sweepmod1.waveform = 2
            self._sweepmod1.holdlast = 0
            # Sweep mode is exclusive of AM/PM/FM modulation.
            self.amod_enable_ch1 = False
            self.pmod_enable_ch1 = False
            self.fmod_enable_ch1 = False
            self.sweep_enable_ch1 = True
            self.trig_source_ch1 = trigger_source
            self.trigger_threshold_uncalibrated_ch1 = trigger_threshold
            self.range_shift_ch1 = range_shift
        else:
            self._set_sweepgenerator(sweepgen=self._sweep2,
                                     frequency=channel_frequency,
                                     waitfortrig=0)
            self._sweepmod2.waitfortrig = mod_continuous_sweep
            self._sweepmod2.start = mod_start_freq
            self._sweepmod2.stop = mod_stop_freq
            self._sweepmod2.step = mod_step
            self._sweepmod2.duration = mod_duration_FPGAcycles
            self._sweepmod2.direction = 0
            self.reverse_sweep_ch2 = mod_direction
            self._sweepmod2.waveform = 2
            self._sweepmod2.holdlast = 0
            self.amod_enable_ch2 = False
            self.pmod_enable_ch2 = False
            self.fmod_enable_ch2 = False
            self.sweep_enable_ch2 = True
            self.trig_source_ch2 = trigger_source
            self.trigger_threshold_uncalibrated_ch2 = trigger_threshold
            self.range_shift_ch2 = range_shift
@needs_commit
@deprecated(category='method', message="'gen_modulate_off' has been "
"deprecated. Use set_modulate_trig_off instead.")
def gen_modulate_off(self, ch=None):
"""
'gen_modulate_off' has been deprecated. Use set_modulate_trig_off
instead.
Turn off modulation for the specified output channel.
If *ch* is None (the default), both channels will be turned off,
otherwise just the one specified by the argument.
:type ch: int; {1,2} or None
:param ch: Output channel to turn modulation off.
"""
# warnings.warn("'gen_modulate_off' has been deprecated. Use
# set_modulate_trig_off instead.", DeprecationWarning)
self.set_modulate_trig_off(ch)
@needs_commit
@deprecated(category='method', message="'gen_trigger_off' has been "
"deprecated. Use set_modulate_trig_off instead.")
def gen_trigger_off(self, ch=None):
"""
'gen_trigger_off' has been deprecated. Use set_modulate_trig_off
instead."
Turn off trigger/sweep mode for the specified output channel.
If *ch* is None (the default), both channels will be turned off,
otherwise just the one specified by the argument.
:type ch: int; {1,2} or None
:param ch: Output channel to turn trigger/sweep mode off
"""
# warnings.warn("'gen_trigger_off' has been deprecated. Use
# set_modulate_trig_off instead.", DeprecationWarning)
self.set_modulate_trig_off(ch)
@needs_commit
def set_modulate_trig_off(self, ch=None):
"""
Turn off modulation and trigger modes for the specified output channel.
If *ch* is None (the default), both channels will be turned off,
otherwise just the one specified by the argument.
:type ch: int; {1,2} or None
:param ch: Output channel to turn modulation off.
"""
_utils.check_parameter_valid('set', ch, [1, 2],
'output channel', allow_none=True)
self._init_trig_modulation(ch)
    @needs_commit
    @deprecated(category='param',
                message="'in' and 'out' modulation sources have been "
                "deprecated. Use 'adc1', 'adc2', 'dac1' or 'dac2' instead.")
    def gen_modulate(self, ch, mtype, source, depth, frequency=0.0):
        """
        Set up modulation on an output channel.

        :type ch: int; {1,2}
        :param ch: Channel to modulate

        :type mtype: string, {'amplitude', 'frequency', 'phase'}
        :param mtype: Modulation type. Respectively Off, Amplitude, Frequency
            and Phase modulation.

        :type source: string,
            {'adc1', 'adc2', 'dac1', 'dac2', 'internal', 'in', 'out'}
        :param source: Modulation source. Respectively Internal Sinewave,
            associated input channel or opposite output channel.
            In and out sources are deprecated.

        :type depth: float 0-1, 0-125MHz or 0 - 360 deg
        :param depth: Modulation depth (depends on modulation type):
            Fractional modulation depth, Frequency Deviation/Volt or +/-
            phase shift/Volt

        :type frequency: float
        :param frequency: Frequency of internally-generated sine wave
            modulation. This parameter is ignored if the source is set to
            ADC or DAC.

        :raises ValueOutOfRangeException: if the channel number is invalid or
            modulation parameters can't be achieved
        """
        _utils.check_parameter_valid('set', ch, [1, 2], 'modulation channel')
        _utils.check_parameter_valid(
            'range', frequency, [0, 250e6], 'internal modulation frequency')
        _utils.check_parameter_valid(
            'set', mtype, ['amplitude',
                           'frequency',
                           'phase'], 'modulation type')
        _utils.check_parameter_valid(
            'set', source, ['adc1',
                            'adc2',
                            'dac1',
                            'dac2',
                            'internal',
                            'in',
                            'out'], 'modulation source')
        if source in ['in', 'out']:
            warnings.warn(
                message="'in' and 'out' modulation sources have been "
                "deprecated. Use 'adc1', 'adc2', 'dac1' or 'dac2' instead.",
                category=DeprecationWarning,
                stacklevel=1
            )
            # 'in' and 'out' sources are deprecated sources. Convert to adc/dac
            # source type: 'in' maps to the same-numbered ADC, 'out' to the
            # opposite channel's DAC.
            if ch == 1:
                if source == 'in':
                    source = 'adc1'
                elif source == 'out':
                    source = 'dac2'
            if ch == 2:
                if source == 'in':
                    source = 'adc2'
                elif source == 'out':
                    source = 'dac1'
        # Can't use current channel as trigger mode source:
        if ch == 1 and source == 'dac1':
            raise ValueOutOfRangeException(
                "dac1 cannot be used as the modulation source for channel 1.")
        elif ch == 2 and source == 'dac2':
            raise ValueOutOfRangeException(
                "dac2 cannot be used as the modulation source for channel 2.")
        _str_to_modsource = {
            'adc1': _WG_MOD_ADC1,
            'adc2': _WG_MOD_ADC2,
            'dac1': _WG_MOD_DAC1,
            'dac2': _WG_MOD_DAC2,
            'internal': _WG_MOD_INTER
        }
        _str_to_modtype = {
            'amplitude': _WG_MOD_AMPL,
            'frequency': _WG_MOD_FREQ,
            'phase': _WG_MOD_PHASE
        }
        source = _utils.str_to_val(
            _str_to_modsource, source, 'modulation source')
        mtype = _utils.str_to_val(
            _str_to_modtype, mtype, 'modulation source')
        # Maximum achievable modulation depth is limited when frontend
        # attenuation is not enabled
        if self.atten_compensate_ch1 == 0:
            logging.warning("+/- 0.5 V voltage range is selected on input "
                            "channel 1. Maximum achievable modulation depth "
                            "may be limited.")
        if self.atten_compensate_ch2 == 0:
            logging.warning("+/- 0.5 V voltage range is selected on input "
                            "channel 2. Maximum achievable modulation depth "
                            "may be limited.")
        # Calculate the depth value depending on modulation source and type.
        # Calibration calculations for frontend variations done in
        # _update_dependent_regs.
        depth_parameter = 0.0
        if mtype == _WG_MOD_AMPL:
            _utils.check_parameter_valid('range', depth, [0.0, 1.0],
                                         'amplitude modulation depth',
                                         'fraction')
            depth_parameter = depth
        elif mtype == _WG_MOD_FREQ:
            # Normalise Hz/V against the DAC update rate / 8.
            _utils.check_parameter_valid(
                'range', depth, [0.0, _WG_MOD_FREQ_MAX],
                'frequency modulation depth', 'Hz/V')
            depth_parameter = depth / (DAC_SMP_RATE / 8.0)
        elif mtype == _WG_MOD_PHASE:
            # Normalise degrees/V to a full-turn fraction.
            _utils.check_parameter_valid(
                'range', depth, [0.0, 360.0],
                'phase modulation depth', 'degrees/V')
            depth_parameter = depth / 360.0
        # Can't use trigger/sweep modes at the same time as modulation
        self.set_modulate_trig_off(ch)
        if ch == 1:
            self.mod_depth_uncalibrated_ch1 = depth_parameter
            self.mod_source_ch1 = source
            self.amod_enable_ch1 = True if mtype == _WG_MOD_AMPL else False
            self.fmod_enable_ch1 = True if mtype == _WG_MOD_FREQ else False
            self.pmod_enable_ch1 = True if mtype == _WG_MOD_PHASE else False
            self.sweep_enable_ch1 = False
            if source == _WG_MOD_INTER:
                # Internal modulation source: run the modulating sweep
                # generator as a free-running sine at *frequency*.
                self._set_sweepgenerator(sweepgen=self._sweepmod1,
                                         waveform=2,
                                         waitfortrig=0,
                                         frequency=frequency,
                                         offset=0,
                                         logsweep=0,
                                         duration=0)
            self.adc1_statuslight = True if \
                source == _WG_MODSOURCE_ADC else False
        elif ch == 2:
            self.mod_depth_uncalibrated_ch2 = depth_parameter
            self.mod_source_ch2 = source
            self.amod_enable_ch2 = True if mtype == _WG_MOD_AMPL else False
            self.fmod_enable_ch2 = True if mtype == _WG_MOD_FREQ else False
            self.pmod_enable_ch2 = True if mtype == _WG_MOD_PHASE else False
            self.sweep_enable_ch2 = False
            if source == _WG_MOD_INTER:
                self._set_sweepgenerator(sweepgen=self._sweepmod2,
                                         waveform=2,
                                         waitfortrig=0,
                                         frequency=frequency,
                                         offset=0,
                                         logsweep=0,
                                         duration=0)
            self.adc2_statuslight = True if \
                source == _WG_MODSOURCE_ADC else False
def _get_mod_depth_uncalibrated(self, ch):
# Calculate mod depth based on instrument state. Used when connecting
# to running device.
dac1, dac2 = self._dac_gains()
adc1, adc2 = self._adc_gains()
mod_source_scalers = [2.0**11 / (8.0 if self.atten_compensate_ch1
else 1.0) * adc1,
2.0**11 / (8.0 if self.atten_compensate_ch2
else 1.0) * adc2,
2.0**14 * dac1,
2.0**14 * dac2,
1.0,
1.0]
if ch == 1:
mod_depth_uncalibrated = self.mod_depth_ch1 / \
mod_source_scalers[self.mod_source_ch1] / _WG_MOD_DEPTH_MAX
else:
mod_depth_uncalibrated = self.mod_depth_ch2 / \
mod_source_scalers[self.mod_source_ch2] / _WG_MOD_DEPTH_MAX
return mod_depth_uncalibrated
def _get_gate_thresh_uncalibrated(self, ch):
# Calculate gate threshold based on instrument state. Used when
# connecting to running device.
dac1, dac2 = self._dac_gains()
adc1, adc2 = self._adc_gains()
gate_source_scalers = [adc1, adc2, dac1 * 16, dac2 * 16, 1.0, 1.0]
if ch == 1:
gate_thresh_uncalibrated = self.gate_thresh_ch1 * \
gate_source_scalers[self.trig_source_ch1]
else:
gate_thresh_uncalibrated = self.gate_thresh_ch2 * \
gate_source_scalers[self.trig_source_ch2]
return gate_thresh_uncalibrated
def _get_trig_thresh_uncalibrated(self, ch):
# Calculate trig threshold based on instrument state. Used when
# connecting to running device.
dac1, dac2 = self._dac_gains()
adc1, adc2 = self._adc_gains()
trig_source_scalers = [adc1, adc2, dac1 * 16, dac2 * 16, 1.0, 1.0]
if ch == 1:
trig_threshold_uncalibrated = self._trigger1.level * \
trig_source_scalers[self.trig_source_ch1]
else:
trig_threshold_uncalibrated = self._trigger2.level * \
trig_source_scalers[self.trig_source_ch2]
return trig_threshold_uncalibrated
    def _update_dependent_regs(self):
        # Convert the cached, user-facing ("uncalibrated") modulation depths
        # and gate/trigger thresholds into register values using the current
        # frontend calibration.  The try/except AttributeError pattern
        # rebuilds a missing cached value from register state, which happens
        # when attaching to an already-running instrument.
        # Get the calibration coefficients of the front end
        dac1, dac2 = self._dac_gains()
        adc1, adc2 = self._adc_gains()
        # Frontend attenuation flag for modulation
        self.atten_compensate_ch1 = 1 if self._get_frontend(1)[1] else 0
        self.atten_compensate_ch2 = 1 if self._get_frontend(2)[1] else 0
        # Scaling source parameter arrays for each trigger/modulation mode.
        # (indexed by the mod/trig source register value)
        mod_source_scalers = [2.0**11 / (8.0 if self.atten_compensate_ch1
                                         else 1.0) * adc1,
                              2.0**11 / (8.0 if self.atten_compensate_ch2
                                         else 1.0) * adc2,
                              2.0**14 * dac1,
                              2.0**14 * dac2,
                              1.0,
                              1.0]
        gate_source_scalers = [adc1, adc2, dac1 * 16, dac2 * 16, 1.0, 1.0]
        trig_source_scalers = [adc1, adc2, dac1 * 16, dac2 * 16, 1.0, 1.0]
        # Channel 1 modulation depth
        if (self.amod_enable_ch1 is True or self.pmod_enable_ch1 is True or (
                self.fmod_enable_ch1 is True)):
            try:
                self.mod_depth_uncalibrated_ch1
            except AttributeError:
                self.mod_depth_uncalibrated_ch1 = \
                    self._get_mod_depth_uncalibrated(1)
            self.mod_depth_ch1 = self.mod_depth_uncalibrated_ch1 * \
                mod_source_scalers[self.mod_source_ch1] * _WG_MOD_DEPTH_MAX
        # Channel 2 modulation depth
        if (self.amod_enable_ch2 is True or self.pmod_enable_ch2 is True or (
                self.fmod_enable_ch2 is True)):
            try:
                self.mod_depth_uncalibrated_ch2
            except AttributeError:
                self.mod_depth_uncalibrated_ch2 = \
                    self._get_mod_depth_uncalibrated(2)
            self.mod_depth_ch2 = self.mod_depth_uncalibrated_ch2 * \
                mod_source_scalers[self.mod_source_ch2] * _WG_MOD_DEPTH_MAX
        # Channel 1 gate threshold
        if self.gate_mode_ch1 == 1:
            try:
                self.gate_thresh_uncalibrated_ch1
            except AttributeError:
                self.gate_thresh_uncalibrated_ch1 = \
                    self._get_gate_thresh_uncalibrated(1)
            self.gate_thresh_ch1 = self.gate_thresh_uncalibrated_ch1 / \
                gate_source_scalers[self.trig_source_ch1]
        # Channel 2 gate threshold
        if self.gate_mode_ch2 == 1:
            try:
                self.gate_thresh_uncalibrated_ch2
            except AttributeError:
                self.gate_thresh_uncalibrated_ch2 = \
                    self._get_gate_thresh_uncalibrated(2)
            self.gate_thresh_ch2 = self.gate_thresh_uncalibrated_ch2 / \
                gate_source_scalers[self.trig_source_ch2]
        # Channel 1 N cycle/start/sweep mode trigger threshold
        if (self.trig_sweep_mode_ch1 == 1 and self.gate_mode_ch1 != 1):
            try:
                self.trigger_threshold_uncalibrated_ch1
            except AttributeError:
                self.trigger_threshold_uncalibrated_ch1 = \
                    self._get_trig_thresh_uncalibrated(1)
            self._trigger1.level = self.trigger_threshold_uncalibrated_ch1 / \
                trig_source_scalers[self.trig_source_ch1]
        # Channel 2 N cycle/start/sweep mode trigger threshold
        if (self.trig_sweep_mode_ch2 == 1 and self.gate_mode_ch2 != 1):
            try:
                self.trigger_threshold_uncalibrated_ch2
            except AttributeError:
                self.trigger_threshold_uncalibrated_ch2 = \
                    self._get_trig_thresh_uncalibrated(2)
            self._trigger2.level = self.trigger_threshold_uncalibrated_ch2 / \
                trig_source_scalers[self.trig_source_ch2]
    def commit(self):
        # Recompute registers whose values depend on other settings
        # (modulation depth, gate thresholds, trigger levels) so the
        # hardware receives a consistent snapshot.
        self._update_dependent_regs()
        # Commit the register values to the device
        super(WaveformGenerator, self).commit()
    # Bring in the docstring from the superclass for our docco.
    commit.__doc__ = MokuInstrument.commit.__doc__
_wavegen_reg_handlers = {
# channel 1 control:
# modulation controls
'adc1_statuslight':
(REG_BASE_MOD_0,
to_reg_unsigned(0, 1),
from_reg_unsigned(0, 1)),
'amod_enable_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(1, 1),
from_reg_unsigned(1, 1)),
'fmod_enable_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(2, 1),
from_reg_unsigned(2, 1)),
'pmod_enable_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(3, 1),
from_reg_unsigned(3, 1)),
'sweep_enable_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(4, 1),
from_reg_unsigned(4, 1)),
'reverse_sweep_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(5, 1),
from_reg_unsigned(5, 1)),
'mod_source_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(6, 3),
from_reg_unsigned(6, 3)),
'atten_compensate_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(9, 1),
from_reg_unsigned(9, 1)),
'trig_source_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(10, 3),
from_reg_unsigned(10, 3)),
'range_shift_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(13, 6),
from_reg_unsigned(13, 6)),
'sine_trigdly_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(19, 1),
from_reg_unsigned(19, 1)),
'phasedly_en_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(20, 1),
from_reg_unsigned(20, 1)),
'trig_sweep_mode_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(29, 1),
from_reg_unsigned(29, 1)),
'gate_mode_ch1':
(REG_BASE_MOD_0,
to_reg_unsigned(30, 1),
from_reg_unsigned(30, 1)),
'mod_depth_ch1':
(REG_BASE_MOD_0 + 1,
to_reg_unsigned(0, 32),
from_reg_unsigned(0, 32)),
'gate_thresh_ch1':
((REG_GATETHRESH_H_CH1, REG_GATETHRESH_L_CH1),
to_reg_signed(16, 48),
from_reg_signed(16, 48)),
# waveform controls
'enable_ch1':
(REG_BASE_WAV_0,
to_reg_unsigned(0, 1),
from_reg_unsigned(0, 1)),
'waveform_type_ch1':
(REG_BASE_WAV_0,
to_reg_unsigned(1, 1),
from_reg_unsigned(1, 1)),
'amplitude_ch1':
(REG_BASE_WAV_0 + 1,
to_reg_signed(0, 18,
xform=lambda obj, a: 2 * a / obj._dac_gains()[0]),
from_reg_signed(0, 18,
xform=lambda obj, a: 2 * a * obj._dac_gains()[0])),
'offset_ch1':
(REG_BASE_WAV_0 + 2,
to_reg_signed(0, 16,
xform=lambda obj, a: a / obj._dac_gains()[0]),
from_reg_signed(0, 16,
xform=lambda obj, a: a * obj._dac_gains()[0])),
't0_ch1':
((REG_BASE_WAV_0 + 13, REG_BASE_WAV_0 + 12),
to_reg_unsigned(0, 48,
xform=lambda obj, o: o * _WG_PERIODSCALE_SQR),
from_reg_unsigned(0, 48,
xform=lambda obj, o: o * _WG_PERIODSCALE_SQR)),
't1_ch1':
((REG_BASE_WAV_0 + 15, REG_BASE_WAV_0 + 14),
to_reg_unsigned(0, 48,
xform=lambda obj, o: o * _WG_PERIODSCALE_SQR),
from_reg_unsigned(0, 48,
xform=lambda obj, o: o * _WG_PERIODSCALE_SQR)),
't2_ch1':
((REG_BASE_WAV_0 + 17, REG_BASE_WAV_0 + 16),
to_reg_unsigned(0, 48,
xform=lambda obj, o: o * _WG_PERIODSCALE_SQR),
from_reg_unsigned(0, 48,
xform=lambda obj, o: o * _WG_PERIODSCALE_SQR)),
'riserate_ch1':
((REG_BASE_WAV_0 + 19, REG_BASE_WAV_0 + 18),
to_reg_signed(0, 64,
xform=lambda obj, o: (o**-1) * _WG_RISESCALE),
from_reg_signed(0, 64,
xform=lambda obj, o: (o / _WG_RISESCALE)**-1)),
'fallrate_ch1':
((REG_BASE_WAV_0 + 21, REG_BASE_WAV_0 + 20),
to_reg_signed(0, 64,
xform=lambda obj, o: (o**-1) * _WG_RISESCALE),
from_reg_signed(0, 64,
xform=lambda obj, o: (o / _WG_RISESCALE)**-1)),
'enable_reset_ch1':
(REG_BASE_WAV_0 + 22,
to_reg_unsigned(0, 1),
from_reg_unsigned(0, 1)),
'phase_dly_ch1':
(REG_BASE_WAV_0 + 23,
to_reg_unsigned(0, 32),
from_reg_unsigned(0, 32)),
# channel 2 control:
# modulation controls
'adc2_statuslight':
(REG_BASE_MOD_1,
to_reg_unsigned(0, 1),
from_reg_unsigned(0, 1)),
'amod_enable_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(1, 1),
from_reg_unsigned(1, 1)),
'fmod_enable_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(2, 1),
from_reg_unsigned(2, 1)),
'pmod_enable_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(3, 1),
from_reg_unsigned(3, 1)),
'sweep_enable_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(4, 1),
from_reg_unsigned(4, 1)),
'reverse_sweep_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(5, 1),
from_reg_unsigned(5, 1)),
'mod_source_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(6, 3),
from_reg_unsigned(6, 3)),
'atten_compensate_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(9, 1),
from_reg_unsigned(9, 1)),
'trig_source_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(10, 3),
from_reg_unsigned(10, 3)),
'range_shift_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(13, 6),
from_reg_unsigned(13, 6)),
'sine_trigdly_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(19, 1),
from_reg_unsigned(19, 1)),
'phasedly_en_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(20, 1),
from_reg_unsigned(20, 1)),
'trig_sweep_mode_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(29, 1),
from_reg_unsigned(29, 1)),
'gate_mode_ch2':
(REG_BASE_MOD_1,
to_reg_unsigned(30, 1),
from_reg_unsigned(30, 1)),
'mod_depth_ch2':
((REG_BASE_MOD_1 + 1),
to_reg_unsigned(0, 32),
from_reg_unsigned(0, 32)),
'gate_thresh_ch2':
((REG_GATETHRESH_H_CH2, REG_GATETHRESH_L_CH2),
to_reg_signed(16, 48),
from_reg_signed(16, 48)),
# waveform controls
'enable_ch2':
(REG_BASE_WAV_1,
to_reg_unsigned(0, 1),
from_reg_unsigned(0, 1)),
'waveform_type_ch2':
(REG_BASE_WAV_1,
to_reg_unsigned(1, 1),
from_reg_unsigned(1, 1)),
'amplitude_ch2':
((REG_BASE_WAV_1 + 1),
to_reg_signed(0, 18,
xform=lambda obj, a: 2 * a / obj._dac_gains()[1]),
from_reg_signed(0, 18,
xform=lambda obj, a: 2 * a * obj._dac_gains()[1])),
'offset_ch2':
((REG_BASE_WAV_1 + 2),
to_reg_signed(0, 16,
xform=lambda obj, a: a / obj._dac_gains()[1]),
from_reg_signed(0, 16,
xform=lambda obj, a: a * obj._dac_gains()[1])),
't0_ch2':
(((REG_BASE_WAV_1 + 13), (REG_BASE_WAV_1 + 12)),
to_reg_unsigned(0, 48,
xform=lambda obj, o: o * _WG_PERIODSCALE_SQR),
from_reg_unsigned(0, 48,
xform=lambda obj, o: o * _WG_PERIODSCALE_SQR)),
't1_ch2':
((REG_BASE_WAV_1 + 15, REG_BASE_WAV_1 + 14),
to_reg_unsigned(0, 48,
xform=lambda obj, o: o * _WG_PERIODSCALE_SQR),
from_reg_unsigned(0, 48,
xform=lambda obj, o: o * _WG_PERIODSCALE_SQR)),
't2_ch2':
((REG_BASE_WAV_1 + 17, REG_BASE_WAV_1 + 16),
to_reg_unsigned(0, 48,
xform=lambda obj, o: o * _WG_PERIODSCALE_SQR),
from_reg_unsigned(0, 48,
xform=lambda obj, o: o * _WG_PERIODSCALE_SQR)),
'riserate_ch2':
((REG_BASE_WAV_1 + 19, REG_BASE_WAV_1 + 18),
to_reg_signed(0, 64,
xform=lambda obj, o: (o**-1) * _WG_RISESCALE),
from_reg_signed(0, 64,
xform=lambda obj, o: (o / _WG_RISESCALE)**-1)),
'fallrate_ch2':
((REG_BASE_WAV_1 + 21, REG_BASE_WAV_1 + 20),
to_reg_signed(0, 64,
xform=lambda obj, o: (o**-1) * _WG_RISESCALE),
from_reg_signed(0, 64,
xform=lambda obj, o: (o / _WG_RISESCALE)**-1)),
'enable_reset_ch2':
(REG_BASE_WAV_1 + 22,
to_reg_unsigned(0, 1),
from_reg_unsigned(0, 1)),
'phase_dly_ch2':
(REG_BASE_WAV_1 + 23,
to_reg_unsigned(0, 32),
from_reg_unsigned(0, 32))
}
_wavegen_mod_reg_handlers = {}
| StarcoderdataPython |
8055181 | <reponame>mismaah/dhivehi-nlp
import unittest
from dhivehi_nlp import trigram_similarity
class test_trigram_similarity(unittest.TestCase):
    """Unit tests for the ``dhivehi_nlp.trigram_similarity`` module."""

    def test_generate_trigram(self):
        # Expected character trigrams for one Thaana word; entries starting
        # with a space are the word-boundary padding variants.
        result = {
            " ބ",
            " ބަ",
            "ބަޔ",
            "ަޔަ",
            "ޔަކ",
            "ަކު",
            "ކު ",
        }
        self.assertEqual(trigram_similarity.generate_trigrams("ބަޔަކު"), result)
    def test_similarity(self):
        # Similarity of a query word against words drawn from a sentence,
        # limited to the top 3 matches.
        text = "ރަށްތައް އުފެދިފައިވާ ގޮތުން ވަކިވަކި ކުދިކުދި ރަށްރަށް ހުރި ކަމުގައި ވިޔަސް އެއްބަޔަކު އަނެއް ބަޔަކަށް ބަރޯސާވާ ކަމާއި ވަކި ދަތުރުފަތުރުކޮށް އެއްބައެއްގެ"
        result = [
            {"word": "ބަޔަކަށް", "similarity": 0.5555555555555556},
            {"word": "އެއްބަޔަކު", "similarity": 0.45454545454545453},
            {"word": "ބަރޯސާވާ", "similarity": 0.2222222222222222},
        ]
        self.assertEqual(
            trigram_similarity.get_similarity("ބަޔަކު", text, max_output=3), result
        )
    def test_similarity_dict_0(self):
        # No corpus given: similarity is computed against the built-in
        # dictionary (presumably — confirm against trigram_similarity docs).
        result = [
            {"word": "ބަޔަކު", "similarity": 1.0},
            {"word": "ބަ", "similarity": 0.6666666666666666},
            {"word": "ބ", "similarity": 0.5},
            {"word": "ބަޔޭބަޔޭ", "similarity": 0.42857142857142855},
            {"word": "ބަޔާން", "similarity": 0.42857142857142855},
        ]
        self.assertEqual(
            trigram_similarity.get_similarity("ބަޔަކު", max_output=5), result
        )
    def test_similarity_dict_1(self):
        # Second dictionary-backed query with a different word and cutoff.
        result = [
            {"word": "ހަރު", "similarity": 1.0},
            {"word": "ހަހަރު", "similarity": 0.7142857142857143},
            {"word": "ހަ", "similarity": 0.6666666666666666},
        ]
        self.assertEqual(
            trigram_similarity.get_similarity("ހަރު", max_output=3), result
        )
if __name__ == "__main__":
    # Run the full test suite when this file is executed directly.
    unittest.main()
| StarcoderdataPython |
3288658 | <reponame>databyjp/asset_correlation_analysis
# ========== (c) <NAME> 16/3/21 ==========
import logging
import pandas as pd
import numpy as np
import utils
import scipy.stats
import plotly.express as px
logger = logging.getLogger(__name__)
desired_width = 320
pd.set_option('display.max_columns', 20)
pd.set_option('display.width', desired_width)
def main():
    """Compute pairwise Pearson correlations between all symbols and plot
    the least-correlated, most negatively correlated and most positively
    correlated pairs, saving each chart to ``out_img/``."""
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    sh = logging.StreamHandler()
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    sh.setFormatter(formatter)
    root_logger.addHandler(sh)
    # ========== GET DATA ==========
    symbol_dict = utils.load_data("data")
    df = utils.symbol_dict_to_df(symbol_dict)
    df = utils.normalise_price(df)
    symbols = list(np.sort(df["symbol"].unique()))
    # ========== DETERMINE SIMILARITIES ==========
    # Calculate similarities between each stock
    # (full n x n matrix; symmetric, with 1.0 on the diagonal)
    r_array = np.zeros([len(symbols), len(symbols)])
    p_array = np.zeros([len(symbols), len(symbols)])
    for i in range(len(symbols)):
        for j in range(len(symbols)):
            vals_i = df[df["symbol"] == symbols[i]]['close'].values
            vals_j = df[df["symbol"] == symbols[j]]['close'].values
            r_ij, p_ij = scipy.stats.pearsonr(vals_i, vals_j)
            r_array[i, j] = r_ij
            p_array[i, j] = p_ij
    # ========== FIND PAIR HIGHEST(+ and -) & SMALLEST CORRELATIONS ==========
    min_corr = np.min(np.abs(r_array))
    neg_corr = np.min(r_array)
    # Zero the diagonal so a symbol's self-correlation (1.0) cannot win
    # the "most positively correlated" search.
    tmp_arr = r_array.copy()
    for i in range(len(tmp_arr)):
        tmp_arr[i, i] = 0
    pos_corr = np.max(tmp_arr)
    min_inds = np.where(abs(r_array) == min_corr)
    neg_inds = np.where(r_array == neg_corr)
    pos_inds = np.where(r_array == pos_corr)
    # NOTE(review): np.where returns (row_idx, col_idx); taking [0][0] and
    # [0][1] relies on the matrix being symmetric so the first two ROW
    # indices happen to form the matching (i, j) pair — verify this holds
    # if the matrix ever becomes non-symmetric or has tied extrema.
    min_pair = [symbols[min_inds[0][0]], symbols[min_inds[0][1]]]
    neg_pair = [symbols[neg_inds[0][0]], symbols[neg_inds[0][1]]]
    pos_pair = [symbols[pos_inds[0][0]], symbols[pos_inds[0][1]]]
    # Third-largest off-diagonal value: the top two flattened entries are
    # the symmetric duplicates of the best pair, so [-3] picks the runner-up.
    corr_order = np.argsort(tmp_arr.flatten())
    corr_num = corr_order[-3]
    print(symbols[corr_num // len(symbols)], symbols[corr_num % len(symbols)])
    pos_pair_2 = [symbols[corr_num // len(symbols)], symbols[corr_num % len(symbols)]]
    for tmp_pair in [min_pair, neg_pair, pos_pair, pos_pair_2]:
        pair_df = df[df.symbol.isin(tmp_pair)]
        # NOTE(review): positional pivot(index, columns) args were removed in
        # pandas 2.x; this requires an older pandas — confirm pinned version.
        pair_piv_df = pair_df[["norm_close", "symbol", "date"]].pivot("date", "symbol")
        pair_piv_df = pair_piv_df.dropna()
        pair_piv_df.columns = pair_piv_df.columns.get_level_values(1)
        # Add the average of the two series, then melt back to long form
        # so plotly draws three lines (both symbols plus "avg").
        pair_piv_df = pair_piv_df.assign(avg=pair_piv_df.mean(axis=1)).reset_index()
        pair_df = pair_piv_df.melt(id_vars="date")
        fig = px.line(pair_df, x="date", y="value", color="symbol",
                      color_discrete_sequence=px.colors.qualitative.Safe,
                      title=f"Correlation - {utils.get_comp_name(tmp_pair[0])} & {utils.get_comp_name(tmp_pair[1])}",
                      height=400, width=800,
                      labels={"value": "Relative price", "date": "Date", "symbol": "Symbol"},
                      template="plotly_white")
        fig.show()
        fig.write_image(f"out_img/corr_{tmp_pair[0]}_{tmp_pair[1]}.png")
if __name__ == '__main__':
main()
| StarcoderdataPython |
1668412 | <reponame>AZMAG/smartpy_sim_indicators
from __future__ import division, print_function
import gc
import numpy as np
import pandas as pd
import orca
from smartpy_core.wrangling import broadcast
def clear_cache(table_name, columns=None):
    """
    Manually clears cached columns. If no column(s)
    are provided then all columns are cleared.

    Parameters:
    ----------
    table_name: string
        Name of orca data frame wrapper to clear.
    columns: string or list, optional
        Name of orca column wrapper(s) to clean.

    """
    if columns is None:
        # Default to every computed column, i.e. everything that is
        # not part of the table's local (stored) columns.
        wrapper = orca.get_table(table_name)
        columns = list(set(wrapper.columns) - set(wrapper.local_columns))
    elif not isinstance(columns, list):
        # A single column name was passed in.
        columns = [columns]
    for col in columns:
        orca.orca._COLUMNS[(table_name, col)].clear_cached()
def clear_table(table_name):
    """
    Clears out an entire table cache. Only call this if you want
    the entire table to be recreated. Use the 'clear_cache' method (above) if you
    want to keep the table but just clear additional/computed columns.

    """
    # Reaches into orca's private table registry to drop the cached frame.
    orca.orca._TABLES[table_name].clear_cached()
def clear_injectable(injectable_name):
    """
    Clears out the cache for an injected function and forces it to be
    re-evaluated.

    """
    # Reaches into orca's private injectable registry to drop the cached value.
    orca.orca._INJECTABLES[injectable_name].clear_cached()
def get_year_bin(year, year_bins):
    """
    Returns the bin containing the given year, i.e. the largest bin value
    that does not exceed the year. Intended for small lists.

    Parameters:
    -----------
    year: int
        The current simulation year.
    year_bins: list
        List of years.

    Returns:
    --------
    The year bin that contains the provided year.

    """
    ordered = sorted(year_bins)
    earliest = ordered[0]
    # Anything at or before the first bin (or an undefined year)
    # falls into the first bin.
    if year is None or year <= earliest:
        return earliest
    chosen = earliest
    for candidate in ordered:
        if year < candidate:
            break
        chosen = candidate
    return chosen
############################################
# FUNCTION/INJECTABLE FACTORIES
############################################
def make_broadcast_injectable(from_table, to_table, col_name, fkey,
                              cache=True, cache_scope='iteration'):
    """
    This creates a broadcast column function/injectable and registers it with orca.

    Parameters:
    -----------
    from_table: str
        The table name to brodacast from (the right).
    to_table: str
        The table name to broadcast to (the left).
    col_name: str
        Name of the column to broadcast.
    fkey: str
        Name of column on the to table that serves as the foreign key.
    cache: bool, optional, default True
        Whether or not the broadcast is cached.
    cache_scope: str, optional, default `iteration`
        Cache scope for the broadcast.

    """
    def broadcast_template():
        # Evaluated lazily by orca each time the column is requested:
        # aligns `from_table[col_name]` onto `to_table` rows via the
        # foreign-key column.
        return broadcast(
            orca.get_table(from_table)[col_name],
            orca.get_table(to_table)[fkey]
        )
    orca.add_column(to_table, col_name, broadcast_template, cache=cache, cache_scope=cache_scope)
def make_reindex_injectable(from_table, to_table, col_name, cache=True, cache_scope='iteration'):
    """
    This creates a PK-PK reindex injectable: `col_name` from `from_table`
    is re-aligned onto `to_table`'s index, with missing rows filled with 0.

    """
    def reindex_template():
        source = orca.get_table(from_table)[col_name]
        target_index = orca.get_table(to_table).index
        return source.reindex(target_index).fillna(0)
    orca.add_column(to_table, col_name, reindex_template, cache=cache, cache_scope=cache_scope)
def make_series_broadcast_injectable(from_series, to_table, col_name, fkey, fill_with=None,
                                     cache=True, cache_scope='iteration'):
    """
    Broadcasts an injected series to table.

    from_series: name of the orca injectable holding the series.
    fill_with: optional value used to fill rows with no match; when None,
        unmatched rows keep NaN.

    """
    def s_broadcast_template():
        # Evaluated lazily by orca: joins the injected series onto
        # `to_table` rows via the foreign-key column.
        b = broadcast(
            orca.get_injectable(from_series),
            orca.get_table(to_table)[fkey]
        )
        if fill_with is not None:
            b.fillna(fill_with, inplace=True)
        return b
    orca.add_column(to_table, col_name, s_broadcast_template, cache=cache, cache_scope=cache_scope)
#################################################
# FOR LOADING H5 tables and registering w/ orca
##################################################
def load_tables(h5, year, tables=None):
    """
    Loads tables for the desired year and registers them with orca.

    Parameters:
    -----------
    h5: str
        full path to the h5 file containing the results.
    year: int or str
        Year to grab tables for. Provide 'base' for the base year.
    tables: list of str, default None
        List of tables to load. If None, all tables in that year
        will be loaded.

    """
    with pd.HDFStore(h5, mode='r') as store:
        # grab all the table names in the current year
        if tables is None:
            prefix = '/{}'.format(year)
            tables = [t.split('/')[-1] for t in store.keys() if t.startswith(prefix)]
        elif not isinstance(tables, list):
            # allow a single table name to be passed as a bare string
            tables = [tables]
        # read in each table and register it with orca
        # (fixes the redundant `df = df = ...` double assignment)
        for t in tables:
            df = store['{}/{}'.format(year, t)]
            orca.add_table(t, df)
def list_store_years(h5, table_name=None):
    """
    List the available years in the h5. This assumes tables
    follow the structure: /<year>/<table_name>

    Parameters:
    -----------
    h5: str
        Full path to the h5 fil.
    table_name: str, optional default None
        Specific table to look for.
        If not provided, returns years for any table.

    Returns:
    --------
    list of str

    """
    with pd.HDFStore(h5, mode='r') as store:
        keys = store.keys()
    if table_name is None:
        # First path segment of every key is the year prefix.
        years = {key.split('/')[1] for key in keys}
    else:
        suffix = '/{}'.format(table_name)
        years = {key.split('/')[1] for key in keys if key.endswith(suffix)}
    return sorted(years)
def list_store_tables(h5, year, full=False):
    """
    List the table names available in a given year

    Parameters:
    -----------
    h5: str
        Full path to the h5 fil.
    year: str or in
        The year to look for.
    full: bool, optional default False
        If True, returns full paths, e.g. /2020/households
        If False, returns the base table name, e.g. households

    Returns:
    --------
    list of str

    """
    prefix = '/{}'.format(year)
    with pd.HDFStore(h5, mode='r') as store:
        matches = [key for key in store.keys() if key.startswith(prefix)]
    if full:
        return matches
    # Strip the leading /<year>/ portion, keeping only the table name.
    return [key.split('/')[-1] for key in matches]
#################################################
# FOR GENERATING INDICATORS
##################################################
def get_indicators(h5,
                   years,
                   tables,
                   by,
                   agg_func,
                   **agg_kwargs):
    """
    Generates indicators (summary attributes).

    Parameters:
    -----------
    h5: str
        Full path to the h5 file containing the data to summarize.
    years: list
        List of years to process.
    tables: list
        List of tables to load from the h5.
    by: str, list of str, or dict:
        Column(s) to aggregate by.
        If a dict is provided, aggregations across multiple groupings
        will be generated. The provided dict should be keyed by some name
        with values indicating the column(s) to group by. The resulting
        dict will then be a dict of dicts containing data frames.
    agg_func: func
        The aggregation/indicator function to apply. Should accept
        'by' as the input argument.
    **agg_kwargs: kwarg dict
        Additional arguments to pass to the aggregation function.

    Returns:
    --------
    dict of pandas.DataFrame, keyed by year
    (or dict of such dicts, keyed by grouping name, when `by` is a dict)

    """
    # the base year is stored under the 'base' prefix in the h5
    base_year = orca.get_injectable('base_year')
    to_concat= {}
    # get summaries for all years
    for y in years:
        print('on year: {}...'.format(y))
        # free memory from the previous year's tables before loading more
        gc.collect()
        # load tables and register w/ orca
        if y == base_year:
            load_tables(h5, 'base', tables)
        else:
            load_tables(h5, y, tables)
        # get summary results
        if not isinstance(by, dict):
            to_concat[y] = agg_func(by, **agg_kwargs)
        else:
            # need to compute indicators across multiple groups
            for k, v in by.items():
                if k not in to_concat:
                    to_concat[k] = {}
                to_concat[k][y] = agg_func(v, **agg_kwargs)
    return to_concat
def compile_to_cols(to_concat, collapse_col_idx=True, collapse_row_idx=True):
    """
    Take a dictionary of data frames and concat column-wise
    so there is a column for every column/year combination.

    Parameters:
    -----------
    to_concat: dict of pandas.DataFrames
        The data frames to compile, keyed by year.
    collapse_col_idx: bool, optional, default True
        If True, combines multi-columns so that the resulting
        column names are <level2>_<level1>, e.g. pop_2020
    collapse_row_idx: bool, optional, default True
        If True, and the dataframe has a multi-index, sends the index
        levels to columns and generates a new index.

    Returns:
    --------
    pandas.DataFrame

    """
    # Missing year/column combinations become 0 rather than NaN.
    c = pd.concat(to_concat, axis=1).fillna(0)
    if collapse_col_idx:
        # Collapse the (year, column) multi-columns into single names.
        # Note this assumes there's only two levels.
        pairs = zip(
            c.columns.get_level_values(0),
            c.columns.get_level_values(1),
        )
        c.columns = ['{}_{}'.format(col, year) for year, col in pairs]
    # Collapse multi-index rows into ordinary columns.
    # (Removed a stray no-op `c.index` statement and an unused
    # `col_names` local from the original implementation.)
    if collapse_row_idx and c.index.nlevels > 1:
        c.reset_index(inplace=True)
    return c
def compile_to_rows(to_concat, collapse_row_idx=False):
    """
    Take a dictionary of data frames and concat row-wise
    so there is a row for every year, group combination.

    Parameters:
    -----------
    to_concat: dict of pandas.DataFrames
        The data frames to compile, keyed by year.
    collapse_row_idx: bool, optional, default False
        If True (and there is more than one grouping level), the grouping
        levels stay as ordinary columns instead of becoming the index.

    Returns:
    --------
    pandas.DataFrame

    """
    stacked = pd.concat(to_concat)
    # The concat key becomes the outermost index level; everything
    # after it is the original grouping.
    group_levels = list(stacked.index.names)[1:]
    stacked = stacked.rename_axis(['year'] + group_levels).reset_index()
    if not collapse_row_idx or len(group_levels) == 1:
        stacked = stacked.set_index(group_levels)
    return stacked
| StarcoderdataPython |
20901 | import sys
import time
from tia.trad.tools.io.follow import followMonitor
import tia.configuration as conf
from tia.trad.tools.errf import eReport
import ujson as json
import matplotlib.pyplot as plt
import math
import collections
import logging
from tia.trad.tools.ipc.processLogger import PROCESS_NAME
LOGGER_NAME = PROCESS_NAME + __file__.split("/")[-1]; logger = logging.getLogger(LOGGER_NAME)
reportFile = None
def pointDistance(initF, initI, point):
    """Return the perpendicular distance from ``point`` to the infinite
    line passing through ``initF`` and ``initI``.

    Each argument is an (x, y) pair. Raises ZeroDivisionError when
    ``initF`` and ``initI`` coincide (the line is undefined), matching
    the original behaviour. The previous ``try/except Exception: raise``
    wrapper was a no-op and has been removed.
    """
    dx = initI[0] - initF[0]
    dy = initI[1] - initF[1]
    length = math.hypot(dx, dy)
    # Unit normal to the line direction is (-dy, dx) / length.
    nx = -dy / length
    ny = dx / length
    # Project the vector (initF -> point) onto the normal.
    acx = point[0] - initF[0]
    acy = point[1] - initF[1]
    return math.fabs(acx * nx + acy * ny)
def getAvg(_list):
    """Return the midrange of ``_list``: the mean of its maximum and
    minimum values, as a float.

    Raises ValueError on an empty sequence (from max/min), the same as
    the original. The pointless ``try/except Exception: raise`` wrapper
    has been removed.
    """
    return (max(_list) + min(_list)) / 2.0
def shutdown():
    """Close the module-level report file handle on shutdown."""
    try:
        logger.debug("shutting down")
        global reportFile
        reportFile.close()
    # NOTE(review): this except clause only re-raises, so it adds nothing;
    # kept as-is to avoid behaviour changes.
    except Exception: raise
def run(**kwargs):
    """Live trading monitor (Python 2): follows the JSON-lines report file
    and continuously redraws four matplotlib panels (distance from the
    initial funds/items line, normalized value, trade-matcher book state,
    and cumulative bid/ask amounts).

    Expected kwargs (inferred from usage — confirm with caller):
    processLogger, initFunds, initItems. Runs forever; on any unexpected
    error it logs via eReport, closes the report file and exits the process.
    """
    try:
        # Rebind the module-level logger/report handle used by shutdown().
        global logger
        global reportFile
        logger = kwargs["processLogger"]
        logger.debug("monitor_mainTr:hi")
        _initFunds = kwargs["initFunds"]
        _initItems = kwargs["initItems"]
        plt.ion() # turn interactive on
        fig = plt.figure()
        fig.show()
        # raw
        ax = fig.add_subplot(221)
        #hline = ax.axhline(y=_initFunds)
        #vline = ax.axvline(x=_initItems)
        #ax.set_xscale("log")
        #ax.set_yscale("log")
        data, = ax.plot([], [], 'b+')
        data11, = ax.plot([], [], 'ro')
        # value
        ax2 = fig.add_subplot(222)
        data2, = ax2.plot([], [], 'ro-')
        # inside TM
        ax3 = fig.add_subplot(223)
        data3, = ax3.plot([], [], 'ro')
        data4, = ax3.plot([],[], 'bo')
        minBids, = ax3.plot([], [], "r>")
        maxAsks, = ax3.plot([], [], "b>")
        # top b/a
        ax5 = fig.add_subplot(224)
        dataI, = ax5.plot([], [], "o-")
        dataF, = ax5.plot([], [], "ro-")
        # Rolling windows: only the most recent `windowLength` samples
        # are kept and drawn.
        windowLength = 50
        fundsHistory = collections.deque(maxlen=windowLength); itemsHistory = collections.deque(maxlen=windowLength)
        valueHistory = collections.deque(maxlen=windowLength)
        tmFundsHistory = collections.deque(maxlen=windowLength); tmItemsHistory = collections.deque(maxlen=windowLength)
        tmIAHSum = collections.deque(maxlen=windowLength); tmFAHSum = collections.deque(maxlen=windowLength)
        topAsksHistory = collections.deque(maxlen=10)
        topBidsHistory = collections.deque(maxlen=10)
        # touch report.json
        #reportFile = open(conf.FN_REPORT, "w"); reportFile.close()
        reportFile = open(conf.FN_REPORT, "r")
        # followMonitor yields newly appended lines (tail -f style).
        newline = followMonitor(reportFile, fig)
        while 1:
            try:
                #for line in reportFile:
                line = newline.next()
                jsonObj = json.loads(line)
                universeSize = float(jsonObj["universeSize"])
                topAsks = jsonObj["topAsks"]; topBids = jsonObj["topBids"]
                initInvF = float(_initFunds) * universeSize
                initInvI = float(_initItems) * universeSize
                cumulFunds = float(jsonObj["cumulFunds"])
                cumulItems = float(jsonObj["cumulItems"])
                #fundsHistory.append(funds); itemsHistory.append(items)
                # Distance of the current (funds, items) state from the line
                # between the all-funds and all-items starting points.
                dist = pointDistance([0, initInvF], [initInvI, 0], [cumulFunds, cumulItems])
                fundsHistory.append(dist)
                #data.set_ydata(fundsHistory); data.set_xdata(itemsHistory)
                data.set_ydata(fundsHistory); data.set_xdata(xrange(len(fundsHistory)))
                #data11.set_ydata(funds); data11.set_xdata(items)
                #data11.set_ydata(dist); data11.set_xdata(xrange(len(fundsHistory)))
                ax.relim()
                ax.autoscale_view(True,True,True)
                # Trade-matcher book: price -> amount maps for both sides.
                tmFunds = jsonObj["tmFunds"]; tmItems = jsonObj["tmItems"]
                tmFA = 0; tmIA = 0
                tmFPH = collections.deque(); tmFAH = collections.deque()
                tmIPH = collections.deque(); tmIAH = collections.deque()
                for price in tmFunds:
                    amount = tmFunds[price]
                    tmFPH.append(price)
                    tmFAH.append(amount)
                    tmFA += amount
                tmFAHSum.append(tmFA)
                for price in tmItems:
                    amount = tmItems[price]
                    tmIPH.append(price)
                    tmIAH.append(amount)
                    tmIA += amount
                tmIAHSum.append(tmIA)
                dataI.set_ydata(tmIAHSum); dataI.set_xdata(xrange(len(tmIAHSum)))
                dataF.set_ydata(tmFAHSum); dataF.set_xdata(xrange(len(tmFAHSum)))
                ax5.relim()
                ax5.autoscale_view(True,True,True)
                # Value normalized to the initial investment when possible.
                value = float(jsonObj["value"]) / initInvF if initInvF else float(jsonObj["value"])
                valueHistory.append(value)
                data2.set_xdata(range(len(valueHistory)))
                data2.set_ydata(valueHistory)
                ax2.relim()
                ax2.autoscale_view(True,True,True)
                """
                TM stuff
                """
                # make universe states pretty: spread the top bids/asks
                # evenly across the x-range occupied by the book amounts.
                tmpList = list(tmFAH) + list(tmIAH)
                xDrawStart = min(tmpList)
                drawedInterval = max(tmpList) - xDrawStart
                spacing = float(drawedInterval) / float (len(topBids))
                offset = float(spacing) / float(2)
                xcords = collections.deque()
                for index, bid in enumerate(topBids):
                    xcords.append(offset + xDrawStart + index * spacing)
                minBids.set_ydata(topBids); minBids.set_xdata(xcords)
                maxAsks.set_ydata(topAsks); maxAsks.set_xdata(xcords)
                data3.set_xdata(tmFAH)
                data3.set_ydata(tmFPH)
                data4.set_xdata(tmIAH)
                data4.set_ydata(tmIPH)
                ax3.relim()
                ax3.autoscale_view(True,True,True)
                fig.canvas.draw()
                #plt.savefig(conf.FN_PLOT_IMAGE)
            # Malformed/partial JSON lines are skipped silently.
            except ValueError: continue
    except Exception as ex:
        eReport(__file__)
        reportFile.close()
        sys.exit()
11352923 | # By <NAME>
# <EMAIL>
# Honeybee started by <NAME> is licensed
# under a Creative Commons Attribution-ShareAlike 3.0 Unported License.
"""
This component [removes | updates] Honeybee components from [grasshopper | a source folder]
-
Provided by Honeybee 0.0.55
Args:
sourceDirectory_: Optional address to a folder that contains Honeybee updated userObjects. If None the component will download the latest version from GitHUB.
_updateThisFile: Set to True if you want the Honeybee components in this file be updated from the source directory
_updateAllUObjects: Set to True to sync all the Ladybug and Honeybee userObjects
Returns:
readMe!: ...
"""
ghenv.Component.Name = "Honeybee_Update Honeybee"
ghenv.Component.NickName = 'updateHoneybee'
ghenv.Component.Message = 'VER 0.0.55\nSEP_11_2014'
ghenv.Component.Category = "Honeybee"
ghenv.Component.SubCategory = "11 | Developers"
#compatibleHBVersion = VER 0.0.55\nAUG_25_2014
#compatibleLBVersion = VER 0.0.58\nAUG_20_2014
try: ghenv.Component.AdditionalHelpFromDocStrings = "1"
except: pass
import scriptcontext as sc
import Grasshopper.Kernel as gh
import os
import shutil
import zipfile
import time
import urllib
import Grasshopper.Folders as folders
def removeCurrentHB():
    """Delete every installed Honeybee userObject file from all
    Grasshopper cluster folders (IronPython 2 / .NET strings)."""
    UOFolders = folders.ClusterFolders
    for folder in UOFolders:
        fileNames = os.listdir(folder)
        print 'Removing Honeybee!'
        for fileName in fileNames:
            # check for Honeybee userObjects and delete the files
            # (.StartsWith is the .NET string method available in IronPython)
            if fileName.StartsWith('Honeybee'):
                fullPath = os.path.join(folder, fileName)
                os.remove(fullPath)
def downloadSourceAndUnzip(lb_preparation):
    """
    Download the source code from github and unzip it in temp folder.
    Returns the path of the unzipped userObjects folder, or None when
    the download fails.
    """
    url = "https://github.com/mostaphaRoudsari/honeybee/archive/master.zip"
    targetDirectory = os.path.join(sc.sticky["Honeybee_DefaultFolder"], "honeybeeSrc")
    # download the zip file
    print "Downloading the source code..."
    zipFile = os.path.join(targetDirectory, os.path.basename(url))
    # if the source file was downloaded within the last ~17 minutes
    # then just reuse the available file instead of re-downloading
    if os.path.isfile(zipFile) and time.time() - os.stat(zipFile).st_mtime < 1000: download = False
    else:
        download = True
        # remove the old version
        try: lb_preparation.nukedir(targetDirectory, True)
        except: pass
    # create the target directory
    if not os.path.isdir(targetDirectory): os.mkdir(targetDirectory)
    if download:
        webFile = urllib.urlopen(url)
        localFile = open(zipFile, 'wb')
        localFile.write(webFile.read())
        webFile.close()
        localFile.close()
    if not os.path.isfile(zipFile):
        print "Download failed! Try to download and unzip the file manually form:\n" + url
        return
    #unzip the file
    with zipfile.ZipFile(zipFile) as zf:
        for f in zf.namelist():
            if f.endswith('/'):
                try: os.makedirs(f)
                except: pass
            else:
                zf.extract(f, targetDirectory)
    # Windows-style path: GitHub zips unpack into honeybee-master/
    userObjectsFolder = os.path.join(targetDirectory, r"honeybee-master\userObjects")
    return userObjectsFolder
def getAllTheComponents(onlyGHPython = True):
    """Return the components on the current Grasshopper canvas.

    By default only components of the same type as this one
    (GHPython components) are returned.
    """
    components = []
    document = ghenv.Component.OnPingDocument()
    for component in document.Objects:
        if onlyGHPython and type(component)!= type(ghenv.Component):
            pass
        else:
            components.append(component)
    return components
def updateTheComponent(component, newUOFolder, lb_preparation):
def isNewerVersion(currentUO, component):
"""
check if the component has a newer version than the current userObjects
"""
# get the code insider the userObject
ghComponent = currentUO.InstantiateObject()
# version of the connected component
if component.Message == None:
return True, ghComponent.Code
if len(component.Message.split("\n"))<2:
return True, ghComponent.Code
ghVersion, ghDate = component.Message.split("\n")
ghCompVersion = map(int, ghVersion.split("VER ")[1].split("."))
month, day, ghYear = ghDate.split("_")
# print version, date
month = lb_preparation.monthList.index(month.upper()) + 1
ghCompDate = int(lb_preparation.getJD(month, day))
# this is not the best way but works for now!
# should be a better way to compute the component and get the message
componentCode = ghComponent.Code.split("\n")
UODate = ghCompDate - 1
# version of the file
for lineCount, line in enumerate(componentCode):
if lineCount > 200: break
if line.strip().startswith("ghenv.Component.Message"):
#print line
# print line.split("=")[1].strip().split("\n")
version, date = line.split("=")[1].strip().split("\\n")
# in case the file doesn't have an standard Ladybug message let it be updated
try:
UOVersion = map(int, version.split("VER ")[1].split("."))
except Exception, e:
return True, ghComponent.Code
month, day, UOYear = date.split("_")
month = lb_preparation.monthList.index(month.upper()) + 1
UODate = int(lb_preparation.getJD(month, day))
break
# check if the version of the code is newer
if int(ghYear.strip()) < int(UOYear[:-1].strip()):
return True, ghComponent.Code
elif ghCompDate < UODate:
return True, ghComponent.Code
elif ghCompDate == UODate:
for ghVer, UOVer in zip(UOVersion, UOVersion):
if ghVer > UOVer: return False, " "
return True, ghComponent.Code
else:
return False, " "
# check if the userObject is already existed in the folder
try:
filePath = os.path.join(newUOFolder, component.Name + ".ghuser")
newUO = gh.GH_UserObject(filePath)
except:
# there is no newer userobject with the same name so just return
return
# if is newer remove
isNewer, newCode = isNewerVersion(newUO, component)
# replace the code inside the component with userObject code
if isNewer:
component.Code = newCode
component.ExpireSolution(True)
def main(sourceDirectory, updateThisFile, updateAllUObjects):
if not sc.sticky.has_key('ladybug_release') or not sc.sticky.has_key('honeybee_release'):
return "you need to let both Ladybug and Honeybee fly first!", False
lb_preparation = sc.sticky["ladybug_Preparation"]()
if sourceDirectory == None:
userObjectsFolder = downloadSourceAndUnzip(lb_preparation)
if userObjectsFolder==None: return "Download failed! Read component output for more information!", False
else:
userObjectsFolder = sourceDirectory
destinationDirectory = folders.ClusterFolders[0]
if updateThisFile:
# find all the userObjects
ghComps = getAllTheComponents()
# for each of them check and see if there is a userObject with the same name is available
for ghComp in ghComps:
if ghComp.Name != "Honeybee_Update Honeybee":
updateTheComponent(ghComp, userObjectsFolder, lb_preparation)
return "Done!", True
# copy files from source to destination
if updateAllUObjects:
if not userObjectsFolder or not os.path.exists(userObjectsFolder ):
warning = 'source directory address is not a valid address!'
print warning
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
return -1
print 'Updating...'
srcFiles = os.listdir(userObjectsFolder)
for srcFileName in srcFiles:
# check for ladybug userObjects
if srcFileName.StartsWith('Honeybee'):
srcFullPath = os.path.join(userObjectsFolder, srcFileName)
dstFullPath = os.path.join(destinationDirectory, srcFileName)
# check if a newer version is not aleady exist
if not os.path.isfile(dstFullPath): shutil.copy2(srcFullPath, dstFullPath)
# or is older than the new file
elif os.stat(srcFullPath).st_mtime - os.stat(dstFullPath).st_mtime > 1: shutil.copy2(srcFullPath, dstFullPath)
return "Done!" , True
if _updateThisFile or _updateAllUObjects:
    # Run the update and surface failures as a Grasshopper warning.
    msg, success = main(sourceDirectory_, _updateThisFile, _updateAllUObjects)
    if not success:
        ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
    else:
        print msg
else:
    print " "
3574780 | <filename>scoring.py<gh_stars>0
def count_common_tags(tags_list1, tags_list2):
    """Count the matching tag pairs between two slides.

    Every occurrence of a tag in one list is matched against every equal
    occurrence in the other, so duplicated tags contribute multiple counts
    (same semantics as the original pairwise scan, but in O(len1 + len2)
    instead of O(len1 * len2)).

    :param tags_list1: The first list of tags
    :param tags_list2: The second list of tags
    :return: The number of matching tag pairs between these 2 slides
    """
    # Tally the second list once, then look each tag of the first list up
    # in O(1) instead of re-scanning the whole list per tag.
    occurrences = {}
    for tag in tags_list2:
        occurrences[tag] = occurrences.get(tag, 0) + 1
    return sum(occurrences.get(tag, 0) for tag in tags_list1)
def count_tags_s1(tags_list1, tags_list2):
    """Count the tags of the first slide that are absent from the second.

    BUGFIX: the original counted every non-matching (tag1, tag2) cross
    pair (len1 * len2 - matches) rather than the tags unique to slide 1,
    which made the min() taken in count_score meaningless.

    :param tags_list1: The first list of tags
    :param tags_list2: The second list of tags
    :return: The number of tags in tags_list1 that do not appear in tags_list2
    """
    # Membership test against a set is O(1) per tag.
    others = set(tags_list2)
    return sum(1 for tag in tags_list1 if tag not in others)
def count_tags_s2(tags_list1, tags_list2):
    """Count the tags of the second slide that are absent from the first.

    BUGFIX: the original counted every non-matching (tag2, tag1) cross
    pair (len1 * len2 - matches) rather than the tags unique to slide 2,
    which made the min() taken in count_score meaningless.

    :param tags_list1: The first list of tags
    :param tags_list2: The second list of tags
    :return: The number of tags in tags_list2 that do not appear in tags_list1
    """
    # Membership test against a set is O(1) per tag.
    others = set(tags_list1)
    return sum(1 for tag in tags_list2 if tag not in others)
def count_score(tags_list1, tags_list2):
    """Compute the interest score of a transition between two slides.

    The score is the minimum of the common tags and the tags unique to
    each side, as returned by the three helper counters.

    :param tags_list1: The first list of tags
    :param tags_list2: The second list of tags
    :return: The score obtained
    """
    return min(
        count_common_tags(tags_list1, tags_list2),
        count_tags_s1(tags_list1, tags_list2),
        count_tags_s2(tags_list1, tags_list2),
    )
| StarcoderdataPython |
8192583 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright © 2014, 2015 OnlineGroups.net and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from __future__ import unicode_literals, absolute_import, print_function
from zope.cachedescriptors.property import Lazy
from gs.content.email.base import SiteEmail, TextMixin
from gs.profile.base.page import ProfilePage
# Encoding name constant; not referenced in this file's visible code --
# presumably used by email-encoding callers elsewhere (TODO confirm).
UTF8 = 'utf-8'
class ResetMessage(SiteEmail, ProfilePage):
    '''The page shown to someone who received a password-reset message.'''
    # Subject line reused for the support "mailto" link below.
    subject = 'Password reset'

    @Lazy
    def supportEmail(self):
        '''A "mailto" URI for contacting support about the reset message,
        pre-filled with the location of the member's profile.'''
        # BUGFIX: "recieved" -> "received" in the user-visible message body.
        m = 'Hello,\n\nI received a password-reset message for my profile '\
            'at\n    {0}/{1}\nand...'
        msg = m.format(self.siteInfo.url, self.userInfo.url)
        retval = self.mailto(self.siteInfo.get_support_email(), self.subject, msg)
        return retval
class ResetMessageText(ResetMessage, TextMixin):
    '''The plain-text variant of the password-reset page.'''

    def __init__(self, context, request):
        super(ResetMessageText, self).__init__(context, request)
        # Name the download after the member so each profile gets its own file.
        f = 'gs-profile-password-reset-{0}.txt'.format(self.userInfo.id)
        self.set_header(f)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.