text
stringlengths
0
1.05M
meta
dict
from flex.decorators import rewrite_reserved_words
from flex.exceptions import (
    MultipleParametersFound,
    NoParameterFound,
)
from flex.utils import dereference_reference


@rewrite_reserved_words
def is_match(parameter, **kwargs):
    """
    Return True if ``parameter`` contains every key in ``kwargs`` with an
    equal value, otherwise False.
    """
    for key, value in kwargs.items():
        if key not in parameter:
            return False
        elif parameter[key] != value:
            return False
    return True


@rewrite_reserved_words
def filter_parameters(parameters, **kwargs):
    """Return the parameters whose fields match all of the given kwargs."""
    return [p for p in parameters if is_match(p, **kwargs)]


@rewrite_reserved_words
def find_parameter(parameters, **kwargs):
    """
    Given a list of parameters, find the single parameter whose fields match
    all of the provided keyword arguments (not just ``name``).

    :raises MultipleParametersFound: if more than one parameter matches.
    :raises NoParameterFound: if no parameter matches.
    """
    matching_parameters = filter_parameters(parameters, **kwargs)

    if len(matching_parameters) == 1:
        return matching_parameters[0]
    elif len(matching_parameters) > 1:
        raise MultipleParametersFound()
    raise NoParameterFound()


def merge_parameter_lists(*parameter_definitions):
    """
    Merge multiple lists of parameters into a single list.  Parameters are
    keyed on the ``(name, in)`` pair; for duplicate definitions the last
    write wins.
    """
    merged_parameters = {}
    for parameter_list in parameter_definitions:
        for parameter in parameter_list:
            key = (parameter['name'], parameter['in'])
            merged_parameters[key] = parameter
    # Return a concrete list rather than a dict view so the result matches
    # the documented "single list": it stays indexable and safely
    # re-iterable regardless of later dict mutation.
    return list(merged_parameters.values())


def dereference_parameter_list(parameters, context):
    """
    Resolve any ``$ref`` entries in ``parameters`` against ``context``,
    passing non-reference parameters through unchanged.
    """
    return tuple((
        dereference_reference(p['$ref'], context) if '$ref' in p else p
        for p in parameters
    ))
{ "repo_name": "pipermerriam/flex", "path": "flex/parameters.py", "copies": "1", "size": "1594", "license": "mit", "hash": 8842950974416794000, "line_mean": 27.4642857143, "line_max": 76, "alpha_frac": 0.6712672522, "autogenerated": false, "ratio": 4.331521739130435, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5502788991330434, "avg_score": null, "num_lines": null }
from flex.error_messages import MESSAGES
from flex.exceptions import ValidationError
from flex.constants import (
    EMPTY,
    INTEGER,
    STRING,
)
from flex.utils import pluralize
from flex.validation.common import (
    generate_object_validator,
)
from flex.decorators import (
    pull_keys_from_obj,
    suffix_reserved_words,
    skip_if_any_kwargs_empty,
)


@pull_keys_from_obj('minLength', 'maxLength')
@skip_if_any_kwargs_empty('minLength', 'maxLength')
def validate_max_length_greater_than_or_equal_to_min_length(minLength, maxLength, **kwargs):
    """Reject schemas whose ``maxLength`` is smaller than their ``minLength``."""
    # Guard clauses: there is nothing to compare if either bound is absent.
    if minLength is EMPTY:
        return
    if maxLength is EMPTY:
        return
    if not maxLength >= minLength:
        raise ValidationError(
            MESSAGES['max_length']['must_be_greater_than_min_length']
        )


@pull_keys_from_obj('type', 'maxLength')
@suffix_reserved_words
@skip_if_any_kwargs_empty('type_', 'maxLength')
def validate_type_for_max_length(type_, maxLength, **kwargs):
    """``maxLength`` is only meaningful on string-typed values."""
    declared_types = pluralize(type_)
    if not set(declared_types).intersection((STRING,)):
        raise ValidationError(
            MESSAGES['type']['invalid_type_for_max_length'],
        )


# Schema for the ``maxLength`` keyword itself: a positive integer.
max_length_schema = {
    'type': INTEGER,
    'minimum': 1,
}
max_length_validator = generate_object_validator(
    schema=max_length_schema,
)
{ "repo_name": "pipermerriam/flex", "path": "flex/loading/common/max_length.py", "copies": "1", "size": "1274", "license": "mit", "hash": -4340689488546424000, "line_mean": 25, "line_max": 92, "alpha_frac": 0.6844583987, "autogenerated": false, "ratio": 3.4619565217391304, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.46464149204391303, "avg_score": null, "num_lines": null }
from flex.exceptions import (
    ValidationError,
    ErrorList,
)
from flex.constants import (
    NULL,
    BOOLEAN,
    INTEGER,
    NUMBER,
    STRING,
    ARRAY,
    FILE,
)
from flex.utils import (
    pluralize,
)
from flex.validation.common import (
    generate_object_validator,
)
from flex.decorators import (
    suffix_reserved_words,
    skip_if_not_of_type,
)
from flex.validation.schema import (
    construct_schema_validators,
)

# Schema for one individual ``type`` value: must be a string drawn from the
# enumerated set (this parameter variant allows FILE).
single_type_schema = {
    'type': STRING,
    'enum': [
        NULL,
        BOOLEAN,
        INTEGER,
        NUMBER,
        STRING,
        ARRAY,
        FILE,
    ],
}

single_type_validators = construct_schema_validators(single_type_schema, {})
single_type_validator = generate_object_validator(
    field_validators=single_type_validators,
)


@suffix_reserved_words
@skip_if_not_of_type(ARRAY, STRING)
def validate_types(type_, **kwargs):
    """Validate each declared type against ``single_type_validator``."""
    # ``type`` may be a single string or a list of strings; pluralize
    # normalizes it so each entry can be checked individually.
    types = pluralize(type_)
    # ErrorList collects per-value errors; presumably it raises on context
    # exit when non-empty -- TODO confirm against flex.exceptions.
    with ErrorList() as errors:
        for value in types:
            try:
                single_type_validator(value)
            except ValidationError as err:
                errors.add_error(err.detail)


# The ``type`` keyword itself may be either a single string or an array.
type_schema = {
    'type': [
        STRING,
        ARRAY,
    ],
}

type_validators = construct_schema_validators(type_schema, {})
type_validators.add_validator('type', validate_types)

type_validator = generate_object_validator(
    field_validators=type_validators,
)
{ "repo_name": "pipermerriam/flex", "path": "flex/loading/common/single_parameter/type.py", "copies": "1", "size": "1412", "license": "mit", "hash": 4999768941174952000, "line_mean": 18.8873239437, "line_max": 76, "alpha_frac": 0.6331444759, "autogenerated": false, "ratio": 3.68668407310705, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.48198285490070497, "avg_score": null, "num_lines": null }
from flex.exceptions import (
    ValidationError,
    ErrorList,
)
from flex.constants import (
    NULL,
    BOOLEAN,
    INTEGER,
    NUMBER,
    STRING,
    ARRAY,
    OBJECT,
)
from flex.utils import (
    pluralize,
)
from flex.validation.common import (
    generate_object_validator,
)
from flex.decorators import (
    suffix_reserved_words,
    skip_if_not_of_type,
)
from flex.validation.schema import (
    construct_schema_validators,
)

# Schema for one individual ``type`` value: must be a string drawn from the
# enumerated set (this schema variant allows OBJECT).
single_type_schema = {
    'type': STRING,
    'enum': [
        NULL,
        BOOLEAN,
        INTEGER,
        NUMBER,
        STRING,
        ARRAY,
        OBJECT,
    ],
}

single_type_validators = construct_schema_validators(single_type_schema, {})
single_type_validator = generate_object_validator(
    field_validators=single_type_validators,
)


@suffix_reserved_words
@skip_if_not_of_type(ARRAY, STRING)
def validate_types(type_, **kwargs):
    """Validate each declared type against ``single_type_validator``."""
    # ``type`` may be a single string or a list of strings; pluralize
    # normalizes it so each entry can be checked individually.
    types = pluralize(type_)
    # ErrorList collects per-value errors; presumably it raises on context
    # exit when non-empty -- TODO confirm against flex.exceptions.
    with ErrorList() as errors:
        for value in types:
            try:
                single_type_validator(value)
            except ValidationError as err:
                errors.add_error(err.detail)


# The ``type`` keyword itself may be either a single string or an array.
type_schema = {
    'type': [
        STRING,
        ARRAY,
    ],
}

type_validators = construct_schema_validators(type_schema, {})
type_validators.add_validator('type', validate_types)

type_validator = generate_object_validator(
    field_validators=type_validators,
)
{ "repo_name": "pipermerriam/flex", "path": "flex/loading/common/schema/type.py", "copies": "1", "size": "1416", "license": "mit", "hash": -7327857833822817000, "line_mean": 18.9436619718, "line_max": 76, "alpha_frac": 0.634180791, "autogenerated": false, "ratio": 3.677922077922078, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4812102868922078, "avg_score": null, "num_lines": null }
from flex.exceptions import ValidationError
from flex.constants import (
    INTEGER,
    NUMBER,
    EMPTY,
    BOOLEAN,
)
from flex.utils import (
    pluralize,
)
from flex.error_messages import MESSAGES
from flex.validation.common import (
    generate_object_validator,
)
from flex.decorators import (
    suffix_reserved_words,
    pull_keys_from_obj,
    skip_if_any_kwargs_empty,
)


@pull_keys_from_obj('minimum', 'exclusiveMinimum')
@skip_if_any_kwargs_empty('exclusiveMinimum')
def validate_minimum_required_if_exclusive_minimum_set(minimum, exclusiveMinimum, **kwargs):
    """Declaring ``exclusiveMinimum: true`` requires ``minimum`` to be present."""
    if exclusiveMinimum is True:
        if minimum is EMPTY:
            raise ValidationError(
                MESSAGES['minimum']['exclusive_minimum_required_minimum'],
            )


@pull_keys_from_obj('type', 'minimum')
@suffix_reserved_words
@skip_if_any_kwargs_empty('type_', 'minimum')
def validate_type_for_minimum(type_, minimum, **kwargs):
    """``minimum`` is only meaningful on numeric (integer/number) types."""
    declared_types = set(pluralize(type_))
    if declared_types.isdisjoint((INTEGER, NUMBER)):
        raise ValidationError(
            MESSAGES['type']['invalid_type_for_minimum'],
        )


# Schema for the ``minimum`` keyword itself: any number.
minimum_schema = {
    'type': NUMBER,
}
minimum_validator = generate_object_validator(
    schema=minimum_schema,
)

# Schema for the ``exclusiveMinimum`` flag: a boolean.
exclusive_minimum_schema = {
    'type': BOOLEAN,
}
exclusive_minimum_validator = generate_object_validator(
    schema=exclusive_minimum_schema,
)
{ "repo_name": "pipermerriam/flex", "path": "flex/loading/common/minimum.py", "copies": "1", "size": "1367", "license": "mit", "hash": -324135418392130000, "line_mean": 23.8545454545, "line_max": 92, "alpha_frac": 0.6986100951, "autogenerated": false, "ratio": 3.684636118598383, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9882268696591834, "avg_score": 0.00019550342130987295, "num_lines": 55 }
from flex.exceptions import ValidationError
from flex.context_managers import ErrorDict
from flex.validation.operation import (
    construct_operation_validators,
    validate_operation,
)
from flex.validation.common import (
    validate_request_method_to_operation,
    validate_path_to_api_path,
)


def validate_request(request, schema):
    """
    Validate ``request`` against a swagger ``schema``.

    Request validation does the following steps.

       1. validate that the path matches one of the defined paths in the schema.
       2. validate that the request method conforms to a supported methods for the given path.
       3. validate that the request parameters conform to the parameter
          definitions for the operation definition.

    Errors are accumulated in an ErrorDict context manager; presumably it
    raises on exit when any errors were recorded -- TODO confirm against
    flex.context_managers.
    """
    with ErrorDict() as errors:
        # 1
        try:
            api_path = validate_path_to_api_path(
                path=request.path,
                context=schema,
                **schema
            )
        except ValidationError as err:
            errors['path'].add_error(err.detail)
            return  # this causes an exception to be raised since errors is no longer falsy.

        path_definition = schema['paths'][api_path] or {}

        if not path_definition:
            # TODO: is it valid to not have a definition for a path?
            return

        # 2
        try:
            operation_definition = validate_request_method_to_operation(
                request_method=request.method,
                path_definition=path_definition,
            )
        except ValidationError as err:
            # NOTE(review): both step 2 and step 3 file their errors under
            # the 'method' key.
            errors['method'].add_error(err.detail)
            return

        if operation_definition is None:
            # TODO: is this compliant with swagger,  can path operations have a null
            # definition?
            return

        # 3
        operation_validators = construct_operation_validators(
            api_path=api_path,
            path_definition=path_definition,
            operation_definition=operation_definition,
            context=schema,
        )
        try:
            validate_operation(request, operation_validators, context=schema)
        except ValidationError as err:
            errors['method'].add_error(err.detail)
{ "repo_name": "pipermerriam/flex", "path": "flex/validation/request.py", "copies": "1", "size": "2198", "license": "mit", "hash": 1332161426541184000, "line_mean": 32.8153846154, "line_max": 94, "alpha_frac": 0.6155595996, "autogenerated": false, "ratio": 4.8307692307692305, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0007004529981326812, "num_lines": 65 }
from flex.exceptions import ValidationError
from flex.error_messages import MESSAGES
from flex.constants import (
    BOOLEAN,
    ARRAY,
)
from flex.utils import pluralize
from flex.validation.common import (
    generate_object_validator,
)
from flex.validation.schema import (
    construct_schema_validators,
)
from flex.decorators import (
    pull_keys_from_obj,
    suffix_reserved_words,
    skip_if_any_kwargs_empty,
)


@pull_keys_from_obj('type', 'uniqueItems')
@suffix_reserved_words
@skip_if_any_kwargs_empty('type_', 'uniqueItems')
def validate_type_for_unique_items(type_, uniqueItems, **kwargs):
    """``uniqueItems`` is only meaningful on array-typed values."""
    declared_types = pluralize(type_)
    # A falsy uniqueItems places no constraint, so any type is acceptable.
    if not uniqueItems:
        return
    if set(declared_types).isdisjoint((ARRAY,)):
        raise ValidationError(
            MESSAGES['type']['invalid_type_for_unique_items'],
        )


# Schema for the ``uniqueItems`` keyword itself: a boolean.
unique_items_schema = {
    'type': BOOLEAN,
}
unique_items_validators = construct_schema_validators(unique_items_schema, {})
unique_items_validator = generate_object_validator(
    field_validators=unique_items_validators,
)
{ "repo_name": "pipermerriam/flex", "path": "flex/loading/common/unique_items.py", "copies": "1", "size": "1061", "license": "mit", "hash": -4806406074359830000, "line_mean": 23.6744186047, "line_max": 78, "alpha_frac": 0.7106503299, "autogenerated": false, "ratio": 3.608843537414966, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4819493867314966, "avg_score": null, "num_lines": null }
from flex.exceptions import ValidationError
from flex.error_messages import MESSAGES
from flex.constants import (
    INTEGER,
    EMPTY,
    ARRAY,
)
from flex.utils import pluralize
from flex.validation.common import (
    generate_object_validator,
)
from flex.validation.schema import (
    construct_schema_validators,
)
from flex.decorators import (
    pull_keys_from_obj,
    suffix_reserved_words,
    skip_if_any_kwargs_empty,
)


@pull_keys_from_obj('minItems', 'maxItems')
def validate_max_items_greater_than_or_equal_to_min_items(minItems, maxItems, **kwargs):
    """Reject schemas whose ``maxItems`` is smaller than their ``minItems``."""
    # Guard clauses: nothing to compare unless both bounds are present.
    if minItems is EMPTY:
        return
    if maxItems is EMPTY:
        return
    if not maxItems >= minItems:
        raise ValidationError(
            MESSAGES['max_items']['must_be_greater_than_min_items']
        )


@pull_keys_from_obj('type', 'maxItems')
@suffix_reserved_words
@skip_if_any_kwargs_empty('type_', 'maxItems')
def validate_type_for_max_items(type_, maxItems, **kwargs):
    """``maxItems`` is only meaningful on array-typed values."""
    declared_types = set(pluralize(type_))
    if declared_types.isdisjoint((ARRAY,)):
        raise ValidationError(
            MESSAGES['type']['invalid_type_for_max_items'],
        )


# Schema for the ``maxItems`` keyword itself: an integer.
max_items_schema = {
    'type': INTEGER,
}
max_items_validators = construct_schema_validators(max_items_schema, {})
max_items_validator = generate_object_validator(
    field_validators=max_items_validators,
)
{ "repo_name": "pipermerriam/flex", "path": "flex/loading/common/max_items.py", "copies": "1", "size": "1341", "license": "mit", "hash": 1714242171670297900, "line_mean": 25.2941176471, "line_max": 88, "alpha_frac": 0.6927665921, "autogenerated": false, "ratio": 3.4740932642487046, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.966575829212751, "avg_score": 0.0002203128442388191, "num_lines": 51 }
from flex.exceptions import ValidationError
from flex.error_messages import MESSAGES
from flex.constants import (
    INTEGER,
    EMPTY,
    OBJECT,
)
from flex.utils import pluralize
from flex.validation.common import (
    generate_object_validator,
)
from flex.validation.schema import (
    construct_schema_validators,
)
from flex.decorators import (
    pull_keys_from_obj,
    suffix_reserved_words,
    skip_if_any_kwargs_empty,
)


@pull_keys_from_obj('minProperties', 'maxProperties')
def validate_max_properties_is_greater_than_or_equal_to_min_properties(minProperties, maxProperties, **kwargs):
    """Reject schemas whose ``maxProperties`` is smaller than ``minProperties``."""
    # Guard clauses: nothing to compare unless both bounds are present.
    if maxProperties is EMPTY:
        return
    if minProperties is EMPTY:
        return
    if not maxProperties >= minProperties:
        raise ValidationError(
            MESSAGES['max_properties']['must_be_greater_than_min_properties'],
        )


@pull_keys_from_obj('type', 'maxProperties')
@suffix_reserved_words
@skip_if_any_kwargs_empty('type_', 'maxProperties')
def validate_type_for_max_properties(type_, maxProperties, **kwargs):
    """``maxProperties`` is only meaningful on object-typed values."""
    declared_types = set(pluralize(type_))
    if declared_types.isdisjoint((OBJECT,)):
        raise ValidationError(
            MESSAGES['type']['invalid_type_for_max_properties'],
        )


# Schema for the ``maxProperties`` keyword itself: a non-negative integer.
max_properties_schema = {
    'type': INTEGER,
    'minimum': 0,
}
max_properties_validators = construct_schema_validators(max_properties_schema, {})
max_properties_validator = generate_object_validator(
    field_validators=max_properties_validators,
)
{ "repo_name": "pipermerriam/flex", "path": "flex/loading/common/schema/max_properties.py", "copies": "1", "size": "1616", "license": "mit", "hash": 3160091712169656000, "line_mean": 29.4905660377, "line_max": 85, "alpha_frac": 0.6503712871, "autogenerated": false, "ratio": 4.08080808080808, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0008962099750149475, "num_lines": 53 }
from flex.exceptions import ValidationError
from flex.error_messages import MESSAGES
from flex.constants import (
    INTEGER,
    NUMBER,
    EMPTY,
    BOOLEAN,
)
from flex.utils import (
    pluralize,
)
from flex.validation.common import (
    generate_object_validator,
)
from flex.decorators import (
    pull_keys_from_obj,
    suffix_reserved_words,
    skip_if_any_kwargs_empty,
)


@pull_keys_from_obj('minimum', 'maximum')
def validate_maximum_is_gte_minimum(minimum, maximum, **kwargs):
    """Reject schemas whose ``maximum`` is smaller than their ``minimum``."""
    # Guard clauses: nothing to compare unless both bounds are present.
    if minimum is EMPTY:
        return
    if maximum is EMPTY:
        return
    if not maximum >= minimum:
        raise ValidationError(MESSAGES['maximum']['must_be_greater_than_minimum'])


@pull_keys_from_obj('maximum', 'exclusiveMaximum')
def validate_maximum_required_if_exclusive_maximum_set(maximum, exclusiveMaximum, **kwargs):
    """Declaring ``exclusiveMaximum: true`` requires ``maximum`` to be present."""
    if exclusiveMaximum is EMPTY:
        return
    if exclusiveMaximum is True:
        if maximum is EMPTY:
            raise ValidationError(
                MESSAGES['maximum']['exclusive_maximum_required_maximum'],
            )


@pull_keys_from_obj('type', 'maximum')
@suffix_reserved_words
@skip_if_any_kwargs_empty('type_', 'maximum')
def validate_type_for_maximum(type_, maximum, **kwargs):
    """``maximum`` is only meaningful on numeric (integer/number) types."""
    declared_types = set(pluralize(type_))
    if declared_types.isdisjoint((INTEGER, NUMBER)):
        raise ValidationError(
            MESSAGES['type']['invalid_type_for_maximum'],
        )


# Schema for the ``maximum`` keyword itself: any number.
maximum_schema = {
    'type': NUMBER,
}
maximum_validator = generate_object_validator(
    schema=maximum_schema,
)

# Schema for the ``exclusiveMaximum`` flag: a boolean.
exclusive_maximum_schema = {
    'type': BOOLEAN,
}
exclusive_maximum_validator = generate_object_validator(
    schema=exclusive_maximum_schema,
)
{ "repo_name": "pipermerriam/flex", "path": "flex/loading/common/maximum.py", "copies": "1", "size": "1652", "license": "mit", "hash": -2447328980961844000, "line_mean": 24.8125, "line_max": 92, "alpha_frac": 0.6930992736, "autogenerated": false, "ratio": 3.7889908256880735, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9980308780464391, "avg_score": 0.00035626376473636484, "num_lines": 64 }
from flex.exceptions import ValidationError
from flex.error_messages import MESSAGES
from flex.constants import (
    INTEGER,
    OBJECT,
)
from flex.utils import pluralize
from flex.validation.common import (
    generate_object_validator,
)
from flex.validation.schema import (
    construct_schema_validators,
)
from flex.decorators import (
    pull_keys_from_obj,
    suffix_reserved_words,
    skip_if_any_kwargs_empty,
)


@pull_keys_from_obj('type', 'minProperties')
@suffix_reserved_words
@skip_if_any_kwargs_empty('type_', 'minProperties')
def validate_type_for_min_properties(type_, minProperties, **kwargs):
    """``minProperties`` is only meaningful on object-typed values."""
    declared_types = set(pluralize(type_))
    if declared_types.isdisjoint((OBJECT,)):
        raise ValidationError(
            MESSAGES['type']['invalid_type_for_min_properties'],
        )


# Schema for the ``minProperties`` keyword itself: a non-negative integer.
min_properties_schema = {
    'type': INTEGER,
    'minimum': 0,
}
min_properties_validators = construct_schema_validators(min_properties_schema, {})
min_properties_validator = generate_object_validator(
    field_validators=min_properties_validators,
)
{ "repo_name": "pipermerriam/flex", "path": "flex/loading/common/schema/min_properties.py", "copies": "1", "size": "1060", "license": "mit", "hash": -1789541669094902300, "line_mean": 25.5, "line_max": 82, "alpha_frac": 0.7188679245, "autogenerated": false, "ratio": 3.6805555555555554, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.48994234800555553, "avg_score": null, "num_lines": null }
from flex.exceptions import ValidationError
from flex.error_messages import MESSAGES
from flex.constants import (
    STRING,
    PARAMETER_IN_VALUES,
    PATH,
    BODY,
    EMPTY,
)
from flex.validation.common import (
    generate_object_validator,
)
from flex.decorators import (
    pull_keys_from_obj,
    suffix_reserved_words,
    skip_if_any_kwargs_empty,
)


# Schema for the ``in`` keyword: one of the recognized parameter locations.
in_schema = {
    'type': STRING,
    'enum': PARAMETER_IN_VALUES,
}


@pull_keys_from_obj('in', 'required')
@skip_if_any_kwargs_empty('in')
@suffix_reserved_words
def validate_path_parameters_must_be_required(in_, required, **kwargs):
    """Path parameters must explicitly declare ``required: true``."""
    if in_ == PATH and required is not True:
        raise ValidationError(MESSAGES['required']['path_parameters_must_be_required'])


@pull_keys_from_obj('in', 'schema')
@skip_if_any_kwargs_empty('in')
@suffix_reserved_words
def validate_body_parameters_must_include_a_schema(in_, schema, **kwargs):
    """Body parameters must carry a ``schema`` definition."""
    if in_ == BODY and schema is EMPTY:
        raise ValidationError(MESSAGES['schema']['body_parameters_must_include_a_schema'])


@pull_keys_from_obj('in', 'type')
@skip_if_any_kwargs_empty('in')
@suffix_reserved_words
def validate_type_declared_for_non_body_parameters(in_, type_, **kwargs):
    """Every non-body parameter must declare a ``type``."""
    if in_ != BODY and type_ is EMPTY:
        raise ValidationError(MESSAGES['type']['non_body_parameters_must_declare_a_type'])


in_validator = generate_object_validator(
    schema=in_schema,
)
{ "repo_name": "pipermerriam/flex", "path": "flex/loading/common/single_parameter/in_.py", "copies": "1", "size": "1436", "license": "mit", "hash": -8016222090286530000, "line_mean": 25.1090909091, "line_max": 94, "alpha_frac": 0.6796657382, "autogenerated": false, "ratio": 3.33953488372093, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9516298604030353, "avg_score": 0.0005804035781152486, "num_lines": 55 }
from flexget.api.app import base_message
from flexget.api.core.plugins import ObjectsContainer as OC
from flexget.utils import json


class TestPluginsAPI:
    config = 'tasks: {}'

    def test_plugins_api(self, api_client, schema_match):
        """Exercise the /plugins/ endpoints and validate payload schemas."""

        def fetch(url, expected_status, schema):
            # Issue a GET, check the status code, and validate the JSON
            # payload against the expected schema; return the payload.
            rsp = api_client.get(url)
            assert rsp.status_code == expected_status, 'Response code is %s' % rsp.status_code
            payload = json.loads(rsp.get_data(as_text=True))
            errors = schema_match(schema, payload)
            assert not errors
            return payload

        fetch('/plugins/', 200, OC.plugin_list_reply)
        fetch('/plugins/?include_schema=true', 200, OC.plugin_list_reply)
        fetch('/plugins/?interface=search', 200, OC.plugin_list_reply)

        # Unknown interface: still a 200, but an empty list.
        data = fetch('/plugins/?interface=fgfg', 200, OC.plugin_list_reply)
        assert data == []

        # Unknown phase is a client error.
        fetch('/plugins/?phase=fgfg', 400, base_message)
        fetch('/plugins/?phase=input', 200, OC.plugin_list_reply)

        # Unknown plugin name is a client error.
        fetch('/plugins/bla/', 400, base_message)

        # Single-plugin lookups.
        fetch('/plugins/seen/', 200, OC.plugin_object)
        fetch('/plugins/seen/?include_schema=true', 200, OC.plugin_object)
{ "repo_name": "Flexget/Flexget", "path": "flexget/tests/api_tests/test_plugins_api.py", "copies": "3", "size": "2699", "license": "mit", "hash": -6348832221400166000, "line_mean": 36.4861111111, "line_max": 78, "alpha_frac": 0.625416821, "autogenerated": false, "ratio": 3.523498694516971, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5648915515516971, "avg_score": null, "num_lines": null }
from flexget.api.app import base_message
from flexget.api.core.user import ObjectsContainer as OC
from flexget.utils import json


class TestUserAPI:
    config = 'tasks: {}'

    def test_change_password(self, execute_task, api_client, schema_match):
        """Weak passwords are rejected; medium and strong ones accepted."""
        cases = [
            ({'password': 'weak'}, 400),
            ({'password': 'a.better.password'}, 200),
            ({'password': 'AVer123y$ron__g-=PaW[]rd'}, 200),
        ]
        for payload, expected_status in cases:
            rsp = api_client.json_put('/user/', data=json.dumps(payload))
            assert rsp.status_code == expected_status
            data = json.loads(rsp.get_data(as_text=True))
            errors = schema_match(base_message, data)
            assert not errors

    def test_change_token(self, execute_task, api_client, schema_match):
        """Token regeneration returns a payload matching the token schema."""
        rsp = api_client.json_put('user/token/')
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.user_token_response, data)
        assert not errors
{ "repo_name": "Flexget/Flexget", "path": "flexget/tests/api_tests/test_user_api.py", "copies": "3", "size": "1468", "license": "mit", "hash": 1258101623383116000, "line_mean": 34.8048780488, "line_max": 77, "alpha_frac": 0.6376021798, "autogenerated": false, "ratio": 3.5458937198067635, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 41 }
from flexget.api.app import base_message
from flexget.components.history.api import ObjectsContainer as OC
from flexget.components.history.db import History
from flexget.manager import Session
from flexget.utils import json


class TestHistoryAPI:
    config = "{'tasks': {}}"

    def test_history(self, api_client, schema_match):
        """Basic retrieval and task-name filtering of history entries."""
        # Empty database: endpoint returns an empty list.
        rsp = api_client.get('/history/')
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.history_list_object, data)
        assert not errors
        assert data == []

        history_entry = dict(
            task='test_task1',
            title='test_title1',
            url='test_url1',
            filename='test_filename1',
            details='test_details1',
        )
        # Seed one History row directly via the ORM session.
        with Session() as session:
            item = History()
            for key, value in history_entry.items():
                setattr(item, key, value)
            session.add(item)
            session.commit()

        # Unfiltered fetch returns the seeded entry with all fields intact.
        rsp = api_client.get('/history/')
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.history_list_object, data)
        assert not errors

        for key, value in history_entry.items():
            assert data[0][key] == value

        # Filtering by the matching task name returns the same entry.
        rsp = api_client.get('/history/?task=test_task1')
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.history_list_object, data)
        assert not errors

        for key, value in history_entry.items():
            assert data[0][key] == value

        # Filtering by a non-existent task yields 200 with an empty list.
        rsp = api_client.get('/history/?task=bla')
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.history_list_object, data)
        assert not errors
        assert data == []


class TestHistoryPaginationAPI:
    config = "{'tasks': {}}"

    def test_history_pagination(self, api_client, schema_match, link_headers):
        """Page size limits, count headers, and RFC 5988 link headers."""
        history_entry = dict(
            task='test_task_',
            title='test_title_',
            url='test_url_',
            filename='test_filename_',
            details='test_details_',
        )
        num_of_entries = 200

        # Seed 200 rows; each field gets a numeric suffix (0..199).
        with Session() as session:
            for i in range(num_of_entries):
                item = History()
                for key, value in history_entry.items():
                    setattr(item, key, value + str(i))
                session.add(item)

        rsp = api_client.get('/history/')
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.history_list_object, data)
        assert not errors

        assert len(data) == 50  # Default page size
        assert int(rsp.headers['total-count']) == 200
        assert int(rsp.headers['count']) == 50

        links = link_headers(rsp)
        assert links['last']['page'] == 4
        assert links['next']['page'] == 2

        # Explicit per_page of 100 halves the page count.
        rsp = api_client.get('/history/?per_page=100')
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.history_list_object, data)
        assert not errors

        assert len(data) == 100
        assert int(rsp.headers['count']) == 100

        links = link_headers(rsp)
        assert links['last']['page'] == 2
        assert links['next']['page'] == 2

        # Per page is limited to 100
        rsp = api_client.get('/history/?per_page=200')
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.history_list_object, data)
        assert not errors
        assert len(data) == 100

        # Page 2 with ascending id ordering starts at the 51st entry.
        rsp = api_client.get('/history/?page=2&sort_by=id&order=asc')
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.history_list_object, data)
        assert not errors
        assert data[0]['task'] == 'test_task_50'

        links = link_headers(rsp)
        assert links['last']['page'] == 4
        assert links['next']['page'] == 3
        assert links['prev']['page'] == 1

        # Non existent page
        rsp = api_client.get('/history/?page=5')
        assert rsp.status_code == 404
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(base_message, data)
        assert not errors

    def test_history_sorting(self, api_client, schema_match, link_headers):
        """sort_by/order parameters and small-page pagination headers."""
        history_entry1 = dict(
            task='test_task_1',
            title='test_title_a',
            url='test_url_1',
            filename='test_filename_a',
            details='test_details_1',
        )
        history_entry2 = dict(
            task='test_task_2',
            title='test_title_b',
            url='test_url_2',
            filename='test_filename_b',
            details='test_details_2',
        )
        history_entry3 = dict(
            task='test_task_3',
            title='test_title_c',
            url='test_url_3',
            filename='test_filename_c',
            details='test_details_3',
        )

        entries = [history_entry1, history_entry2, history_entry3]

        # Seed the three rows in order, so ids are 1, 2, 3.
        with Session() as session:
            for entry in entries:
                item = History()
                for key, value in entry.items():
                    setattr(item, key, value)
                session.add(item)

        # Default order appears to be descending (highest id first).
        rsp = api_client.get('/history/?sort_by=id')
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.history_list_object, data)
        assert not errors
        assert data[0]['id'] == 3

        assert int(rsp.headers['total-count']) == 3
        assert int(rsp.headers['count']) == 3

        rsp = api_client.get('/history/?sort_by=task&order=asc')
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.history_list_object, data)
        assert not errors
        assert data[0]['task'] == 'test_task_1'

        rsp = api_client.get('/history/?sort_by=details')
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.history_list_object, data)
        assert not errors
        assert data[0]['details'] == 'test_details_3'

        # per_page smaller than the row count: first page holds 2 of 3.
        rsp = api_client.get('/history/?per_page=2')
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.history_list_object, data)
        assert not errors
        assert len(data) == 2

        assert int(rsp.headers['total-count']) == 3
        assert int(rsp.headers['count']) == 2

        # Second page holds the remaining single row.
        rsp = api_client.get('/history/?per_page=2&page=2')
        assert rsp.status_code == 200
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(OC.history_list_object, data)
        assert not errors
        assert len(data) == 1

        assert int(rsp.headers['total-count']) == 3
        assert int(rsp.headers['count']) == 1
{ "repo_name": "ianstalk/Flexget", "path": "flexget/tests/api_tests/test_history_api.py", "copies": "3", "size": "7200", "license": "mit", "hash": 2742678576750853000, "line_mean": 30.3043478261, "line_max": 78, "alpha_frac": 0.5597222222, "autogenerated": false, "ratio": 3.8318254390633317, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 230 }
from flexget.api import APIResource, api from flexget.api.app import base_message_schema, success_response schema_api = api.namespace( 'format_check', description='Test Flexget custom schema format validations' ) class ObjectContainer: format_checker_input = { 'type': 'object', 'properties': { 'quality': {'type': 'string', 'format': 'quality'}, 'quality_requirements': {'type': 'string', 'format': 'quality_requirements'}, 'time': {'type': 'string', 'format': 'time'}, 'interval': {'type': 'string', 'format': 'interval'}, 'size': {'type': 'string', 'format': 'size'}, 'percent': {'type': 'string', 'format': 'percent'}, 'regex': {'type': 'string', 'format': 'regex'}, 'file': {'type': 'string', 'format': 'file'}, 'path': {'type': 'string', 'format': 'path'}, 'url': {'type': 'string', 'format': 'url'}, 'episode_identifier': {'type': 'string', 'format': 'episode_identifier'}, 'episode_or_season_id': {'type': 'string', 'format': 'episode_or_season_id'}, }, } format_checker_schema = api.schema_model('format_checker', ObjectContainer.format_checker_input) @schema_api.route('/', doc=False) class SchemaTest(APIResource): @api.validate(format_checker_schema) @api.response(200, model=base_message_schema) def post(self, session=None): """ Validate flexget custom schema""" # If validation passed, all is well return success_response('payload is valid')
{ "repo_name": "malkavi/Flexget", "path": "flexget/api/core/format_checker.py", "copies": "2", "size": "1580", "license": "mit", "hash": 6187390256382349000, "line_mean": 39.5128205128, "line_max": 96, "alpha_frac": 0.5848101266, "autogenerated": false, "ratio": 3.816425120772947, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5401235247372946, "avg_score": null, "num_lines": null }
from flexget.components.managed_lists.lists.movie_list.api import ObjectsContainer as OC from flexget.utils import json class TestETAG: config = 'tasks: {}' def test_etag(self, api_client, schema_match): # Test ETag creation and usage # Create movie lists list_1 = {'name': 'list_1'} list_2 = {'name': 'list_2'} # Create lists rsp = api_client.json_post('/movie_list/', data=json.dumps(list_1)) assert rsp.status_code == 201, 'Response code is %s' % rsp.status_code rsp = api_client.json_post('/movie_list/', data=json.dumps(list_2)) assert rsp.status_code == 201, 'Response code is %s' % rsp.status_code # Get ETag rsp = api_client.get('/movie_list/') assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code etag = rsp.headers.get('etag') assert etag is not None # Test If-None-Match header = {'If-None-Match': etag} rsp = api_client.head('/movie_list/', headers=header) assert rsp.status_code == 304, 'Response code is %s' % rsp.status_code header = {'If-None-Match': etag} rsp = api_client.get('/movie_list/', headers=header) assert rsp.status_code == 304, 'Response code is %s' % rsp.status_code data = rsp.get_data(as_text=True) assert data is '' header = {'If-None-Match': '*'} rsp = api_client.head('/movie_list/', headers=header) assert rsp.status_code == 304, 'Response code is %s' % rsp.status_code # Test If-Match header = {'If-Match': 'not_etag'} rsp = api_client.head('/movie_list/', headers=header) assert rsp.status_code == 412, 'Response code is %s' % rsp.status_code # Change data list_3 = {'name': 'list_3'} rsp = api_client.json_post('/movie_list/', data=json.dumps(list_3)) assert rsp.status_code == 201, 'Response code is %s' % rsp.status_code header = {'If-None-Match': etag} rsp = api_client.get('/movie_list/', headers=header) assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code data = json.loads(rsp.get_data(as_text=True)) errors = schema_match(OC.return_lists, data) assert not errors # Verify all 3 lists are received as payload assert 
len(data) == 3
{ "repo_name": "Flexget/Flexget", "path": "flexget/tests/api_tests/test_etag.py", "copies": "3", "size": "2371", "license": "mit", "hash": 5660320131921077000, "line_mean": 37.868852459, "line_max": 88, "alpha_frac": 0.5934204977, "autogenerated": false, "ratio": 3.502215657311669, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0001841959845275373, "num_lines": 61 }
from flexget.components.managed_lists.lists.pending_list.db import PendingListList from flexget.manager import Session class TestListInterface: config = """ templates: global: disable: [seen] tasks: list_get: pending_list: test_list list_get_pending: pending_list: list_name: 'test_list' include: 'pending' list_get_approved: pending_list: list_name: 'test_list' include: 'approved' list_get_all: pending_list: list_name: 'test_list' include: 'all' pending_list_add: mock: - {title: 'title 1', url: "http://mock.url/file1.torrent"} - {title: 'title 2', url: "http://mock.url/file2.torrent"} accept_all: yes list_add: - pending_list: test_list pending_list_match: mock: - {title: 'title 1', url: "http://mock.url/file1.torrent"} - {title: 'title 2', url: "http://mock.url/file2.torrent"} - {title: 'title 3', url: "http://mock.url/file3.torrent"} list_match: from: - pending_list: test_list """ def test_list_add(self, execute_task): task = execute_task('pending_list_add') assert len(task.entries) == 2 task = execute_task('list_get') assert len(task.entries) == 0 with Session() as session: list = session.query(PendingListList).first() assert list for entry in list.entries: entry.approved = True task = execute_task('list_get') assert len(task.entries) == 2 def test_list_match(self, execute_task): task = execute_task('pending_list_add') assert len(task.entries) == 2 task = execute_task('pending_list_match') assert len(task.accepted) == 0 with Session() as session: list = session.query(PendingListList).first() assert list for entry in list.entries: entry.approved = True task = execute_task('list_get') assert len(task.entries) == 2 task = execute_task('pending_list_match') assert len(task.accepted) == 2 task = execute_task('list_get') assert len(task.entries) == 0 def test_list_get_include(self, execute_task): task = execute_task('pending_list_add') assert len(task.entries) == 2 with Session() as session: entry = session.query(PendingListList).first().entries.first() entry.approved = 
True task = execute_task('list_get_all') assert len(task.entries) == 2 task = execute_task('list_get_pending') assert len(task.entries) == 1 task = execute_task('list_get_approved') assert len(task.entries) == 1
{ "repo_name": "Flexget/Flexget", "path": "flexget/tests/test_pending_list.py", "copies": "1", "size": "3049", "license": "mit", "hash": -6093536745871724000, "line_mean": 28.8921568627, "line_max": 82, "alpha_frac": 0.526402099, "autogenerated": false, "ratio": 4.087131367292225, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5113533466292225, "avg_score": null, "num_lines": null }
from flexget.components.variables.variables import Variables from flexget.manager import Session from flexget.utils import json class TestVariablesAPI: config = 'tasks: {}' variables_dict = {'test_variable_db': True} def test_variables_get(self, api_client): with Session() as session: s = Variables(variables=self.variables_dict) session.add(s) rsp = api_client.get('/variables/') assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code assert json.loads(rsp.get_data(as_text=True)) == self.variables_dict def test_variables_put(self, api_client): rsp = api_client.get('/variables/') assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code assert json.loads(rsp.get_data(as_text=True)) == {} rsp = api_client.json_put('/variables/', data=json.dumps(self.variables_dict)) assert rsp.status_code == 201, 'Response code is %s' % rsp.status_code assert json.loads(rsp.get_data(as_text=True)) == self.variables_dict rsp = api_client.get('/variables/') assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code assert json.loads(rsp.get_data(as_text=True)) == self.variables_dict def test_variables_patch(self, api_client): data = {'a': 'b', 'c': 'd'} api_client.json_put('/variables/', data=json.dumps(data)) new_data = {'a': [1, 2, 3], 'foo': 'bar'} rsp = api_client.json_patch('/variables/', data=json.dumps(new_data)) assert rsp.status_code == 200, 'Response code is %s' % rsp.status_code assert json.loads(rsp.get_data(as_text=True)) == {'a': [1, 2, 3], 'foo': 'bar', 'c': 'd'}
{ "repo_name": "malkavi/Flexget", "path": "flexget/tests/api_tests/test_variables_api.py", "copies": "3", "size": "1727", "license": "mit", "hash": 3342768241605145600, "line_mean": 42.175, "line_max": 97, "alpha_frac": 0.6218876665, "autogenerated": false, "ratio": 3.3862745098039215, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0005424583626554069, "num_lines": 40 }
from flexget.entry import Entry, register_lazy_lookup from flexget.plugin import PluginError @register_lazy_lookup('lazy_a') def lazy_a(entry): if 'fail' in entry: raise PluginError('oh no!') for f in ['a_field', 'ab_field', 'a_fail']: entry[f] = 'a' @register_lazy_lookup('lazy_b') def lazy_b(entry): for f in ['b_field', 'ab_field', 'a_fail']: entry[f] = 'b' class TestLazyFields: def test_lazy_queue(self): """Tests behavior when multiple plugins register lazy lookups for the same field""" def setup_entry(): entry = Entry() entry.add_lazy_fields('lazy_a', ['ab_field', 'a_field', 'a_fail']) entry.add_lazy_fields('lazy_b', ['ab_field', 'b_field', 'a_fail']) return entry entry = setup_entry() assert entry['b_field'] == 'b', 'Lazy lookup failed' assert entry['ab_field'] == 'b', 'ab_field should be `b` when lazy_b is run first' # Now cause 'a' lookup to occur assert entry['a_field'] == 'a' # TODO: What is the desired result when a lookup has information that is already populated? # assert entry['ab_field'] == 'b' # Test fallback when first lookup fails entry = setup_entry() entry['fail'] = True assert entry['a_fail'] == 'b', 'Lookup should have fallen back to b' assert entry['a_field'] is None, 'a_field should be None after failed lookup' assert entry['ab_field'] == 'b', 'ab_field should be `b`'
{ "repo_name": "Flexget/Flexget", "path": "flexget/tests/test_lazy_fields.py", "copies": "3", "size": "1528", "license": "mit", "hash": 4180245095680235000, "line_mean": 35.380952381, "line_max": 99, "alpha_frac": 0.5896596859, "autogenerated": false, "ratio": 3.5288683602771362, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.001035392456741583, "num_lines": 42 }
from flexget import options from flexget.event import event from flexget.manager import Session from flexget.terminal import TerminalTable, TerminalTableError, console, table_parser from . import db def do_cli(manager, options): if options.action == 'clear': num = db.clear_entries(options.task, all=True) console('%s entries cleared from backlog.' % num) else: header = ['Title', 'Task', 'Expires'] table_data = [header] with Session() as session: entries = db.get_entries(options.task, session=session) for entry in entries: table_data.append( [entry.title, entry.task, entry.expire.strftime('%Y-%m-%d %H:%M')] ) try: table = TerminalTable(options.table_type, table_data, wrap_columns=[0]) console(table.output) except TerminalTableError as e: console('ERROR: %s' % str(e)) @event('options.register') def register_options(): parser = options.register_command( 'backlog', do_cli, help='View or clear entries from backlog plugin', parents=[table_parser] ) parser.add_argument( 'action', choices=['list', 'clear'], help='Choose to show items in backlog, or clear all of them', ) parser.add_argument('task', nargs='?', help='Limit to specific task (if supplied)')
{ "repo_name": "Flexget/Flexget", "path": "flexget/components/backlog/cli.py", "copies": "3", "size": "1395", "license": "mit", "hash": 546965848051103170, "line_mean": 34.7692307692, "line_max": 99, "alpha_frac": 0.6164874552, "autogenerated": false, "ratio": 3.951841359773371, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0014459117466334788, "num_lines": 39 }
from flexget import options from flexget.event import event from flexget.terminal import console from flexget.utils.database import with_session from flexget.webserver import WeakPassword, change_password, generate_token, get_user @with_session def do_cli(manager, options, session=None): if options.action == 'passwd': try: change_password(password=options.password, session=session) except WeakPassword as e: console(e.value) return console('Updated password') if options.action == 'gentoken': token = generate_token(session=session) console('Generated new token %s' % token) if options.action == 'showtoken': user = get_user() console('Token: %s' % user.token) @event('options.register') def register_parser_arguments(): parser = options.register_command('web', do_cli, help='Manage web server settings') subparsers = parser.add_subparsers(dest='action', metavar='<action>') pwd_parser = subparsers.add_parser('passwd', help='change password for web server') pwd_parser.add_argument('password', metavar='<new password>', help='New Password') subparsers.add_parser('gentoken', help='Generate a new api token') subparsers.add_parser('showtoken', help='Show api token')
{ "repo_name": "Flexget/Flexget", "path": "flexget/plugins/cli/web.py", "copies": "3", "size": "1306", "license": "mit", "hash": -343943804757006460, "line_mean": 35.2777777778, "line_max": 87, "alpha_frac": 0.6891271057, "autogenerated": false, "ratio": 4.08125, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.62703771057, "avg_score": null, "num_lines": null }
from flexget import options from flexget.event import event from flexget.terminal import TerminalTable, TerminalTableError, console, table_parser from flexget.utils.template import get_template, list_templates def list_file_templates(manager, options): header = ['Name', 'Use with', 'Full Path', 'Contents'] table_data = [header] console('Fetching all file templates, stand by...') for template_name in list_templates(extensions=['template']): if options.name and not options.name in template_name: continue template = get_template(template_name) if 'entries' in template_name: plugin = 'notify_entries' elif 'task' in template_name: plugin = 'notify_task' else: plugin = '-' name = template_name.replace('.template', '').split('/') if len(name) == 2: name = name[1] with open(template.filename) as contents: table_data.append([name, plugin, template.filename, contents.read()]) try: table = TerminalTable( options.table_type, table_data, wrap_columns=[2, 3], drop_columns=[2, 3] ) except TerminalTableError as e: console('ERROR: %s' % str(e)) else: console(table.output) @event('options.register') def register_parser_arguments(): parser = options.register_command( 'templates', list_file_templates, help='View all available templates', parents=[table_parser], ) parser.add_argument('--name', help='Filter results by template name')
{ "repo_name": "Flexget/Flexget", "path": "flexget/plugins/cli/templates.py", "copies": "3", "size": "1591", "license": "mit", "hash": 1649782827353000200, "line_mean": 34.3555555556, "line_max": 85, "alpha_frac": 0.6279069767, "autogenerated": false, "ratio": 4.100515463917525, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0011435722374073891, "num_lines": 45 }
from flexget import options, plugin from flexget.event import event from flexget.manager import Session from flexget.terminal import TerminalTable, TerminalTableError, console, table_parser from . import db def action_auth(options): if not (options.account): console( 'You must specify an account (local identifier) so we know where to save your access token!' ) return try: db.get_access_token(options.account, options.pin, re_auth=True, called_from_cli=True) console('Successfully authorized Flexget app on Trakt.tv. Enjoy!') return except plugin.PluginError as e: console('Authorization failed: %s' % e) def action_list(options): with Session() as session: if not options.account: # Print all accounts accounts = session.query(db.TraktUserAuth).all() if not accounts: console('No trakt authorizations stored in database.') return header = ['Account', 'Created', 'Expires'] table_data = [header] for auth in accounts: table_data.append( [ auth.account, auth.created.strftime('%Y-%m-%d'), auth.expires.strftime('%Y-%m-%d'), ] ) try: table = TerminalTable(options.table_type, table_data) console(table.output) return except TerminalTableError as e: console('ERROR: %s' % str(e)) # Show a specific account acc = ( session.query(db.TraktUserAuth) .filter(db.TraktUserAuth.account == options.account) .first() ) if acc: console('Authorization expires on %s' % acc.expires) else: console('Flexget has not been authorized to access your account.') def action_refresh(options): if not options.account: console('Please specify an account') return try: db.get_access_token(options.account, refresh=True) console('Successfully refreshed your access token.') return except plugin.PluginError as e: console('Authorization failed: %s' % e) def action_delete(options): if not options.account: console('Please specify an account') return try: db.delete_account(options.account) console('Successfully deleted your access token.') return except plugin.PluginError as e: console('Deletion failed: %s' % e) def 
do_cli(manager, options): action_map = { 'auth': action_auth, 'list': action_list, 'refresh': action_refresh, 'delete': action_delete, } action_map[options.action](options) @event('options.register') def register_parser_arguments(): acc_text = 'Local identifier which should be used in your config to refer these credentials' # Register subcommand parser = options.register_command( 'trakt', do_cli, help='View and manage trakt authentication.' ) # Set up our subparsers subparsers = parser.add_subparsers(title='actions', metavar='<action>', dest='action') auth_parser = subparsers.add_parser( 'auth', help='Authorize Flexget to access your Trakt.tv account' ) auth_parser.add_argument('account', metavar='<account>', help=acc_text) auth_parser.add_argument( 'pin', metavar='<pin>', help='Get this by authorizing FlexGet to use your trakt account ' 'at %s. WARNING: DEPRECATED.' % db.PIN_URL, nargs='?', ) show_parser = subparsers.add_parser( 'list', help='List expiration date for Flexget authorization(s) (don\'t worry, ' 'they will automatically refresh when expired)', parents=[table_parser], ) show_parser.add_argument('account', metavar='<account>', nargs='?', help=acc_text) refresh_parser = subparsers.add_parser( 'refresh', help='Manually refresh your access token associated with your' ' --account <name>', ) refresh_parser.add_argument('account', metavar='<account>', help=acc_text) delete_parser = subparsers.add_parser( 'delete', help='Delete the specified <account> name from local database' ) delete_parser.add_argument('account', metavar='<account>', help=acc_text)
{ "repo_name": "ianstalk/Flexget", "path": "flexget/components/trakt/cli.py", "copies": "3", "size": "4480", "license": "mit", "hash": -1209272936341341200, "line_mean": 32.1851851852, "line_max": 104, "alpha_frac": 0.6029017857, "autogenerated": false, "ratio": 4.299424184261037, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6402325969961037, "avg_score": null, "num_lines": null }
from flexget import options, plugin from flexget.event import event from flexget.terminal import TerminalTable, TerminalTableError, console, table_parser from flexget.utils.database import with_session from . import db try: # NOTE: Importing other plugins is discouraged! from flexget.components.imdb.utils import extract_id, is_imdb_url except ImportError: raise plugin.DependencyError(issued_by=__name__, missing='imdb') def do_cli(manager, options): if options.seen_action == 'forget': seen_forget(manager, options) elif options.seen_action == 'add': seen_add(options) elif options.seen_action == 'search': seen_search(options) def seen_forget(manager, options): forget_name = options.forget_value if is_imdb_url(forget_name): imdb_id = extract_id(forget_name) if imdb_id: forget_name = imdb_id count, fcount = db.forget(forget_name) console('Removed %s titles (%s fields)' % (count, fcount)) manager.config_changed() def seen_add(options): seen_name = options.add_value if is_imdb_url(seen_name): console('IMDB url detected, try to parse ID') imdb_id = extract_id(seen_name) if imdb_id: seen_name = imdb_id else: console("Could not parse IMDB ID") db.add(seen_name, 'cli_add', {'cli_add': seen_name}) console('Added %s as seen. This will affect all tasks.' 
% seen_name) @with_session def seen_search(options, session=None): search_term = options.search_term if is_imdb_url(search_term): console('IMDB url detected, parsing ID') imdb_id = extract_id(search_term) if imdb_id: search_term = imdb_id else: console("Could not parse IMDB ID") else: search_term = '%' + options.search_term + '%' seen_entries = db.search(value=search_term, status=None, session=session) table_data = [] for se in seen_entries.all(): table_data.append(['Title', se.title]) for sf in se.fields: if sf.field.lower() == 'title': continue table_data.append(['{}'.format(sf.field.upper()), str(sf.value)]) table_data.append(['Task', se.task]) table_data.append(['Added', se.added.strftime('%Y-%m-%d %H:%M')]) if options.table_type != 'porcelain': table_data.append(['', '']) if not table_data: console('No results found for search') return if options.table_type != 'porcelain': del table_data[-1] try: table = TerminalTable(options.table_type, table_data, wrap_columns=[1]) table.table.inner_heading_row_border = False console(table.output) except TerminalTableError as e: console('ERROR: %s' % str(e)) @event('options.register') def register_parser_arguments(): parser = options.register_command( 'seen', do_cli, help='View or forget entries remembered by the seen plugin' ) subparsers = parser.add_subparsers(dest='seen_action', metavar='<action>') forget_parser = subparsers.add_parser( 'forget', help='Forget entry or entire task from seen plugin database' ) forget_parser.add_argument( 'forget_value', metavar='<value>', help='Title or url of entry to forget, or name of task to forget', ) add_parser = subparsers.add_parser('add', help='Add a title or url to the seen database') add_parser.add_argument('add_value', metavar='<value>', help='the title or url to add') search_parser = subparsers.add_parser( 'search', help='Search text from the seen database', parents=[table_parser] ) search_parser.add_argument('search_term', metavar='<search term>')
{ "repo_name": "Flexget/Flexget", "path": "flexget/components/seen/cli.py", "copies": "1", "size": "3761", "license": "mit", "hash": 2834679207905315300, "line_mean": 34.4811320755, "line_max": 93, "alpha_frac": 0.632544536, "autogenerated": false, "ratio": 3.6800391389432487, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9809897576175513, "avg_score": 0.0005372197535471672, "num_lines": 106 }
from flexget import plugin from flexget.event import event from . import db from . import series as plugin_series class FilterSeriesPremiere(plugin_series.FilterSeriesBase): """ Accept an entry that appears to be the first episode of any series. Can be configured with any of the options of series plugin Examples: series_premiere: yes series_premiere: path: ~/Media/TV/_NEW_/. quality: 720p timeframe: 12 hours NOTE: this plugin only looks in the entry title and expects the title format to start with the series name followed by the episode info. Use the manipulate plugin to modify the entry title to match this format, if necessary. TODO: - integrate thetvdb to allow refining by genres, etc. """ @property def schema(self): settings = self.settings_schema settings['properties']['allow_seasonless'] = {'type': 'boolean'} settings['properties']['allow_teasers'] = {'type': 'boolean'} return {'anyOf': [{'type': 'boolean'}, settings]} # Run after series and metainfo series plugins @plugin.priority(115) def on_task_metainfo(self, task, config): if not config: # Don't run when we are disabled return # Generate the group settings for series plugin group_settings = {} allow_seasonless = False desired_eps = [0, 1] if isinstance(config, dict): allow_seasonless = config.pop('allow_seasonless', False) if not config.pop('allow_teasers', True): desired_eps = [1] group_settings = config group_settings['identified_by'] = 'ep' # Generate a list of unique series that have premieres guess_entry = plugin.get('metainfo_series', self).guess_entry # Make a set of unique series according to series name normalization rules guessed_series = {} for entry in task.entries: if guess_entry(entry, allow_seasonless=allow_seasonless, config=group_settings): if ( not entry['season_pack'] and entry['series_season'] == 1 and entry['series_episode'] in desired_eps ): normalized_name = plugin_series.normalize_series_name(entry['series_name']) db_series = ( task.session.query(db.Series) .filter(db.Series.name 
== normalized_name) .first() ) if db_series and db_series.in_tasks: continue guessed_series.setdefault(normalized_name, entry['series_name']) # Reject any further episodes in those series for entry in task.entries: for series in guessed_series.values(): if entry.get('series_name') == series and ( entry.get('season_pack') or not ( entry.get('series_season') == 1 and entry.get('series_episode') in desired_eps ) ): entry.reject('Non premiere episode or season pack in a premiere series') # Combine settings and series into series plugin config format allseries = { 'settings': {'series_premiere': group_settings}, 'series_premiere': list(guessed_series.values()), } # Merge the our config in to the main series config self.merge_config(task, allseries) @event('plugin.register') def register_plugin(): plugin.register(FilterSeriesPremiere, 'series_premiere', api_ver=2)
{ "repo_name": "Flexget/Flexget", "path": "flexget/components/series/series_premiere.py", "copies": "3", "size": "3736", "license": "mit", "hash": 5193975493005425000, "line_mean": 37.5154639175, "line_max": 95, "alpha_frac": 0.5808351178, "autogenerated": false, "ratio": 4.458233890214797, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6539069008014797, "avg_score": null, "num_lines": null }
from flexget.manager import Session from flexget.plugins.filter.upgrade import EntryUpgrade class TestUpgrade: config = """ tasks: first_download: accept_all: yes upgrade: tracking: yes mock: - {title: 'Movie.720p.WEB-DL.X264.AC3-GRP1', 'media_id': 'Movie'} tracking_only: upgrade: tracking: yes mock: - {title: 'Movie.1080p WEB-DL X264 AC3', 'media_id': 'Movie'} upgrade_quality: upgrade: target: 1080p mock: - {title: 'Movie.1080p WEB-DL X264 AC3', 'media_id': 'Movie'} - {title: 'Movie.720p.WEB-DL.X264.AC3', 'media_id': 'Movie'} - {title: 'Movie.BRRip.x264.720p', 'media_id': 'Movie'} reject_lower: upgrade: target: 1080p on_lower: reject mock: - {title: 'Movie.1080p.BRRip.X264.AC3', 'media_id': 'Movie'} - {title: 'Movie.1080p WEB-DL X264', 'media_id': 'Movie'} - {title: 'Movie.BRRip.x264.720p', 'media_id': 'Movie'} """ def test_learn(self, execute_task): execute_task('first_download') with Session() as session: query = session.query(EntryUpgrade).all() assert len(query) == 1, 'There should be one tracked entity present.' assert query[0].id == 'movie', 'Should have tracked name `Movie`.' 
def test_tracking(self, execute_task): execute_task('first_download') task = execute_task('tracking_only') entry = task.find_entry('undecided', title='Movie.1080p WEB-DL X264 AC3') assert entry, 'Movie.1080p WEB-DL X264 AC3 should be undecided' def test_upgrade_quality(self, execute_task): execute_task('first_download') task = execute_task('upgrade_quality') entry = task.find_entry('accepted', title='Movie.1080p WEB-DL X264 AC3') assert entry, 'Movie.1080p WEB-DL X264 AC3 should have been accepted' def test_reject_lower(self, execute_task): execute_task('first_download') task = execute_task('reject_lower') entry = task.find_entry('accepted', title='Movie.1080p.BRRip.X264.AC3') assert entry, 'Movie.1080p.BRRip.X264.AC3 should have been accepted' entry = task.find_entry('rejected', title='Movie.1080p WEB-DL X264') assert entry, 'Movie.1080p WEB-DL X264 should have been rejected' entry = task.find_entry('rejected', title='Movie.BRRip.x264.720p') assert entry, 'Movie.BRRip.x264.720p should have been rejected' class TestUpgradeTarget: config = """ tasks: existing_download_480p: upgrade: tracking: yes accept_all: yes mock: - {title: 'Movie.480p.WEB-DL.X264.AC3-GRP1', 'id': 'Movie'} existing_download_1080p: upgrade: tracking: yes accept_all: yes mock: - {title: 'Movie.1080p.WEB-DL.X264.AC3-GRP1', 'id': 'Movie'} target_outside_range: upgrade: target: 720p-1080p mock: - {title: 'Movie.HDRip.XviD.AC3', 'id': 'Movie'} target_within_range: upgrade: target: 720p-1080p mock: - {title: 'Movie.2160p WEB-DL X264 AC3', 'id': 'Movie'} - {title: 'Movie.1080p WEB-DL X264 AC3', 'id': 'Movie'} - {title: 'Movie.720p.WEB-DL.X264.AC3', 'id': 'Movie'} target_quality_1080p: upgrade: target: 1080p mock: - {title: 'Movie.1080p WEB-DL X264 AC3', 'id': 'Movie'} - {title: 'Movie.720p.WEB-DL.X264.AC3', 'id': 'Movie'} """ def test_target_outside_range(self, execute_task): execute_task('existing_download_480p') task = execute_task('target_outside_range') entry = task.find_entry('undecided', 
title='Movie.HDRip.XviD.AC3') assert entry, 'Movie.HDRip.XviD.AC3 should have been undecided' def test_target_within_range(self, execute_task): execute_task('existing_download_480p') task = execute_task('target_within_range') entry = task.find_entry('accepted', title='Movie.1080p WEB-DL X264 AC3') assert entry, 'Movie.1080p WEB-DL X264 AC3 should have been accepted' for title in ['Movie.2160p WEB-DL X264 AC3', 'Movie.720p.WEB-DL.X264.AC3']: entry = task.find_entry('undecided', title=title) assert entry, '%s should have been undecided' % title def test_target_quality_1080p(self, execute_task): execute_task('existing_download_480p') task = execute_task('target_quality_1080p') entry = task.find_entry('accepted', title='Movie.1080p WEB-DL X264 AC3') assert entry, 'Movie.1080p WEB-DL X264 AC3 should have been accepted' entry = task.find_entry('undecided', title='Movie.720p.WEB-DL.X264.AC3') assert entry, 'Movie.720p.WEB-DL.X264.AC3 should have been undecided' def test_at_target(self, execute_task): execute_task('existing_download_1080p') task = execute_task('target_quality_1080p') entry = task.find_entry('undecided', title='Movie.1080p WEB-DL X264 AC3') assert entry, 'Movie.1080p WEB-DL X264 AC3 should have been accepted' entry = task.find_entry('undecided', title='Movie.720p.WEB-DL.X264.AC3') assert entry, 'Movie.720p.WEB-DL.X264.AC3 should have been undecided' class TestUpgradeTimeFrame: config = """ tasks: existing_download_480p: upgrade: tracking: yes accept_all: yes mock: - {title: 'Movie.480p.WEB-DL.X264.AC3-GRP1', 'id': 'Movie'} outside_timeframe: upgrade: timeframe: 0 seconds target: 1080p mock: - {title: 'Movie.1080p WEB-DL X264 AC3', 'id': 'Movie'} within_timeframe: upgrade: timeframe: 1 day target: 1080p mock: - {title: 'Movie.1080p WEB-DL X264 AC3', 'id': 'Movie'} """ def test_outside_timeframe(self, execute_task): execute_task('existing_download_480p') task = execute_task('outside_timeframe') entry = task.find_entry('undecided', title='Movie.1080p WEB-DL 
X264 AC3') assert entry, 'Movie.HDRip.XviD.AC3 should have been undecided' def test_within_timeframe(self, execute_task): execute_task('existing_download_480p') task = execute_task('within_timeframe') entry = task.find_entry('accepted', title='Movie.1080p WEB-DL X264 AC3') assert entry, 'Movie.HDRip.XviD.AC3 should have been accepted' class TestUpgradePropers: config = """ templates: global: metainfo_movie: yes upgrade: target: 1080p tracking: yes propers: yes tasks: existing_download: accept_all: yes mock: - {title: 'Movie.1080p.WEB-DL.X264.AC3-GRP1'} existing_download_proper: accept_all: yes mock: - {title: 'Movie.1080p PROPER WEB-DL X264 AC3 GRP1'} existing_download_proper_repack: accept_all: yes mock: - {title: 'Movie.1080p PROPER REPACK WEB-DL X264 AC3 GRP1'} upgrade_proper: mock: - {title: 'Movie.1080p REPACK PROPER WEB-DL X264 AC3'} - {title: 'Movie.1080p PROPER WEB-DL X264 AC3'} existing_upgrade_proper: mock: - {title: 'Movie.1080p REPACK PROPER WEB-DL X264 AC3'} - {title: 'Movie.1080p PROPER WEB-DL X264 AC3'} existing_higher_proper: mock: - {title: 'Movie.1080p PROPER WEB-DL X264 AC3'} existing_lower_quality_proper: mock: - {title: 'Movie.720p PROPER WEB-DL X264 AC3'} """ def test_upgrade_proper(self, execute_task): execute_task('existing_download') task = execute_task('upgrade_proper') entry = task.find_entry('accepted', title='Movie.1080p REPACK PROPER WEB-DL X264 AC3') assert entry, 'Movie.1080p REPACK PROPER WEB-DL X264 AC3 should have been accepted' def test_existing_upgrade_proper(self, execute_task): execute_task('existing_download_proper') task = execute_task('existing_upgrade_proper') entry = task.find_entry('accepted', title='Movie.1080p REPACK PROPER WEB-DL X264 AC3') assert entry, 'Movie.1080p REPACK PROPER WEB-DL X264 AC3 should have been accepted' def test_existing_higher_proper(self, execute_task): execute_task('existing_download_proper_repack') task = execute_task('existing_higher_proper') entry = task.find_entry('undecided', 
title='Movie.1080p PROPER WEB-DL X264 AC3') assert entry, 'Movie.1080p PROPER WEB-DL X264 AC3 should have been undecided' def test_existing_lower_quality_proper(self, execute_task): execute_task('existing_download') task = execute_task('existing_lower_quality_proper') entry = task.find_entry('undecided', title='Movie.720p PROPER WEB-DL X264 AC3') assert entry, 'Movie.720p PROPER WEB-DL X264 AC3 should have been undecided'
{ "repo_name": "Flexget/Flexget", "path": "flexget/tests/test_upgrade.py", "copies": "3", "size": "9539", "license": "mit", "hash": 8755998104839221000, "line_mean": 40.4739130435, "line_max": 94, "alpha_frac": 0.5811929972, "autogenerated": false, "ratio": 3.518627812615271, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.001028392622014653, "num_lines": 230 }
from flexget.plugin import get_plugin_by_name class TestURLRewriters: """ Bad example, does things manually, you should use task.find_entry to check existance """ config = """ tasks: test: # make test data mock: - {title: 'tpb page', url: 'https://thepiratebay.org/tor/8492471/Test.avi'} - {title: 'tbp search', url: 'https://thepiratebay.org/search/something'} - {title: 'tbp torrent', url: 'https://torrents.thepiratebay.org/8492471/Test.torrent'} - {title: 'tbp torrent subdomain', url: 'https://torrents.thepiratebay.org/8492471/Test.avi'} - {title: 'tbp torrent bad subdomain', url: 'https://torrent.thepiratebay.org/8492471/Test.avi'} - {title: 'nyaa', url: 'https://www.nyaa.si/view/15'} - {title: 'cinemageddon download', url: 'http://cinemageddon.net/details.php?id=1234'} """ def get_urlrewriter(self, name): info = get_plugin_by_name(name) return info.instance def test_piratebay(self, execute_task): task = execute_task('test') # test with piratebay entry urlrewriter = self.get_urlrewriter('piratebay') entry = task.find_entry(title='tpb page') assert urlrewriter.url_rewritable(task, entry) entry = task.find_entry(title='tbp torrent') assert not urlrewriter.url_rewritable( task, entry ), 'TPB direct torrent link should not be url_rewritable' entry = task.find_entry(title='tbp torrent subdomain') assert urlrewriter.url_rewritable(task, entry) entry = task.find_entry(title='tbp torrent bad subdomain') assert not urlrewriter.url_rewritable( task, entry ), 'TPB link with invalid subdomain should not be url_rewritable' def test_piratebay_search(self, execute_task): task = execute_task('test') # test with piratebay entry urlrewriter = self.get_urlrewriter('piratebay') entry = task.find_entry(title='tbp search') assert urlrewriter.url_rewritable(task, entry) def test_nyaa_torrents(self, execute_task): task = execute_task('test') entry = task.find_entry(title='nyaa') urlrewriter = self.get_urlrewriter('nyaa') assert entry['url'] == 'https://www.nyaa.si/view/15' assert 
urlrewriter.url_rewritable(task, entry) urlrewriter.url_rewrite(task, entry) assert entry['url'] == 'https://www.nyaa.si/download/15.torrent' def test_cinemageddon(self, execute_task): task = execute_task('test') entry = task.find_entry(title='cinemageddon download') urlrewriter = self.get_urlrewriter('cinemageddon') assert urlrewriter.url_rewritable(task, entry) urlrewriter.url_rewrite(task, entry) assert ( entry['url'] == 'http://cinemageddon.net/download.php?id=1234&name=cinemageddon%20download.torrent' ) class TestRegexpurlrewriter: # TODO: this test is broken? config = r""" tasks: test: mock: - {title: 'irrelevant', url: 'http://newzleech.com/?p=123'} accept_all: yes urlrewrite: newzleech: regexp: 'http://newzleech.com/\?p=(?P<id>\d+)' format: 'http://newzleech.com/?m=gen&dl=1&post=\g<id>' """ def test_newzleech(self, execute_task): task = execute_task('test') assert task.find_entry( url='http://newzleech.com/?m=gen&dl=1&post=123' ), 'did not url_rewrite properly'
{ "repo_name": "malkavi/Flexget", "path": "flexget/tests/test_urlrewriting.py", "copies": "1", "size": "3658", "license": "mit", "hash": 1563985360150615300, "line_mean": 39.1978021978, "line_max": 110, "alpha_frac": 0.6030617824, "autogenerated": false, "ratio": 3.43796992481203, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.454103170721203, "avg_score": null, "num_lines": null }
from flexget.plugins.filter.movie_queue import queue_add, queue_get from tests import FlexGetBase class TestMovieQueue(FlexGetBase): __yaml__ = """ templates: global: mock: - {title: 'MovieInQueue', imdb_id: 'tt1931533', tmdb_id: 603, movie_name: MovieInQueue} accept_all: yes seen: local tasks: movie_queue_accept: movie_queue: accept movie_queue_add: movie_queue: add movie_queue_add_properties: movie_queue: action: add quality: 720p movie_queue_remove: movie_queue: remove movie_queue_forget: movie_queue: forget """ def test_movie_queue_accept(self): queue_add(title=u'MovieInQueue', imdb_id=u'tt1931533', tmdb_id=603) self.execute_task('movie_queue_accept') assert len(self.task.entries) == 1 entry = self.task.entries[0] assert entry.get('imdb_id', eval_lazy=False) == 'tt1931533' assert entry.get('tmdb_id', eval_lazy=False) == 603 self.execute_task('movie_queue_accept') assert len(self.task.entries) == 0, 'Movie should only be accepted once' def test_movie_queue_add(self): self.execute_task('movie_queue_add') assert len(self.task.entries) == 1 queue = queue_get() assert len(queue) == 1 entry = queue[0] assert entry.imdb_id == 'tt1931533' assert entry.tmdb_id == 603 assert entry.quality == 'any' def test_movie_queue_add_properties(self): self.execute_task('movie_queue_add_properties') assert len(self.task.entries) == 1 queue = queue_get() assert len(queue) == 1 entry = queue[0] assert entry.imdb_id == 'tt1931533' assert entry.tmdb_id == 603 assert entry.quality == '720p' def test_movie_queue_remove(self): queue_add(title=u'MovieInQueue', imdb_id=u'tt1931533', tmdb_id=603) queue_add(title=u'KeepMe', imdb_id=u'tt1933533', tmdb_id=604) self.execute_task('movie_queue_remove') assert len(self.task.entries) == 1 queue = queue_get() assert len(queue) == 1 entry = queue[0] assert entry.imdb_id == 'tt1933533' assert entry.tmdb_id == 604 def test_movie_queue_forget(self): queue_add(title=u'MovieInQueue', imdb_id=u'tt1931533', tmdb_id=603) self.execute_task('movie_queue_accept') 
assert len(queue_get(downloaded=True)) == 1 self.execute_task('movie_queue_forget') assert not queue_get(downloaded=True) assert len(queue_get()) == 1
{ "repo_name": "offbyone/Flexget", "path": "tests/test_movie_queue.py", "copies": "12", "size": "2746", "license": "mit", "hash": 1057269464792644500, "line_mean": 29.1758241758, "line_max": 102, "alpha_frac": 0.5782957028, "autogenerated": false, "ratio": 3.5341055341055343, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": null, "num_lines": null }
from flexible_permissions.models import Permission from flexible_permissions.roles import register_role from tests.models import User, Group, Zoo, Exhibit, Animal def create_test_models(): users = { 'admin': User.objects.create(name='admin user'), 'staff': User.objects.create(name='staff user'), 'visitor': User.objects.create(name='visiting user') } staff = Group.objects.create(name='staff') staff.user_set.add(users['staff']) zoo = Zoo.objects.create() exhibits = [ Exhibit.objects.create(zoo=zoo), Exhibit.objects.create(zoo=zoo), ] animals = [ Animal.objects.create(exhibit=exhibits[0]), Animal.objects.create(exhibit=exhibits[0]), Animal.objects.create(exhibit=exhibits[1]), Animal.objects.create(exhibit=exhibits[1]), ] Permission.objects.bulk_create([ Permission(role='zoo.admin', agent=users['admin'], target=zoo), Permission(role='exhibit.staff', agent=staff, target=exhibits[0]), Permission(role='zoo.visitor', target=zoo), ])
{ "repo_name": "staab/django-flexible-permissions", "path": "tests/utils.py", "copies": "1", "size": "1087", "license": "mit", "hash": 5063708800724455000, "line_mean": 30.0571428571, "line_max": 74, "alpha_frac": 0.6540938362, "autogenerated": false, "ratio": 3.334355828220859, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4488449664420859, "avg_score": null, "num_lines": null }
from flexible_permissions._utils import ensure_plural, is_value """ Maps of roles to actions """ ROLES = {} ACTIONS = {} def calculate_actions(roles): result = {} for role, actions in roles.items(): for action in actions: result.setdefault(action, []) result[action].append(role) result[action] = list(set(result[action])) return result def register_role(name, actions): global ACTIONS for action in actions: if action.count(".") != 1: raise ValueError( "Actions should have one period in them - the left portion" "should be the name of the object; the right the name of the" "action." ) # Merge the actions together pre_existing_actions = ROLES.get(name, []) ROLES[name] = list(set(actions + pre_existing_actions)) # Keep action map in sync ACTIONS = calculate_actions(ROLES) def roles_to_actions(roles): # Keep special values if not is_value(roles): return roles roles = ensure_plural(roles) return list(set([action for role in roles for action in ROLES[role]])) def actions_to_roles(actions): # Keep special values if not is_value(actions): return actions actions = ensure_plural(actions) return list(set([role for action in actions for role in ACTIONS[action]]))
{ "repo_name": "staab/django-flexible-permissions", "path": "flexible_permissions/roles.py", "copies": "1", "size": "1395", "license": "mit", "hash": 7471720684500376000, "line_mean": 23.9107142857, "line_max": 78, "alpha_frac": 0.6229390681, "autogenerated": false, "ratio": 4.067055393586005, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 56 }
from flexmock import flexmock from django.test import TestCase from django.contrib.auth.models import User from nose.tools import raises from tardis.tardis_portal.models import Experiment, ParameterName, Schema, ExperimentParameter, ExperimentParameterSet from tardis.apps.sync.consumer_fsm import Complete, InProgress, FailPermanent, \ CheckingIntegrity, Requested, Ingested from tardis.apps.sync.tasks import clock_tick from tardis.apps.sync.models import SyncedExperiment from ..transfer_service import TransferService from ..managers.default_manager import SyncManager from ..site_manager import SiteManager from httplib2 import Http class ManagerTestCase(TestCase): def setUp(self): self.user = User(username='user1', password='password', email='a@a.com') self.user.save() self.exp = Experiment( approved = True, title = 'title1', institution_name = 'institution1', description = 'description1', created_by = self.user, public_access = Experiment.PUBLIC_ACCESS_FULL ) self.exp.save() self.sync_exp = SyncedExperiment( experiment=self.exp, uid='test.1', provider_url='http://somewhere.com') self.sync_exp.save() self.site_manager = flexmock() flexmock(SiteManager).new_instances(self.site_manager) self.Http = flexmock() flexmock(Http).new_instances(self.Http) class SyncManagerTestCase(ManagerTestCase): def testGenerateUid(self): sm = SyncManager(institution='test') uid = sm.generate_exp_uid(self.exp) self.assertEqual(uid, 'test.1') def testExpFromUid(self): sm = SyncManager(institution='test') result = sm._exp_from_uid('test.1') self.assertEqual(result, self.exp) @raises(TransferService.InvalidUIDError) def testExpFromInvalidInstitution(self): sm = SyncManager(institution='test') result = sm._exp_from_uid('test1.unicorns') @raises(TransferService.InvalidUIDError) def testExpFromInvalidUid(self): sm = SyncManager(institution='test') result = sm._exp_from_uid('testunicorns') @raises(TransferService.InvalidUIDError) def testExpFromInvalidUid(self): sm = 
SyncManager(institution='test') result = sm._exp_from_uid('test.unicorns') def testGetStatus(self): sm = SyncManager(institution='test') sm.get_status('test.1') def testPostExperiment(self): sm = SyncManager(institution='test') settings = { 'url': 'http://somewhere.com', 'username': 'username', 'password': 'password', 'fileProtocol': 'tardis' } import httplib2 resp = httplib2.Response({'status': '200'}) self.Http.should_receive('request').with_args(settings['url'], 'POST', body=str, headers=dict).and_return((resp, '')) result = sm._post_experiment(self.sync_exp, [], settings) self.assertTrue(result) def testPostExperimentFailed(self): sm = SyncManager(institution='test') settings = { 'url': 'http://somewhere.com', 'username': 'username', 'password': 'password', 'fileProtocol': 'tardis' } import httplib2 resp = httplib2.Response({'status': '500'}) self.Http.should_receive('request').with_args( settings['url'], 'POST', body=str, headers=dict).and_return((resp, '')) result = sm._post_experiment(self.sync_exp, [], settings) self.assertFalse(result) def testGetStatus(self): sm = SyncManager(institution='test') result = sm.get_status('test.1') def testStartFileTransfer(self): settings = { 'transfer': { 'option1': 'option1test' } } self.site_manager.should_receive('get_site_settings').and_return(settings) sm = SyncManager(institution='test') sm._start_file_transfer = lambda *args: args url = 'http://somewhere.com' exp, settings_, dest_path = sm.start_file_transfer('test.1', url, 'path_to_exp') self.assertEqual(exp, self.exp) self.assertEqual(settings['transfer'], settings_) self.assertEqual(dest_path, 'path_to_exp') @raises(TransferService.SiteError) def testStartFileTransferInvalidSite(self): self.site_manager.should_receive('get_site_settings').and_return(None) sm = SyncManager(institution='test') url = 'http://somewhere.com' exp, settings_, dest_path = sm.start_file_transfer('test.1', url, 'path_to_exp')
{ "repo_name": "iiman/mytardis", "path": "tardis/apps/sync/tests/test_managers.py", "copies": "1", "size": "4691", "license": "bsd-3-clause", "hash": -6908066531166342000, "line_mean": 37.4508196721, "line_max": 125, "alpha_frac": 0.6346194841, "autogenerated": false, "ratio": 3.9026622296173046, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.004224439752589577, "num_lines": 122 }
from flexmock import flexmock from framework.db.db import DB import framework.db.db_handler as db_handler from collections import defaultdict from framework.lib import general class DBEnvironmentBuilder(): def build(self): self._create_core_mock() db = flexmock(DB(self.core_mock)) flexmock(db.DBHandler) db.DBHandler.should_receive("InitDB") # Neutralize the access to the file system db.DBHandler.Storage['SEED_DB'] = {"seed/path": {'Data': [], 'SyncCount': 0}} db.DBHandler.GetDBNames_old = db.DBHandler.GetDBNames db.DBHandler.should_receive("GetDBNames").and_return(["db1", "db2", "HTMLID_DB"]) general.INCOMING_QUEUE_TO_DIR_MAPPING = defaultdict(list) general.OUTGOING_QUEUE_TO_DIR_MAPPING = defaultdict(list) self.core_mock.DB = db return db def _create_core_mock(self): self.core_mock = flexmock() self.core_mock.Config = flexmock() self.core_mock.Config.should_receive("GetAll").and_return(["path"]) def fake_get(key): # Faster than loading the real config object values = {"REGEXP_FILE_URL": "^[^\?]+\.(xml|exe|pdf|cs|log|inc|dat|bak|conf|cnf|old|zip|7z|rar|tar|gz|bz2|txt|xls|xlsx|doc|docx|ppt|pptx)$", "REGEXP_SMALL_FILE_URL": "^[^\?]+\.(xml|cs|inc|dat|bak|conf|cnf|old|txt)$", "REGEXP_IMAGE_URL": "^[^\?]+\.(jpg|jpeg|png|gif|bmp)$", "REGEXP_VALID_URL": "^(http|ftp)[^ ]+$", "SIMULATION": True, "SEED_DB": "seed/path", "HTMLID_DB": "path"} return values[key] self.core_mock.Config.Get = fake_get
{ "repo_name": "mikefitz888/owtf", "path": "tests/testing_framework/db/environments.py", "copies": "3", "size": "1709", "license": "bsd-3-clause", "hash": 1307279346603690000, "line_mean": 43.9736842105, "line_max": 152, "alpha_frac": 0.5933294324, "autogenerated": false, "ratio": 3.318446601941748, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0032427567470715694, "num_lines": 38 }
from flexmock import flexmock from os import path from framework.shell.interactive_shell import InteractiveShell from framework.timer import Timer class InteractiveShellEnvironmentBuilder(): def __call__(self): self._create_core_mock() instance = InteractiveShell(self.core_mock) instance.Open(self._get_options_example(), None) return instance def _create_core_mock(self): self.core_mock = flexmock() self._mock_timer() self._mock_config() self._mock_error_handler() self._mock_db() def _mock_timer(self): self.core_mock.Timer = Timer() def _mock_config(self): self.core_mock.Config = flexmock() self.core_mock.Config.should_receive("Get").with_args("TARGET").and_return("localhost") self.core_mock.Config.Get = lambda arg:"localhost" if arg == "TARGET" else "" def _mock_error_handler(self): self.core_mock.Error = flexmock() self.core_mock.Error.should_receive("UserAbort").and_return("") def _mock_db(self): self.core_mock.DB = flexmock() self.core_mock.DB.CommandRegister = flexmock() self.core_mock.DB.CommandRegister.should_receive("AlreadyRegistered").and_return(False) self.core_mock.DB.CommandRegister.Add = lambda arg: True # Due to some flexmock bug with nested objects def _get_options_example(self): return {"ConnectVia": [['', 'bash']], "InitialCommands": None, "CommandsBeforeExit": None, "ExitMethod": "kill", "CommandsBeforeExitDelim": ";"}
{ "repo_name": "DePierre/owtf", "path": "tests/testing_framework/shell/environments.py", "copies": "3", "size": "1620", "license": "bsd-3-clause", "hash": 7671244262712307000, "line_mean": 35, "line_max": 111, "alpha_frac": 0.6333333333, "autogenerated": false, "ratio": 3.8663484486873507, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.599968178198735, "avg_score": null, "num_lines": null }
from flexmock import flexmock from pytest import raises import sqlalchemy as sa from sqlalchemy_utils import ChoiceType, Choice, ImproperlyConfigured from tests import TestCase class TestChoice(object): def test_equality_operator(self): assert Choice(1, 1) == 1 assert 1 == Choice(1, 1) assert Choice(1, 1) == Choice(1, 1) def test_non_equality_operator(self): assert Choice(1, 1) != 2 assert not (Choice(1, 1) != 1) class TestChoiceType(TestCase): def create_models(self): class User(self.Base): TYPES = [ ('admin', 'Admin'), ('regular-user', 'Regular user') ] __tablename__ = 'user' id = sa.Column(sa.Integer, primary_key=True) type = sa.Column(ChoiceType(TYPES)) def __repr__(self): return 'User(%r)' % self.id self.User = User def test_python_type(self): type_ = self.User.__table__.c.type.type assert type_.python_type def test_string_processing(self): flexmock(ChoiceType).should_receive('_coerce').and_return( u'admin' ) user = self.User( type=u'admin' ) self.session.add(user) self.session.commit() user = self.session.query(self.User).first() assert user.type.value == u'Admin' def test_parameter_processing(self): user = self.User( type=u'admin' ) self.session.add(user) self.session.commit() user = self.session.query(self.User).first() assert user.type.value == u'Admin' def test_scalar_attributes_get_coerced_to_objects(self): user = self.User(type=u'admin') assert isinstance(user.type, Choice) def test_throws_exception_if_no_choices_given(self): with raises(ImproperlyConfigured): ChoiceType([]) class TestChoiceTypeWithCustomUnderlyingType(TestCase): def test_init_type(self): type_ = ChoiceType([(1, u'something')], impl=sa.Integer) assert type_.impl == sa.Integer
{ "repo_name": "tonyseek/sqlalchemy-utils", "path": "tests/types/test_choice.py", "copies": "2", "size": "2131", "license": "bsd-3-clause", "hash": 5477864340275635000, "line_mean": 26.3205128205, "line_max": 69, "alpha_frac": 0.5842327546, "autogenerated": false, "ratio": 3.8816029143897994, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 78 }
from flexmock import flexmock from tests.testing_framework.utils import ExpensiveResourceProxy from tests.testing_framework.config.environments import PluginConfigEnvironmentBuilder from framework.plugin.plugin_handler import PluginHandler class PluginHandlerEnvironmentBuilder(): plugin_config_proxy = ExpensiveResourceProxy(PluginConfigEnvironmentBuilder()) def __init__(self): self._create_core_mock() self._create_shell_mock() self.plugin_handler = PluginHandler(self.core_mock, self._get_options()) flexmock(self.plugin_handler.scanner) def get_plugin_handler(self): return self.plugin_handler def _create_core_mock(self): self.core_mock = flexmock() self._create_config_mock() def _create_config_mock(self): self.core_mock.Config = flexmock() self.core_mock.Config.Plugin = self.__class__.plugin_config_proxy.get_instance() self.core_mock.Config.should_receive("GetProcessPerCore").and_return(1) self.core_mock.Config.should_receive("GetMinRam").and_return(0) def _create_shell_mock(self): self.core_mock.Shell = flexmock() self.core_mock.Shell.should_receive("shell_exec").and_return(100) def _get_options(self): return {"Simulation": False, "Scope": "localhost", "PluginGroup": "web", "PluginType": "quiet", "Algorithm": "breadth", "ListPlugins": None, "OnlyPlugins": None, "ExceptPlugins": None}
{ "repo_name": "sharad1126/owtf", "path": "tests/testing_framework/plugin/environments.py", "copies": "3", "size": "1566", "license": "bsd-3-clause", "hash": -9190411053158168000, "line_mean": 36.2857142857, "line_max": 88, "alpha_frac": 0.6526181354, "autogenerated": false, "ratio": 4.005115089514066, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.6157733224914067, "avg_score": null, "num_lines": null }
from flexmock import flexmock import pytest import yaml import anymarkup_core class TestLibsNotInstalled(object): # json is always there, since we only support Python >= 2.7 @pytest.mark.parametrize(('fmt', 'lib'), [ ('ini', 'configobj'), ('xml', 'xmltodict'), ('yaml', 'yaml'), ]) def test_raises_proper_error(self, fmt, lib): flexmock(anymarkup_core).should_receive(lib).and_return(None) flexmock(anymarkup_core).should_receive('fmt_to_lib').and_return({fmt: (None, lib)}) with pytest.raises(anymarkup_core.AnyMarkupError): anymarkup_core.parse('', format=fmt) with pytest.raises(anymarkup_core.AnyMarkupError): anymarkup_core.serialize('', format=fmt) def test_uninstalled_dep_doesnt_make_parsing_fail_for_installed_deps(self): flexmock(anymarkup_core).should_receive('configobj').and_return(None) flexmock(anymarkup_core).should_receive('fmt_to_lib').\ and_return({'ini': (None, ''), 'yaml': (yaml, '')}) with pytest.raises(anymarkup_core.AnyMarkupError): anymarkup_core.parse('', format='ini') assert anymarkup_core.parse('foo: bar') == {'foo': 'bar'} with pytest.raises(anymarkup_core.AnyMarkupError): anymarkup_core.serialize('', format='ini') assert anymarkup_core.serialize({'foo': 'bar'}, format='yaml') == b'foo: bar\n'
{ "repo_name": "bkabrda/anymarkup-core", "path": "test/test_libs_not_installed.py", "copies": "1", "size": "1425", "license": "bsd-3-clause", "hash": -8987527968422217000, "line_mean": 36.5, "line_max": 92, "alpha_frac": 0.6350877193, "autogenerated": false, "ratio": 3.607594936708861, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4742682656008861, "avg_score": null, "num_lines": null }
from flexmock import MethodCallError from flexmock import flexmock_teardown from flexmock_test import assertRaises from nose import with_setup import flexmock import flexmock_test import unittest def test_module_level(): m = flexmock(mod=2) m.should_receive('mod').once assertRaises(MethodCallError, flexmock_teardown) def test_module_level_generator(): mock = flexmock(foo=lambda x, y: None, bar=lambda: None) mock.should_receive('bar').never # change never to once to observe the failure for i in range(0, 3): yield mock.foo, i, i*3 class TestRegularClass(flexmock_test.RegularClass): def test_regular(self): a = flexmock(a=2) a.should_receive('a').once assertRaises(MethodCallError, flexmock_teardown) def test_class_level_generator_tests(self): mock = flexmock(foo=lambda a, b: a) mock.should_receive('bar').never # change never to once to observe the failure for i in range(0, 3): yield mock.foo, i, i*3 class TestUnittestClass(flexmock_test.TestFlexmockUnittest): def test_unittest(self): a = flexmock(a=2) a.should_receive('a').once assertRaises(MethodCallError, flexmock_teardown)
{ "repo_name": "has207/flexmock", "path": "tests/flexmock_nose_test.py", "copies": "2", "size": "1166", "license": "bsd-2-clause", "hash": -4048583715773393400, "line_mean": 26.7619047619, "line_max": 83, "alpha_frac": 0.7298456261, "autogenerated": false, "ratio": 3.3314285714285714, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.007713136730433753, "num_lines": 42 }
from flex.validation.request import ( validate_request, ) from flex.error_messages import MESSAGES from flex.constants import ( PATH, QUERY, STRING, INTEGER, ARRAY ) from tests.factories import ( SchemaFactory, RequestFactory, ) from tests.utils import assert_message_in_errors def test_request_query_parameter_validation_with_no_declared_parameters(): """ Ensure that when a query parameter is present with no definition within the schema that it is ignored. We need to have at least one parameter at play to trigger parameter validation to happen for this endpoint. """ schema = SchemaFactory( parameters = { 'id': { 'name': 'id', 'in': PATH, 'description': 'id', 'required': True, 'type': INTEGER, }, }, paths={ '/get/{id}/': { 'parameters': [ {'$ref': '#/parameters/id'}, ], 'get': { 'responses': {200: {'description': "Success"}}, }, }, }, ) request = RequestFactory(url='http://www.example.com/get/3/?page=3') validate_request( request=request, schema=schema, ) def test_request_query_parameter_validation_with_array_by_multi(): """ Ensure that when a query parameter is present with no definition within the schema that it is ignored. We need to have at least one parameter at play to trigger parameter validation to happen for this endpoint. """ schema = SchemaFactory( parameters = { 'status': { 'name': 'status', 'in': QUERY, 'description': 'status', 'required': True, 'type': ARRAY, "items": { "type": "string", "enum": [ "available", "pending", "sold" ], "default": "available" }, "collectionFormat": "multi" }, }, paths={ '/get': { 'parameters': [ {'$ref': '#/parameters/status'}, ], 'get': { 'responses': {200: {'description': "Success"}}, }, }, }, ) request = RequestFactory(url='http://www.example.com/get?status=sold') validate_request( request=request, schema=schema, )
{ "repo_name": "pipermerriam/flex", "path": "tests/validation/request/test_request_query_parameter_validation.py", "copies": "1", "size": "2671", "license": "mit", "hash": -6747925633655843000, "line_mean": 25.9797979798, "line_max": 79, "alpha_frac": 0.4687383003, "autogenerated": false, "ratio": 4.778175313059034, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0017566974088713218, "num_lines": 99 }
from flexx.app._component2 import PyComponent, JsComponent from flexx.app._component2 import BaseAppComponent, LocalComponent, ProxyComponent from flexx.event import Component from flexx import event, app from flexx.util.testing import run_tests_if_main, raises, skip class StubSession: id = 'y' status = 2 app = None def _register_component(self, c, id=None): id = id or 'x' c._id = id c._uid = self.id + '_' + id def _unregister_component(self, c): pass def send_command(self, *command): pass def keep_alive(self, ob): pass class MyPComponent1(PyComponent): CSS = "xx" foo = event.IntProp() foo2 = event.IntProp() @event.action def increase_foo(self): self._mutate_foo(self.foo + 1) @event.reaction('foo') def track_foo(self, *events): pass class MyJComponent1(JsComponent): CSS = "xx" foo = event.IntProp() foo2 = event.IntProp() @event.action def increase_foo(self): self._mutate_foo(self.foo + 1) @event.reaction('foo') def track_foo(self, *events): pass class MyPComponent2(MyPComponent1): pass class MyJComponent2(MyJComponent1): pass all_classes = [MyPComponent2, MyJComponent2, MyPComponent2.JS, MyJComponent2.JS, MyPComponent1, MyJComponent1, MyPComponent1.JS, MyJComponent1.JS, PyComponent, JsComponent, PyComponent.JS, JsComponent.JS, LocalComponent, ProxyComponent, BaseAppComponent, Component] def test_pycomponent_heritage(): C = MyPComponent2 # Names and repr assert C.__name__ == C.JS.__name__ assert 'PyComponent' in repr(C) and 'PyComponent' in repr(C.JS) assert not 'proxy' in repr(C) and 'proxy' in repr(C.JS) assert not 'JS' in repr(C) and 'for JS' in repr(C.JS) mro = [MyPComponent2, MyPComponent1, PyComponent, LocalComponent, BaseAppComponent, Component, object] # Validate inheritance of py class assert C.mro() == mro # Also check issubclass() for cls in mro: assert issubclass(C, cls) for cls in all_classes: if cls not in mro: assert not issubclass(C, cls) # Also check isinstance() foo = C(flx_session=StubSession()) for cls in mro: assert isinstance(foo, cls) for cls 
in all_classes: if cls not in mro: assert not isinstance(foo, cls) mro = [MyPComponent2.JS, MyPComponent1.JS, PyComponent.JS, ProxyComponent, BaseAppComponent, Component, object] # Validate inheritance of JS class assert C.JS.mro() == mro # Also check issubclass() for cls in mro: assert issubclass(C.JS, cls) for cls in all_classes: if cls not in mro: assert not issubclass(C.JS, cls) def test_jscomponent_heritage(): session = app.manager.get_default_session() if session is None: session = app.manager.create_default_session() C = MyJComponent2 # Names and repr assert C.__name__ == C.JS.__name__ assert 'JsComponent' in repr(C) and 'JsComponent' in repr(C.JS) assert 'proxy' in repr(C) and 'proxy' not in repr(C.JS) assert not 'JS' in repr(C) and 'for JS' in repr(C.JS) mro = [MyJComponent2, MyJComponent1, JsComponent, ProxyComponent, BaseAppComponent, Component, object] # Validate inheritance of py class assert C.mro() == mro # Also check issubclass() for cls in mro: assert issubclass(C, cls) for cls in all_classes: if cls not in mro: assert not issubclass(C, cls) # Also check isinstance() foo = C(flx_session=session) for cls in mro: assert isinstance(foo, cls) for cls in all_classes: if cls not in mro: assert not isinstance(foo, cls) mro = [MyJComponent2.JS, MyJComponent1.JS, JsComponent.JS, LocalComponent, BaseAppComponent, Component, object] # Validate inheritance of JS class assert C.JS.mro() == mro # Also check issubclass() for cls in mro: assert issubclass(C.JS, cls) for cls in all_classes: if cls not in mro: assert not issubclass(C.JS, cls) def test_properties(): assert MyPComponent2.__properties__ == ['foo', 'foo2'] assert MyPComponent2.JS.__properties__ == ['foo', 'foo2'] assert MyJComponent2.__properties__ == ['foo', 'foo2'] assert MyJComponent2.JS.__properties__ == ['foo', 'foo2'] assert MyPComponent2.__actions__ == ['increase_foo'] assert MyPComponent2.JS.__actions__ == ['_emit_at_proxy'] assert MyJComponent2.__actions__ == ['_emit_at_proxy'] assert 
MyJComponent2.JS.__actions__ == ['increase_foo'] assert MyPComponent2.__reactions__ == ['track_foo'] assert MyPComponent2.JS.__reactions__ == [] assert MyJComponent2.__reactions__ == [] assert MyJComponent2.JS.__reactions__ == ['track_foo'] def test_cannot_instantiate_without_session(): app.manager.remove_default_session() with raises(RuntimeError) as err: PyComponent() assert 'needs a session!' in str(err.value) with raises(RuntimeError) as err: JsComponent() assert 'needs a session!' in str(err.value) def test_generated_js1(): m = app.assets.modules['flexx.app._component2'] js = m.get_js() classes = [] for line in js.splitlines(): if '._base_class =' in line: classes.append(line.split('.')[0]) assert classes == ['LocalProperty', 'BaseAppComponent', 'LocalComponent', 'ProxyComponent', 'StubComponent', 'JsComponent', 'PyComponent'] print(classes) def test_generated_js2(): js = MyPComponent2.JS.CODE assert '__properties__ = ["foo", "foo2"]' in js assert js.count('foo2') == 1 # in __properties__ assert js.count('increase_foo') == 0 assert js.count('_mutate_') == 0 js = MyJComponent2.JS.CODE assert '__properties__ = ["foo", "foo2"]' in js assert js.count('foo2') == 2 # in __properties__ and __proxy_properties__ assert js.count('increase_foo') == 1 assert js.count('_mutate_') == 0 def test_generated_css1(): assert not hasattr(MyPComponent1.JS, 'CSS') assert not hasattr(MyJComponent1.JS, 'CSS') assert not hasattr(MyPComponent2.JS, 'CSS') assert not hasattr(MyJComponent2.JS, 'CSS') assert MyPComponent1.CSS == 'xx' assert MyJComponent1.CSS == 'xx' assert MyPComponent2.CSS == '' assert MyJComponent2.CSS == '' def test_misc(): clss = app.get_component_classes() assert PyComponent in clss and JsComponent in clss assert LocalComponent not in clss and ProxyComponent not in clss assert BaseAppComponent not in clss # Assert that the list is a copy clss.remove(PyComponent) assert PyComponent in app.get_component_classes() run_tests_if_main()
{ "repo_name": "zoofIO/flexx", "path": "flexx/app/tests/test_component2.py", "copies": "2", "size": "6922", "license": "bsd-2-clause", "hash": -640908081033818600, "line_mean": 27.368852459, "line_max": 115, "alpha_frac": 0.6316093615, "autogenerated": false, "ratio": 3.564366632337796, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5195975993837796, "avg_score": null, "num_lines": null }
from flexx import app, ui, event TEXT = """ hallo dit is wat text als test. lorum epsilum blabla yield renderer class Editor(ui.CanvasWidget): @event.prop(both=True) def font_size(self, v): return int(v) class JS: text = TEXT def init(self): self.ctx = self.node.getContext('2d')#, {alpha: false}) @event.connect('mouse_wheel') def _change_font_size(self, *events): s = self.font_size for ev in events: if ev.vscroll > 0: s += 1 else: s -= 1 self.font_size = max(5, min(s, 30)) """ * 100 class Editor(ui.CanvasWidget): @event.prop(both=True) def font_size(self, v): return int(v) class JS: text = TEXT def init(self): self.ctx = self.node.getContext('2d')#, {'alpha': False}) # Use trick to get HiDPI text: # http://www.html5rocks.com/en/tutorials/canvas/hidpi/ self.dpratio = window.devicePixelRatio or 1 self.bsratio = (self.ctx.webkitBackingStorePixelRatio or self.ctx.mozBackingStorePixelRatio or self.ctx.msBackingStorePixelRatio or self.ctx.oBackingStorePixelRatio or self.ctx.backingStorePixelRatio or 1) @event.connect('mouse_wheel') def _change_font_size(self, *events): s = self.font_size for ev in events: if ev.vscroll > 0: s += 1 else: s -= 1 self.font_size = max(5, min(s, 30)) def _measure_text_height(self): # Inspired by http://stackoverflow.com/a/1135363/2271927 # todo: only do this when font_size changes ctx = self.ctx sample = 'gM(' width = int(ctx.measureText(sample).width+1) height = 100 ctx.fillText(sample, 0, int(height/2)) data = ctx.getImageData(0, 0, width, height).data first = False last = False r = height # Find the last line with a non-white pixel while r > 0 and last == False: r -= 1 for c in range(width): if data[r * width * 4 + c * 4 + 3] > 0: last = r break # Find first line with a non-white pixel while r > 0: r -= 1 for c in range(width): if data[r * width * 4 + c * 4 + 3] > 0: first = r break return last - first @event.connect('size', 'font_size') def update(self, *events): ctx = self.ctx w, h = self.size # Enable hidpi ratio = self.dpratio / 
self.bsratio self.node.width = w * ratio self.node.height = h * ratio ctx.clearRect(0, 0, w, h) ctx.font = "%ipx DejaVu Sans Mono" % self.font_size cw = ctx.measureText('x').width ch = self._measure_text_height() import time t0 = time.time() ypos = 0 for line in self.text.splitlines(): ypos += ch + 2 ctx.fillText(line, 0, ypos) ctx.scale(ratio, ratio) #print(time.time() - t0) if __name__ == '__main__': m = app.launch(Editor, 'xul')
{ "repo_name": "JohnLunzer/flexx", "path": "exp/editor.py", "copies": "1", "size": "3890", "license": "bsd-2-clause", "hash": -8269569297625965000, "line_mean": 28.2481203008, "line_max": 69, "alpha_frac": 0.4341902314, "autogenerated": false, "ratio": 4.246724890829694, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5180915122229693, "avg_score": null, "num_lines": null }
from flexx import flx class UserInput(flx.PyWidget): def init(self): with flx.VBox(): self.edit = flx.LineEdit(placeholder_text='Your name') flx.Widget(flex=1) @flx.reaction('edit.user_done') def update_user(self, *events): new_text = self.root.store.username + "\n" + self.edit.text self.root.store.set_username(new_text) self.edit.set_text("") class SomeInfoWidget(flx.PyWidget): def init(self): with flx.FormLayout(): self.label = flx.Label(title='name:') flx.Widget(flex=1) @flx.reaction def update_label(self): self.label.set_text(self.root.store.username) class Store(flx.PyComponent): username = flx.StringProp(settable=True) class Example(flx.PyWidget): store = flx.ComponentProp() def init(self): # Create our store instance self._mutate_store(Store()) # Imagine this being a large application with many sub-widgets, # and the UserInput and SomeInfoWidget being used somewhere inside it. with flx.HSplit(): UserInput() flx.Widget(style='background:#eee;') SomeInfoWidget() if __name__ == '__main__': m = flx.launch(Example, 'default-browser', backend='flask') flx.run()
{ "repo_name": "zoofIO/flexx", "path": "flexxamples/howtos/python_side_widget2.py", "copies": "2", "size": "1321", "license": "bsd-2-clause", "hash": -7701091583442475000, "line_mean": 25.9795918367, "line_max": 78, "alpha_frac": 0.6078728236, "autogenerated": false, "ratio": 3.522666666666667, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.01670888010471675, "num_lines": 49 }
from flexx.util.testing import raises, run_tests_if_main import os import sys import tempfile from flexx.util.config import Config SAMPLE1 = """ foo = yes bar = 3 spam = 2.3 eggs = bla bla [other] bar = 9 """ SAMPLE2 = """ [testconfig] foo = yes bar = 4 spam = 3.3 eggs = bla bla bla [other] bar = 9 """ SAMPLE3 = """ <bullocks :: -= """ def test_config_name(): # Empty config c = Config('aa') assert len(c) == 0 # ok c = Config('AA') with raises(TypeError): Config() with raises(ValueError): Config(3) with raises(ValueError): Config('0aa') with raises(ValueError): Config('_aa') def test_defaults(): c = Config('testconfig', x01=(3, int, 'an int'), x02=(3, float, 'a float'), x03=('yes', bool, 'a bool'), x04=((1,2,3), str, 'A list of ints, as a string'), x05=((1,2,3), (int, ), 'A list of ints, as a tuple'), x06=((1,2,3), (str, ), 'A list of strings, as a tuple'), ) # Test iteration assert len(c) == 6 for name in c: assert name in ('x01', 'x02', 'x03', 'x04', 'x05', 'x06') assert set(dir(c)) == set([name for name in c]) # Test values assert c.x01 == 3 assert c.x02 == 3.0 assert c.x03 == True assert c.x04 == '(1, 2, 3)' assert c.x05 == (1, 2, 3) assert c.x06 == ('1', '2', '3') # Test docstring (e.g. 
alphabetic order) i1 = c.__doc__.find('x01') i2 = c.__doc__.find('x02') i3 = c.__doc__.find('x03') i4 = c.__doc__.find('x04') assert i1 > 0 assert i2 > i1 assert i3 > i2 assert i4 > i3 assert 'x01 (int): ' in c.__doc__ assert 'x04 (str): ' in c.__doc__ assert 'x05 (int-tuple): ' in c.__doc__ assert 'x06 (str-tuple): ' in c.__doc__ def test_option_spec_fail(): # ok Config('aa', foo=(3, int, '')) with raises(ValueError): Config('aa', _foo=(3, int, '')) for spec in [(), # too short (3, int), # still too short (3, int, 'docs', None), # too long (3, None, 'docs'), # type is not a type ('', set, 'docs'), # type is not supported ('3,3', [], 'docs'), # tuple type needs one element ('3,3', [int, int], 'docs'), # not two ('3,3', [set], 'docs'), # and must be supported ]: with raises(ValueError): Config('aa', foo=spec) def test_read_file(): # Prepare config files filename1 = os.path.join(tempfile.gettempdir(), 'flexx_config_test1.cfg') with open(filename1, 'wb') as f: f.write(SAMPLE1.encode()) filename2 = os.path.join(tempfile.gettempdir(), 'flexx_config_test2.cfg') with open(filename2, 'wb') as f: f.write(SAMPLE2.encode()) filename3 = os.path.join(tempfile.gettempdir(), 'flexx_config_test3.cfg') with open(filename3, 'wb') as f: f.write(SAMPLE3.encode()) filename4 = os.path.join(tempfile.gettempdir(), 'flexx_config_test4.cfg') with open(filename4, 'wb') as f: f.write(b'\x00\xff') # Config without sources c = Config('testconfig', foo=(False, bool, ''), bar=(1, int, ''), spam=(0.0, float, ''), eggs=('', str, '')) assert c.foo == False assert c.bar == 1 # Config with filename, implicit section c = Config('testconfig', filename1, foo=(False, bool, ''), bar=(1, int, ''), spam=(0.0, float, ''), eggs=('', str, '')) assert c.foo == True assert c.bar == 3 assert c.eggs == 'bla bla' # Config with filename, explicit section c = Config('testconfig', filename2, foo=(False, bool, ''), bar=(1, int, ''), spam=(0.0, float, ''), eggs=('', str, '')) assert c.foo == True assert c.bar == 4 assert 
c.eggs == 'bla bla bla' # Config with string, implicit section c = Config('testconfig', SAMPLE1, foo=(False, bool, ''), bar=(1, int, ''), spam=(0.0, float, ''), eggs=('', str, '')) assert c.foo == True assert c.bar == 3 assert c.eggs == 'bla bla' # Config with string, explicit section c = Config('testconfig', SAMPLE2, foo=(False, bool, ''), bar=(1, int, ''), spam=(0.0, float, ''), eggs=('', str, '')) assert c.foo == True assert c.bar == 4 assert c.eggs == 'bla bla bla' # Config with string, implicit section, different name c = Config('aaaa', SAMPLE1, foo=(False, bool, ''), bar=(1, int, ''), spam=(0.0, float, ''), eggs=('', str, '')) assert c.foo == True assert c.bar == 3 # Config with string, explicit section, different name (no section match) c = Config('aaaa', SAMPLE2, foo=(False, bool, ''), bar=(1, int, ''), spam=(0.0, float, ''), eggs=('', str, '')) assert c.foo == False assert c.bar == 1 # Config with both, and filenames can be nonexistent c = Config('testconfig', SAMPLE1, filename2, filename1+'.cfg', foo=(False, bool, ''), bar=(1, int, ''), spam=(0.0, float, ''), eggs=('', str, '')) assert c.bar == 4 # c = Config('testconfig', filename2, filename1+'.cfg', SAMPLE1, foo=(False, bool, ''), bar=(1, int, ''), spam=(0.0, float, ''), eggs=('', str, '')) assert c.bar == 3 # Config from invalid string is ignored (logged) c = Config('testconfig', SAMPLE3, bar=(1, int, '')) assert c.bar == 1 # Config from invalid file is ignored (logged) c = Config('testconfig', filename3, bar=(1, int, '')) assert c.bar == 1 # Config from invalid unidocde file is ignored (logged) c = Config('testconfig', filename4, bar=(1, int, '')) assert c.bar == 1 # Fails with raises(ValueError): c = Config('testconfig', []) with raises(ValueError): c = Config('testconfig', 3) def test_read_file_later(): filename1 = os.path.join(tempfile.gettempdir(), 'flexx_config_test1.cfg') with open(filename1, 'wb') as f: f.write(SAMPLE1.encode()) filename2 = os.path.join(tempfile.gettempdir(), 
'flexx_config_test2.cfg') with open(filename2, 'wb') as f: f.write(SAMPLE2.encode()) os.environ['TESTCONFIG_SPAM'] = '100' c = Config('testconfig', filename1, foo=(False, bool, ''), bar=(1, int, ''), spam=(0.0, float, ''), eggs=('', str, '')) del os.environ['TESTCONFIG_SPAM'] assert c.bar == 3 # from filename1 assert c.spam == 100 c.eggs = 'haha' c.spam = 10 c.load_from_file(filename2) assert c.bar == 4 # from filename2 assert c.eggs == 'haha' # from what we set - takes precedense assert c.spam == 10 # from what we set - precedense over env var def test_access(): c = Config('testconfig', foo=(1, int, ''), BAR=(1, int, '')) assert len(c) == 2 c.foo = 3 c.BAR = 4 assert c['foo'] == 3 assert c['BAR'] == 4 c['foO'] = 30 c['BAr'] = 40 assert c['FOO'] == 30 assert c['bar'] == 40 with raises(AttributeError): c.FOO with raises(AttributeError): c.bar with raises(TypeError): c[3] with raises(IndexError): c['optiondoesnotexist'] with raises(TypeError): c[3] = '' with raises(IndexError): c['optiondoesnotexist'] = '' def test_repr_and_str(): # Prepare file filename1 = os.path.join(tempfile.gettempdir(), 'flexx_config_test1.cfg') with open(filename1, 'wb') as f: f.write(SAMPLE1.encode()) c = Config('aaa', foo=(False, bool, ''), bar=(1, int, '')) r = repr(c) summary = str(c) summary1 = summary.splitlines()[0] # Test repr assert 'aaa' in r assert r.startswith('<') and r.endswith('>') assert '2' in r # shows how many options # Test first line of summary assert 'aaa' in summary1 assert '2' in summary1 assert 'default' in summary assert not 'set' in summary assert not 'string' in summary # set some c.bar = 2 summary = str(c) summary1 = summary.splitlines()[0] # Continue assert 'default' in summary assert 'set' in summary assert summary.count('default') == 2 # once for each opt assert summary.count('set') == 1 # once for one opt # Again, now with a file c = Config('aaa', filename1, foo=(False, bool, ''), bar=(1, int, '')) summary = str(c) summary1 = summary.splitlines()[0] # Test 
first line of summary assert 'aaa' in summary1 assert '2' in summary1 assert 'default' in summary assert filename1 in summary assert not 'set' in summary assert not 'string' in summary # Again, now with a string c = Config('aaa', SAMPLE1, foo=(False, bool, ''), bar=(1, int, '')) summary = str(c) summary1 = summary.splitlines()[0] # Test first line of summary assert 'aaa' in summary1 assert '2' in summary1 assert 'default' in summary assert filename1 not in summary assert not 'set' in summary assert 'string' in summary def test_set_from_cmdline(): old_argv = sys.argv try: sys.argv = '', '--aaa-bar=9' c = Config('aaa', SAMPLE1, foo=(False, bool, ''), bar=(1, int, '')) assert c.bar == 9 sys.argv = '', '--aAa-bAr=9' c = Config('aaa', SAMPLE1, foo=(False, bool, ''), bar=(1, int, '')) assert c.bar == 9 # case insensitive sys.argv = '', '--aaa-bar', '9' c = Config('aaa', SAMPLE1, foo=(False, bool, ''), bar=(1, int, '')) assert c.bar == 3 # need syntax using equals sign sys.argv = '', '--bar', '9' c = Config('aaa', SAMPLE1, foo=(False, bool, ''), bar=(1, int, '')) assert c.bar == 3 # neeed name prefix sys.argv = '', '--aaa-foo=1,2,3' c = Config('aaa', foo=([], [int], '')) assert c.foo == (1, 2, 3) finally: sys.argv = old_argv def test_set_from_env(): name = 'config_env_test' os.environ[name.upper() + '_' + 'BAR'] = '8' c = Config(name, SAMPLE1, foo=(False, bool, ''), bar=(1, int, '')) del os.environ[name.upper() + '_' + 'BAR'] assert c.bar == 8 os.environ[name + '-' + 'bar'] = '8' c = Config(name, SAMPLE1, foo=(False, bool, ''), bar=(1, int, '')) del os.environ[name + '-' + 'bar'] assert c.bar == 3 # must be uppercase os.environ[name.upper() + '-' + 'bar'] = '8' c = Config(name, SAMPLE1, foo=(False, bool, ''), bar=(1, int, '')) del os.environ[name.upper() + '-' + 'bar'] assert c.bar == 3 # should use underscore def test_order(): filename1 = os.path.join(tempfile.gettempdir(), 'flexx_config_test1.cfg') with open(filename1, 'wb') as f: f.write(SAMPLE1.encode()) filename2 = 
os.path.join(tempfile.gettempdir(), 'flexx_config_test2.cfg') with open(filename2, 'wb') as f: f.write(SAMPLE2.encode()) old_argv = sys.argv os.environ['TESTCONFIG_BAR'] = '5' sys.argv = '', '--testconfig-bar=6' try: c = Config('testconfig', filename1, filename2, bar=(2, int, '')) finally: del os.environ['TESTCONFIG_BAR'] sys.argv = old_argv c.bar = 7 s = str(c) indices1 = [s.index(' %i '%i) for i in [2, 3, 4, 5, 6, 7]] indices2 = [s.rindex(' %i '%i) for i in [2, 3, 4, 5, 6, 7]] indices3 = list(sorted(indices1)) assert indices1 == indices3 assert indices2 == indices3 def test_docstring(): c = Config('aaa', foo=(False, bool, ''), bar=(1, int, '')) assert 'aaa' in c.__doc__ assert 'foo (bool)' in c.__doc__ assert 'bar (int)' in c.__doc__ def test_bool(): c = Config('testconfig', foo=(True, bool, ''), bar=(False, bool, '')) assert c.foo == True c.foo = True assert c.foo == True c.foo = False assert c.foo == False for name in 'yes on true Yes On TRUE 1'.split(' '): c.foo = name assert c.foo == True for name in 'no off fAlse No Off FALSE 0'.split(' '): c.foo = name assert c.foo == False for name in 'none ok bla asdasdasd cancel'.split(' '): with raises(ValueError): c.foo = name for val in (1, 2, [2], None, 0, 0.0, 1.0, []): with raises(ValueError): c.foo = val def test_int(): c = Config('testconfig', foo=(1, int, ''), bar=('1', int, '')) assert c.foo == 1 assert c.bar == 1 c.foo = 12.1 assert c.foo == 12 c.foo = '7' assert c.foo == 7 c.foo = '-23' assert c.foo == -23 for val in ([], None, '1e2', '12.1', 'a'): with raises(ValueError): c.foo = val def test_float(): c = Config('testconfig', foo=(1, float, ''), bar=('1', float, '')) assert c.foo == 1.0 assert c.bar == 1.0 c.foo = 3 assert c.foo == 3.0 c.foo = -3.1 assert c.foo == -3.1 c.foo = '2e3' assert c.foo == 2000.0 c.foo = '12.12' assert c.foo == 12.12 for val in ([], None, 'a', '0a'): with raises(ValueError): c.foo = val def test_str(): c = Config('testconfig', foo=(1, str, ''), bar=((1,2,3), str, '')) assert c.foo 
== '1' assert c.bar == '(1, 2, 3)' c.foo = 3 assert c.foo == '3' c.foo = 3.1 assert c.foo == '3.1' c.foo = 'hello there, you!' assert c.foo == 'hello there, you!' c.foo = None assert c.foo == 'None' c.foo = False assert c.foo == 'False' c.foo = [] assert c.foo == '[]' def test_tuple(): c = Config('testconfig', foo=('1,2', [int], ''), bar=((1,2,3), [str], '')) assert c.foo == (1, 2) assert c.bar == ('1', '2', '3') c.foo = 1.2, 3.3, 5 assert c.foo == (1, 3, 5) c.foo = '(7, 8, 9)' assert c.foo == (7, 8, 9) c.foo = '1, 2,-3,4' assert c.foo == (1, 2, -3, 4) c.foo = [1, '2'] assert c.foo == (1, 2) for val in ([[]], [None], ['a'], ['0a'], ['1.2'], 3): with raises(ValueError): c.foo = val c.bar = 'hello, there, you ' assert c.bar == ('hello', 'there', 'you') c.bar = [1, '2'] assert c.bar == ('1', '2') run_tests_if_main()
{ "repo_name": "JohnLunzer/flexx", "path": "flexx/util/tests/test_config.py", "copies": "1", "size": "14672", "license": "bsd-2-clause", "hash": -1664432933977016300, "line_mean": 26.2713754647, "line_max": 78, "alpha_frac": 0.5086559433, "autogenerated": false, "ratio": 3.2926391382405744, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4301295081540574, "avg_score": null, "num_lines": null }
from flexx.util.testing import run_tests_if_main, raises from flexx import pyscript from flexx.pyscript import JSError, py2js, evaljs, evalpy, Parser def nowhitespace(s): return s.replace('\n', '').replace('\t', '').replace(' ', '') class TestParser(Parser): def function_foo_foo(self, node): return 'xxx' def method_bar_bar(self, node, base): return base class TestTheParser: def test_special_functions(self): assert TestParser("foo_foo()").dump() == 'xxx;' assert TestParser("bar_bar()").dump() == 'bar_bar();' assert TestParser("xxx.bar_bar()").dump() == 'xxx;' assert TestParser("xxx.foo_foo()").dump() == 'xxx.foo_foo();' def test_exceptions(self): raises(JSError, py2js, "foo(**kwargs)") class TestExpressions: """ Tests for single-line statements/expressions """ def test_special(self): assert py2js('') == '' assert py2js(' \n') == '' def test_ops(self): # Test code assert py2js('2+3') == '2 + 3;' # Binary assert py2js('2/3') == '2 / 3;' assert py2js('not 2') == '!2;' # Unary assert py2js('-(2+3)') == '-(2 + 3);' assert py2js('True and False') == 'true && false;' # Boolean # No parentices around names, numbers and strings assert py2js('foo - bar') == "foo - bar;" assert py2js('_foo3 - _bar4') == "_foo3 - _bar4;" assert py2js('3 - 4') == "3 - 4;" assert py2js('"abc" - "def"') == '"abc" - "def";' assert py2js("'abc' - 'def'") == '"abc" - "def";' assert py2js("'\"abc\" - \"def\"'") == '"\\"abc\\" - \\"def\\"";' # But they should be if it gets more complex assert py2js('foo - bar > 4') == "(foo - bar) > 4;" # Test outcome assert evalpy('2+3') == '5' # Binary assert evalpy('6/3') == '2' assert evalpy('4//3') == '1' assert evalpy('2**8') == '256' assert evalpy('not True') == 'false' # Unary assert evalpy('- 3') == '-3' assert evalpy('True and False') == 'false' # Boolean assert evalpy('True or False') == 'true' # Bug assert evalpy('(9-3-3)/3') == '1' # string formatting assert evalpy('"%s" % "bar"') == 'bar' assert evalpy('"-%s-" % "bar"') == '-bar-' assert evalpy('"foo %s 
foo" % "bar"') == 'foo bar foo' assert evalpy('"x %i" % 6') == 'x 6' assert evalpy('"x %f" % 6') == 'x 6' assert evalpy('"%s: %f" % ("value", 6)') == 'value: 6' assert evalpy('"%r: %r" % ("value", 6)') == '"value": 6' def test_overloaded_list_ops(self): assert evalpy('[1, 2] + [3, 4]') == '[ 1, 2, 3, 4 ]' assert evalpy('[3, 4] + [1, 2]') == '[ 3, 4, 1, 2 ]' assert evalpy('"ab" + "cd"') == 'abcd' assert evalpy('[3, 4] * 2') == '[ 3, 4, 3, 4 ]' assert evalpy('2 * [3, 4]') == '[ 3, 4, 3, 4 ]' assert evalpy('"ab" * 2') == 'abab' assert evalpy('2 * "ab"') == 'abab' assert evalpy('a = [1, 2]; a += [3, 4]; a') == '[ 1, 2, 3, 4 ]' assert evalpy('a = [3, 4]; a += [1, 2]; a') == '[ 3, 4, 1, 2 ]' assert evalpy('a = [3, 4]; a *= 2; a') == '[ 3, 4, 3, 4 ]' assert evalpy('a = "ab"; a *= 2; a') == 'abab' def test_raw_js_overloading(self): # more RawJS tests in test_parser3.py s1 = 'a=3; b=4; c=1; a + b - c' s2 = 'a=3; b=4; c=1; RawJS("a + b") - c' assert evalpy(s1) == '6' assert evalpy(s2) == '6' assert 'pyfunc' in py2js(s1) assert 'pyfunc' not in py2js(s2) def test_overload_funcs_dont_overload_real_funcs(self): assert evalpy('def add(a, b): return a-b\n\nadd(4, 1)') == '3' assert evalpy('def op_add(a, b): return a-b\n\nop_add(4, 1)') == '3' def test_comparisons(self): assert py2js('4 > 3') == '4 > 3;' assert py2js('4 is 3') == '4 === 3;' assert evalpy('4 > 4') == 'false' assert evalpy('4 >= 4') == 'true' assert evalpy('4 < 3') == 'false' assert evalpy('4 <= 4') == 'true' assert evalpy('4 == 3') == 'false' assert evalpy('4 != 3') == 'true' assert evalpy('4 == "4"') == 'true' # yuck! 
assert evalpy('4 is "4"') == 'false' assert evalpy('4 is not "4"') == 'true' assert evalpy('"c" in "abcd"') == 'true' assert evalpy('"x" in "abcd"') == 'false' assert evalpy('"x" not in "abcd"') == 'true' assert evalpy('3 in [1,2,3,4]') == 'true' assert evalpy('9 in [1,2,3,4]') == 'false' assert evalpy('9 not in [1,2,3,4]') == 'true' assert evalpy('"bar" in {"foo": 3}') == 'false' assert evalpy('"foo" in {"foo": 3}') == 'true' # was a bug assert evalpy('not (1 is null and 1 is null)') == 'true' def test_deep_comparisons(self): # List arr = '[(1,2), (3,4), (5,6), (1,2), (7,8)]\n' assert evalpy('a=' + arr + '(1,2) in a') == 'true' assert evalpy('a=' + arr + '(7,8) in a') == 'true' assert evalpy('a=' + arr + '(3,5) in a') == 'false' assert evalpy('a=' + arr + '3 in a') == 'false' assert evalpy('(2, 3) == (2, 3)') == 'true' assert evalpy('[2, 3] == [2, 3]') == 'true' assert evalpy('a=' + arr + 'b=' + arr + 'a==b') == 'true' # Dict dct = '{"a":7, 3:"foo", "bar": 1, "9": 3}\n' assert evalpy('d=' + dct + '"a" in d') == 'true' assert evalpy('d=' + dct + '"3" in d') == 'true' assert evalpy('d=' + dct + '3 in d') == 'true' assert evalpy('d=' + dct + '"bar" in d') == 'true' assert evalpy('d=' + dct + '9 in d') == 'true' assert evalpy('d=' + dct + '"9" in d') == 'true' assert evalpy('d=' + dct + '7 in d') == 'false' assert evalpy('d=' + dct + '"1" in d') == 'false' assert evalpy('{2: 3} == {"2": 3}') == 'true' assert evalpy('dict(foo=7) == {"foo": 7}') == 'true' assert evalpy('a=' + dct + 'b=' + dct + 'a==b') == 'true' assert evalpy('{"foo": 1, "bar": 2}=={"bar": 2, "foo": 1}') == 'true' assert evalpy('{"bar": 2, "foo": 1}=={"foo": 1, "bar": 2}') == 'true' # Deeper d1 = 'd1={"foo": [2, 3, {1:2,3:4,5:["aa", "bb"]}], "bar": None}\n' d2 = 'd2={"bar": None, "foo": [2, 3, {5:["aa", "bb"],1:2,3:4}]}\n' # same d3 = 'd3={"foo": [2, 3, {1:2,3:4,5:["aa", "b"]}], "bar": None}\n' # minus b assert evalpy(d1+d2+d3+'d1 == d2') == 'true' assert evalpy(d1+d2+d3+'d2 == d1') == 'true' assert 
evalpy(d1+d2+d3+'d1 != d2') == 'false' assert evalpy(d1+d2+d3+'d1 == d3') == 'false' assert evalpy(d1+d2+d3+'d1 != d3') == 'true' # assert evalpy(d1+d2+d3+'d2 in [2, d1, 4]') == 'true' assert evalpy(d1+d2+d3+'d2 in ("xx", d2, None)') == 'true' assert evalpy(d1+d2+d3+'d2 not in (1, d3, 2)') == 'true' assert evalpy(d1+d2+d3+'4 in [2, d1, 4]') == 'true' def test_truthfulness_of_basic_types(self): # Numbers assert evalpy('"T" if (1) else "F"') == 'T' assert evalpy('"T" if (0) else "F"') == 'F' # Strings assert evalpy('"T" if ("a") else "F"') == 'T' assert evalpy('"T" if ("") else "F"') == 'F' # None - undefined assert evalpy('None is null') == 'true' assert evalpy('None is undefined') == 'false' assert evalpy('undefined is undefined') == 'true' def test_truthfulness_of_array_and_dict(self): # Arrays assert evalpy('bool([1])') == 'true' assert evalpy('bool([])') == 'false' # assert evalpy('"T" if ([1, 2, 3]) else "F"') == 'T' assert evalpy('"T" if ([]) else "F"') == 'F' # assert evalpy('if [1]: "T"\nelse: "F"') == 'T' assert evalpy('if []: "T"\nelse: "F"') == 'F' # assert evalpy('if [1] and 1: "T"\nelse: "F"') == 'T' assert evalpy('if [] and 1: "T"\nelse: "F"') == 'F' assert evalpy('if [] or 1: "T"\nelse: "F"') == 'T' # assert evalpy('[2] or 42') == '[ 2 ]' assert evalpy('[] or 42') == '42' # Dicts assert evalpy('bool({1:2})') == 'true' assert evalpy('bool({})') == 'false' # assert evalpy('"T" if ({"foo": 3}) else "F"') == 'T' assert evalpy('"T" if ({}) else "F"') == 'F' # assert evalpy('if {1:2}: "T"\nelse: "F"') == 'T' assert evalpy('if {}: "T"\nelse: "F"') == 'F' # assert evalpy('if {1:2} and 1: "T"\nelse: "F"') == 'T' assert evalpy('if {} and 1: "T"\nelse: "F"') == 'F' assert evalpy('if {} or 1: "T"\nelse: "F"') == 'T' # assert evalpy('{1:2} or 42') == "{ '1': 2 }" assert evalpy('{} or 42') == '42' assert evalpy('{} or 0') == '0' assert evalpy('None or []') == '[]' # Eval extra types assert evalpy('null or 42') == '42' assert evalpy('ArrayBuffer(4) or 42') != '42' # 
No bools assert py2js('if foo: pass').count('_truthy') assert py2js('if foo.length: pass').count('_truthy') == 0 assert py2js('if 3: pass').count('_truthy') == 0 assert py2js('if True: pass').count('_truthy') == 0 assert py2js('if a == 3: pass').count('_truthy') == 0 assert py2js('if a is 3: pass').count('_truthy') == 0 def test_indexing_and_slicing(self): c = 'a = [1, 2, 3, 4, 5]\n' # Indexing assert evalpy(c + 'a[2]') == '3' assert evalpy(c + 'a[-2]') == '4' # Slicing assert evalpy(c + 'a[:]') == '[ 1, 2, 3, 4, 5 ]' assert evalpy(c + 'a[1:-1]') == '[ 2, 3, 4 ]' def test_assignments(self): assert py2js('foo = 3') == 'var foo;\nfoo = 3;' # with var assert py2js('foo.bar = 3') == 'foo.bar = 3;' # without var assert py2js('foo[i] = 3') == 'foo[i] = 3;' # without var code = py2js('foo = 3; bar = 4') # define both assert code.count('var') == 1 code = py2js('foo = 3; foo = 4') # only define first time assert code.count('var') == 1 code = py2js('foo = bar = 3') # multiple assignment assert 'foo = bar = 3' in code assert 'var bar, foo' in code # alphabetic order # self -> this assert py2js('self') == 'this;' assert py2js('self.foo') == 'this.foo;' # Indexing assert evalpy('a=[0,0]\na[0]=2\na[1]=3\na', False) == '[2,3]' # Tuple unpacking evalpy('x=[1,2,3]\na, b, c = x\nb', False) == '2' evalpy('a,b,c = [1,2,3]\nc,b,a = a,b,c\n[a,b,c]', False) == '[3,2,1]' # For unpacking, test that variables are declared, but not when attr or index assert py2js('xx, yy = 3, 4').count('xx') == 2 assert py2js('xx[0], yy[0] = 3, 4').count('xx') == 1 assert py2js('xx.a, yy.a = 3, 4').count('xx') == 1 # Class variables don't get a var code = py2js('class Foo:\n bar=3\n bar = bar + 1') assert code.count('bar') == 3 assert code.count('Foo.Ƥ.bar') == 3 def test_aug_assignments(self): # assign + bin op assert evalpy('x=5; x+=1; x') == '6' assert evalpy('x=5; x/=2; x') == '2.5' assert evalpy('x=5; x**=2; x') == '25' assert evalpy('x=5; x//=2; x') == '2' def test_basic_types(self): assert 
py2js('True') == 'true;' assert py2js('False') == 'false;' assert py2js('None') == 'null;' assert py2js('"bla\\"bla"') == '"bla\\"bla";' assert py2js('3') == '3;' assert py2js('3.1415') == '3.1415;' assert py2js('[1,2,3]') == '[1, 2, 3];' assert py2js('(1,2,3)') == '[1, 2, 3];' assert py2js('{foo: 3, bar: 4}') == '{foo: 3, bar: 4};' def test_ignore_import_of_compiler(self): modname = pyscript.__name__ assert py2js('from %s import x, y, z\n42' % modname) == '42;' def test_import(self): with raises(JSError): py2js('import time') # But we do support special time funcs import time assert abs(float(evalpy('time()')) - time.time()) < 0.5 evalpy('t0=perf_counter(); t1=perf_counter(); (t1-t0)').startswith('0.0') def test_funcion_call(self): jscode = 'var foo = function (x, y) {return x+y;};' assert evaljs(jscode + py2js('foo(2,2)')) == '4' assert evaljs(jscode + py2js('foo("so ", True)')) == 'so true' assert evaljs(jscode + py2js('a=[1,2]; foo(*a)')) == '3' assert evaljs(jscode + py2js('a=[1,2]; foo(7, *a)')) == '8' # Test super (is tested for real in test_parser3.py assert evalpy('d={"_base_class": console};d._base_class.log(4)') == '4' assert evalpy('d={"_base_class": console};d._base_class.log()') == '' jscode = 'var foo = function () {return this.val};' jscode += 'var d = {"foo": foo, "val": 7};\n' assert evaljs(jscode + py2js('d["foo"]()')) == '7' assert evaljs(jscode + py2js('d["foo"](*[3, 4])')) == '7' def test_instantiation(self): # Test creating instances assert 'new' in py2js('a = Bar()') assert 'new' in py2js('a = x.Bar()') assert 'new' not in py2js('a = foo()') assert 'new' not in py2js('a = _foo()') assert 'new' not in py2js('a = _Foo()') assert 'new' not in py2js('a = this.Bar()') assert 'new' not in py2js('a = JSON.stringify(x)') jscode = 'function Bar() {this.x = 3}\nvar x=1;\n' assert evaljs(jscode + py2js('a=Bar()\nx')) == '1' # Existing classes and functions are used to determine if a # call is an instantiation assert 'new' in py2js('class foo:pass\na = 
foo()') assert 'new' not in py2js('class foo:pass\ndef foo():pass\na = foo()') assert 'new' not in py2js('def foo():pass\nclass foo:pass\na = foo()') # assert 'new' not in py2js('def Bar():pass\na = Bar()') assert 'new' in py2js('def Bar():pass\nclass Bar:pass\na = Bar()') assert 'new' in py2js('class Bar:pass\ndef Bar():pass\na = Bar()') def test_pass(self): assert py2js('pass') == '' def test_delete(self): assert evalpy('d={}\nd.foo=3\n\nd') == "{ foo: 3 }" assert evalpy('d={}\nd.foo=3\ndel d.foo\nd') == '{}' assert evalpy('d={}\nd.foo=3\nd.bar=3\ndel d.foo\nd') == '{ bar: 3 }' assert evalpy('d={}\nd.foo=3\nd.bar=3\ndel d.foo, d["bar"]\nd') == '{}' class TestModules: def test_module(self): code = Parser('"docstring"\nfoo=3;bar=4;_priv=0;', 'foo.py').dump() # Has docstring assert code.count('// docstring') == 1 run_tests_if_main()
{ "repo_name": "JohnLunzer/flexx", "path": "flexx/pyscript/tests/test_parser1.py", "copies": "1", "size": "15251", "license": "bsd-2-clause", "hash": -8088106578414902000, "line_mean": 38.6103896104, "line_max": 85, "alpha_frac": 0.4626229508, "autogenerated": false, "ratio": 2.979097479976558, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8814420371920169, "avg_score": 0.025460011771277782, "num_lines": 385 }
from flexx.util.testing import run_tests_if_main, raises from flexx import pyscript from flexx.pyscript import RawJS def test_stubs(): from flexx.pyscript.stubs import window, undefined, omgnotaname assert isinstance(window, pyscript.JSConstant) assert isinstance(undefined, pyscript.JSConstant) assert isinstance(omgnotaname, pyscript.JSConstant) def test_raw_js(): with raises(TypeError): RawJS() with raises(TypeError): RawJS(3) # Empty r1 = RawJS('') assert str(r1) == '' assert r1.get_code() == '' assert r1.get_code(4) == '' assert '0' in repr(r1) assert r1.__module__.endswith(__name__) # Short single line r2 = RawJS('require("foobar")') assert 'require(' in repr(r2) assert 'require(' in str(r2) assert r2.get_code().startswith('require') assert r2.get_code(4).startswith(' require') assert r2.get_code(2).startswith(' require') assert '\n' not in r2.get_code() # Long single line r2b = RawJS('require("foobar")'*10) assert 'require(' not in repr(r2b) assert '1' in repr(r2b) # Multiline, start at first line r3 = RawJS("""for ... { yyyy } """) assert 'lines' in repr(r3) assert 'for ...' in str(r3) assert str(r3).endswith('}\n') assert r3.get_code().count('\n') == 3 assert r3.get_code().startswith('for') assert r3.get_code(4).startswith(' for') assert '\n yyyy\n' in r3.get_code(0) assert '\n yyyy\n' in r3.get_code(4) # Multiline, exactly the same, but start at second line; same results r4 = RawJS(""" for ... { yyyy } """) assert 'lines' in repr(r4) assert 'for ...' in str(r4) assert str(r4).endswith('}\n') assert r4.get_code().count('\n') == 3 assert r4.get_code().startswith('for') assert r4.get_code(4).startswith(' for') assert '\n yyyy\n' in r4.get_code(0) assert '\n yyyy\n' in r4.get_code(4) # Multiline, now newline at the ned r5 = RawJS(""" for ... { yyyy }""") assert r5.get_code().count('\n') == 2 assert str(r5).endswith('}') run_tests_if_main()
{ "repo_name": "JohnLunzer/flexx", "path": "flexx/pyscript/tests/test_stubs.py", "copies": "1", "size": "2290", "license": "bsd-2-clause", "hash": 1642852438011168000, "line_mean": 26.9268292683, "line_max": 73, "alpha_frac": 0.5620087336, "autogenerated": false, "ratio": 3.239038189533239, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.43010469231332393, "avg_score": null, "num_lines": null }
# Tests for parser0.unify(), which decides whether a piece of generated JS
# needs to be wrapped in parentheses to be safely embeddable in a larger
# expression.
from flexx.util.testing import run_tests_if_main, raises

from flexx.pyscript.parser0 import JSError, unify
from flexx import pyscript


def test_unify():
    """Atoms (literals, names, calls, indexing, already-bracketed text)
    pass through unchanged; compound expressions get wrapped in parens."""
    # Simple objects
    assert unify('3') == '3'
    assert unify('3.12') == '3.12'
    assert unify('"aa"') == '"aa"'
    assert unify("'aa'") == "'aa'"

    # Simple names
    assert unify('foo') == 'foo'
    assert unify('foo.bar') == 'foo.bar'
    assert unify('foo_12') == 'foo_12'

    # Simple calls
    assert unify('foo()') == 'foo()'
    assert unify('bar.fo_o()') == 'bar.fo_o()'

    # Anything that already has braces or []
    assert unify('(foo)') == '(foo)'
    assert unify('(3 + 3)') == '(3 + 3)'
    assert unify('[2, 3]') == '[2, 3]'

    # Func calls with args (but no extra braces)
    assert unify('xxxxx(some args bla)') == 'xxxxx(some args bla)'
    assert unify('foo(3)') == 'foo(3)'

    # Indexing
    assert unify('foo[1]') == 'foo[1]'
    assert unify('bar.foo[1:2,3]') == 'bar.foo[1:2,3]'

    # Dict
    assert unify('{a:3, b:"5"}') == '{a:3, b:"5"}'

    # Otherwise ... braces!  (operators between sub-expressions force a wrap)
    assert unify('3+3') == '(3+3)'
    assert unify('(3)+(3)') == '((3)+(3))'
    assert unify('[3]+[3]') == '([3]+[3])'
    assert unify('foo((3))') == '(foo((3)))'
    assert unify('bar+foo(3)') == '(bar+foo(3))'
    assert unify('b + {a:3}') == '(b + {a:3})'


run_tests_if_main()
{ "repo_name": "JohnLunzer/flexx", "path": "flexx/pyscript/tests/test_parser0.py", "copies": "1", "size": "1385", "license": "bsd-2-clause", "hash": -5005617526395254000, "line_mean": 27.2653061224, "line_max": 66, "alpha_frac": 0.5090252708, "autogenerated": false, "ratio": 2.867494824016563, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8713254788694115, "avg_score": 0.0326530612244898, "num_lines": 49 }
from flexx.util.testing import run_tests_if_main, raises import sys from flexx import app from flexx.app import Session from flexx.app._assetstore import assets, AssetStore as _AssetStore class AssetStore(_AssetStore): _test_mode = True class Fooo1(app.Model): x = 3 def test_session_basics(): s = Session('xx') assert s.app_name == 'xx' assert 'xx' in repr(s) def test_get_model_instance_by_id(): # is really a test for the session, but historically, the test is done here # This test needs a default session session = app.manager.get_default_session() if session is None: session = app.manager.create_default_session() m1 = Fooo1() m2 = Fooo1() assert m1 is not m2 assert session.get_model_instance_by_id(m1.id) is m1 assert session.get_model_instance_by_id(m2.id) is m2 assert session.get_model_instance_by_id('blaaaa') is None def test_session_assets_data(): store = AssetStore() store.add_shared_data('ww', b'wwww') s = Session('', store) s._send_command = lambda x: None assert s.id # Add data s.add_data('xx', b'xxxx') s.add_data('yy', b'yyyy') assert len(s.get_data_names()) == 2 assert 'xx' in s.get_data_names() assert 'yy' in s.get_data_names() # get_data() assert s.get_data('xx') == b'xxxx' assert s.get_data('zz') is None assert s.get_data('ww') is b'wwww' # # Add url data # s.add_data('readme', 'https://github.com/zoofIO/flexx/blob/master/README.md') # #assert 'Flexx is' in s.get_data('readme').decode() # assert s.get_data('readme').startswith('https://github') # Add data with same name with raises(ValueError): s.add_data('xx', b'zzzz') # Add BS data with raises(TypeError): s.add_data('dd') # no data with raises(TypeError): s.add_data('dd', 4) # not an asset if sys.version_info > (3, ): with raises(TypeError): s.add_data('dd', 'not bytes') with raises(TypeError): s.add_data(b'dd', b'yes, bytes') # name not str with raises(TypeError): s.add_data(4, b'zzzz') # name not a str # get_data() assert s.get_data('xx') is b'xxxx' assert s.get_data('ww') is store.get_data('ww') 
assert s.get_data('ww') == b'wwww' assert s.get_data('bla') is None def test_session_registering_model_classes(): from flexx import ui store = AssetStore() store.update_modules() s = Session('', store) commands = [] s._send_command = lambda x: commands.append(x) assert not s.present_modules s._register_model_class(ui.Button) assert len(s.present_modules) == 2 assert 'flexx.ui._widget' in s.present_modules assert 'flexx.ui.widgets._button' in s.present_modules assert len(s._present_classes) == 6 # Because a module was loaded that has more widgets assert ui.Button in s._present_classes assert ui.RadioButton in s._present_classes assert ui.CheckBox in s._present_classes assert ui.ToggleButton in s._present_classes assert ui.BaseButton in s._present_classes assert ui.Widget in s._present_classes with raises(TypeError): s._register_model_class(3) ## Prepare module loading tests from flexx.app._model import new_type PKG_NAME = 'flxtest2' def add_prefix(n): if isinstance(n, list): return [add_prefix(i) for i in n] elif n.startswith('foo.'): return PKG_NAME + '.' + n else: return n def teardown_module(): clear_test_classes() def clear_test_classes(): for cls in list(app.Model.CLASSES): if cls.__jsmodule__.startswith(PKG_NAME + '.'): app.Model.CLASSES.remove(cls) def fakemodel_init(self, s): self._session = s self._id = 'FakeModel' def fakemodel_setattr(self, s, v): return object.__setattr__(self, s, v) def fakemodel_del(self): pass Model_overload = dict(__init__=fakemodel_init, __setattr__=fakemodel_setattr, __del__=fakemodel_del, ) class SessionTester(Session): """ A session subclass that keeps track of DEFINE commands. 
""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.assets_js = [] self.assets_css = [] def _send_command(self, command): if command.startswith('DEFINE-JS'): _, name, _ = command.split(' ', 2) self.assets_js.append(name) elif command.startswith('DEFINE-CSS'): _, name, _ = command.split(' ', 2) self.assets_css.append(name) class FakeModule: """ An object that looks and walks like a JSModule. Enough to fool Flexx' internals. """ def __init__(self, store, name): self.name = add_prefix(name) self.deps = set() self.model_classes = set() store._modules[self.name] = self b1 = app.Bundle(self.name + '.js') b2 = app.Bundle(self.name + '.css') b1.add_module(self) b2.add_module(self) store._assets[b1.name] = b1 store._assets[b2.name] = b2 def make_model_class(self, name, base=app.Model): cls = new_type(name, (base, ), Model_overload) self.model_classes.add(cls) cls.__module__ = self.name cls.__jsmodule__ = self.name self.deps.add(base.__jsmodule__) return cls def add_variable(self, name): assert name in [m.__name__ for m in self.model_classes] def get_js(self): return self.name + '-JS' def get_css(self): return self.name + '-CSS' ## Test module loading def test_module_loading1(): """ Simple case. 
""" clear_test_classes() store = AssetStore() s = SessionTester('', store) m1 = FakeModule(store, 'foo.m1') m2 = FakeModule(store, 'foo.m2') Ma = m1.make_model_class('Ma') Mb = m1.make_model_class('Mb') Mc = m2.make_model_class('Mc') s._register_model(Ma(s)) s._register_model(Mb(s)) s._register_model(Mc(s)) assert s.assets_js == add_prefix(['foo.m1.js', 'foo.m2.js']) assert s.assets_css == add_prefix(['foo.m1.css', 'foo.m2.css']) def test_module_loading2(): """ No deps """ clear_test_classes() store = AssetStore() s = SessionTester('', store) m1 = FakeModule(store, 'foo.m1') m2 = FakeModule(store, 'foo.m2') m3 = FakeModule(store, 'foo.m3') Ma = m2.make_model_class('Ma') # m2.deps = add_prefix(['foo.m3']) # m3.deps = add_prefix(['foo.m1']) s._register_model(Ma(s)) assert s.assets_js == add_prefix(['foo.m2.js']) def test_module_loading3(): """ Dependencies get defined too (and before) """ clear_test_classes() store = AssetStore() s = SessionTester('', store) m1 = FakeModule(store, 'foo.m1') m2 = FakeModule(store, 'foo.m2') m3 = FakeModule(store, 'foo.m3') Ma = m2.make_model_class('Ma') m2.deps = add_prefix(['foo.m3']) m3.deps = add_prefix(['foo.m1']) s._register_model(Ma(s)) assert s.assets_js == add_prefix(['foo.m1.js', 'foo.m3.js', 'foo.m2.js']) def test_module_loading4(): """ Dependencies by inheritance """ # A bit silly; the JSModule (and our FakeModule) handles this dependency clear_test_classes() store = AssetStore() s = SessionTester('', store) m1 = FakeModule(store, 'foo.m1') m2 = FakeModule(store, 'foo.m2') m3 = FakeModule(store, 'foo.m3') Ma = m2.make_model_class('Ma') Mb = m3.make_model_class('Mb', Ma) Mc = m1.make_model_class('Mc', Mb) s._register_model(Mc(s)) assert s.assets_js == add_prefix(['foo.m2.js', 'foo.m3.js', 'foo.m1.js']) def test_module_loading5(): """ Associated assets """ # A bit silly; the JSModule (and our FakeModule) handles this dependency clear_test_classes() store = AssetStore() s = SessionTester('', store) m1 = FakeModule(store, 
'foo.m1') m2 = FakeModule(store, 'foo.m2') m3 = FakeModule(store, 'foo.m3') store.add_shared_asset('spam.js', 'XX') store.associate_asset(add_prefix('foo.m1'), 'spam.js') store.associate_asset(add_prefix('foo.m2'), 'eggs.js', 'YY') store.associate_asset(add_prefix('foo.m2'), 'spam.js') store.associate_asset(add_prefix('foo.m2'), 'bla.css', 'ZZ') store.associate_asset(add_prefix('foo.m3'), 'bla.css') Ma = m1.make_model_class('Ma') Mb = m2.make_model_class('Mb') Mc = m3.make_model_class('Mc') s._register_model(Ma(s)) s._register_model(Mb(s)) s._register_model(Mc(s)) assert s.assets_js == add_prefix(['spam.js', 'foo.m1.js', 'eggs.js', 'foo.m2.js', 'foo.m3.js']) assert s.assets_css == add_prefix(['foo.m1.css', 'bla.css', 'foo.m2.css', 'foo.m3.css']) # clear_test_classes() # test_module_loading5() # clear_test_classes() run_tests_if_main()
{ "repo_name": "JohnLunzer/flexx", "path": "flexx/app/tests/test_session.py", "copies": "1", "size": "9097", "license": "bsd-2-clause", "hash": -5543440167746337000, "line_mean": 26.1552238806, "line_max": 99, "alpha_frac": 0.5879960427, "autogenerated": false, "ratio": 3.173003139169864, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.42609991818698645, "avg_score": null, "num_lines": null }
from flickrBase import *

#Gets information about a photoset.
class FlickrPhotoSetGetInfo(FlickrApiMethod):
    name='flickr.photosets.getInfo'

    def __init__(self,nojsoncallback=True,format='json',parameters=None,photoset_id=None):
        # photoset_id identifies the set to describe (required by the API).
        self.photoset_id = photoset_id
        FlickrApiMethod.__init__(self,nojsoncallback,format,parameters)

    def getParameters(self):
        """Return the request parameters for flickr.photosets.getInfo."""
        p={
            'method':self.name,
            'photoset_id':self.photoset_id
        }
        return p

#Returns the photosets belonging to the specified user.
class FlickrPhotoSetsGetList(FlickrApiMethod):
    name='flickr.photosets.getList'

    def __init__(self,nojsoncallback=True,format='json',parameters=None,user_id=None):
        # Bug fix: user_id was accepted here but never stored, so the call
        # could only ever list the *calling* user's sets.  Keep it so that
        # getParameters() can send it.  The default (None) preserves the old
        # behaviour — the Flickr API then defaults to the authenticated user.
        self.user_id = user_id
        FlickrApiMethod.__init__(self,nojsoncallback,format,parameters)

    def getParameters(self):
        """Return the request parameters; includes user_id when one was given."""
        p={
            'method':self.name
        }
        if self.user_id is not None:
            p['user_id'] = self.user_id
        return p

    def getSetIDs(self):
        """Return the ids of all photosets found in the parsed JSON response."""
        l=[]
        for o in self.json["photosets"]["photoset"]:
            l.append(o["id"])
        return l

#Get the list of photos in a set.
class FlickrPhotoSetsGetPhotos(FlickrApiMethod):
    name='flickr.photosets.getPhotos'

    def __init__(self,nojsoncallback=True,format='json',parameters=None,photoset_id=None,page=1):
        self.photoset_id = photoset_id
        self.page = page  # current results page; Flickr pages are 1-based
        FlickrApiMethod.__init__(self,nojsoncallback,format,parameters)

    def getParameters(self):
        """Return the request parameters (photos only, 500 per page)."""
        p={
            'method':self.name,
            'media':'photos',
            'per_page':500,
            'page':self.page,
            'photoset_id':self.photoset_id
        }
        return p

    def getPhotoIds(self):
        """Collect the photo ids of the whole set across all result pages.

        Assumes self.loaded / self.json were populated by a previous call;
        advances self.page and re-issues makeCall() until the last page.
        """
        l =[]
        if(self.loaded):
            for o in self.json["photoset"]["photo"]:
                l.append(o["id"])
            while(self.page < self.json["photoset"]["pages"]):
                self.page = self.page + 1
                if(self.makeCall()):
                    for o in self.json["photoset"]["photo"]:
                        l.append(o["id"])
        return l
{ "repo_name": "kelsmj/FlickrOAuth", "path": "photosets.py", "copies": "1", "size": "1739", "license": "mit", "hash": 3445396362209054700, "line_mean": 25.3636363636, "line_max": 94, "alpha_frac": 0.6877515814, "autogenerated": false, "ratio": 2.8461538461538463, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4033905427553846, "avg_score": null, "num_lines": null }
# Thin wrappers around the Flickr "photos" API methods.  Each class supplies
# a method name plus its request parameters; the actual signing/calling is
# handled by FlickrApiMethod (star-imported from flickrBase).
from flickrBase import *

#Returns all visible sets and pools the photo belongs to.
class FlickrPhotosGetAllContexts(FlickrApiMethod):
    name = 'flickr.photos.getAllContexts'
    def __init__(self,nojsoncallback=True,format='json',parameters=None,photo_id=None):
        self.photo_id=photo_id
        FlickrApiMethod.__init__(self,nojsoncallback,format,parameters)
    def getParameters(self):
        # Request parameters for flickr.photos.getAllContexts.
        p={
            'method':self.name,
            'photo_id':self.photo_id
        }
        return p

#Fetch a list of recent photos from the calling users' contacts.
class FlickrPhotosGetContactsPhotos(FlickrApiMethod):
    name = 'flickr.photos.getContactsPhotos'
    def __init__(self,nojsoncallback=True,format='json',parameters=None):
        FlickrApiMethod.__init__(self,nojsoncallback,format,parameters)
    def getParameters(self):
        p={
            #For some reason this api call expects
            #oauth_signature_method as a parameter
            'oauth_signature_method': "HMAC-SHA1",
            'method':self.name
        }
        return p

#Fetch a list of recent public photos from a users' contacts.
class FlickrPhotosGetContactsPublicPhotos(FlickrApiMethod):
    name = 'flickr.photos.getContactsPublicPhotos'
    def __init__(self,nojsoncallback=True,format='json',parameters=None,user_id=None):
        self.user_id=user_id
        FlickrApiMethod.__init__(self,nojsoncallback,format,parameters)
    def getParameters(self):
        p={
            'method':self.name,
            'user_id':self.user_id
        }
        return p

#Returns next and previous photos for a photo in a photostream.
class FlickrPhotosGetContext(FlickrApiMethod):
    name = 'flickr.photos.getContext'
    def __init__(self,nojsoncallback=True,format='json',parameters=None,photo_id=None):
        self.photo_id=photo_id
        FlickrApiMethod.__init__(self,nojsoncallback,format,parameters)
    def getParameters(self):
        p={
            'method':self.name,
            'photo_id':self.photo_id
        }
        return p

#Gets a list of photo counts for the given date ranges for the calling user.
class FlickrPhotosGetCounts(FlickrApiMethod):
    name = 'flickr.photos.getCounts'
    def __init__(self,nojsoncallback=True,format='json',parameters=None):
        FlickrApiMethod.__init__(self,nojsoncallback,format,parameters)
    def getParameters(self):
        p={
            'method':self.name
        }
        return p

#Returns the available sizes for a photo. The calling user must have permission to view the photo.
class FlickrPhotosGetSizes(FlickrApiMethod):
    name ='flickr.photos.getSizes'
    def __init__(self,nojsoncallback=True,format='json',parameters=None,photo_id=None):
        self.photo_id = photo_id
        FlickrApiMethod.__init__(self,nojsoncallback,format,parameters)
    def getParameters(self):
        p ={
            'method':self.name,
            'photo_id':self.photo_id
        }
        return p
    def writePhotos(self):
        # Download every available size of the photo into ./pictures/<label>/.
        # NOTE(review): urllib2 and os are presumed to come in via the
        # star-import from flickrBase — confirm; they are not imported here.
        for o in self.json["sizes"]["size"]:
            opener = urllib2.build_opener()
            page = opener.open(o["source"])
            my_picture = page.read()
            # One sub-directory per size label (e.g. Thumbnail, Large).
            dir = './pictures/' + o["label"]
            if not os.path.exists(dir):
                os.makedirs(dir)
            # Name the file after the photo id, keeping the source's
            # 4-char extension suffix (e.g. ".jpg").
            filename = self.photo_id + o["source"][-4:]
            print filename
            fout = open(os.path.join(dir,filename),"wb")
            fout.write(my_picture)
            fout.close()

#Get information about a photo. The calling user must have permission to view the photo.
class FlickrPhotosGetInfo(FlickrApiMethod):
    name = 'flickr.photos.getInfo'
    def __init__(self,nojsoncallback=True,format='json',parameters=None,photo_id=None):
        self.photo_id = photo_id
        FlickrApiMethod.__init__(self,nojsoncallback,format,parameters)
    def getParameters(self):
        p={
            'method':self.name,
            'photo_id':self.photo_id
        }
        return p
{ "repo_name": "kelsmj/FlickrOAuth", "path": "photos.py", "copies": "1", "size": "3526", "license": "mit", "hash": 12193746339864444, "line_mean": 27.435483871, "line_max": 98, "alpha_frac": 0.7220646625, "autogenerated": false, "ratio": 2.9755274261603377, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8253511616080307, "avg_score": 0.18881609451600617, "num_lines": 124 }
# Usage examples for the flickrDownloader helpers: bulk download, link
# listing, and raw search against the Flickr photos API.
from flickrDownloader import *

api_key = "INSERT_HERE_YOUR_API_KEY"  # if you want to insert your apikey in source code
api_key = "flickr.apikey"  # if you want to read apikey from file
# If you want to share your code in git, you may want not to share your api key too!
# In that case, insert your api key in the flickr.apikey file and add flickr.apikey in your .gitignore


# Available licenses: (from: https://www.flickr.com/services/api/explore/flickr.photos.licenses.getInfo)
#
# {"id": 0, "name": "All Rights Reserved", "url": ""},
# {"id": 4, "name": "Attribution License", "url": "https:\/\/creativecommons.org\/licenses\/by\/2.0\/"},
# {"id": 6, "name": "Attribution-NoDerivs License", "url": "https:\/\/creativecommons.org\/licenses\/by-nd\/2.0\/"},
# {"id": 3, "name": "Attribution-NonCommercial-NoDerivs License", "url": "https:\/\/creativecommons.org\/licenses\/by-nc-nd\/2.0\/"},
# {"id": 2, "name": "Attribution-NonCommercial License", "url": "https:\/\/creativecommons.org\/licenses\/by-nc\/2.0\/"},
# {"id": 1, "name": "Attribution-NonCommercial-ShareAlike License", "url": "https:\/\/creativecommons.org\/licenses\/by-nc-sa\/2.0\/"},
# {"id": 5, "name": "Attribution-ShareAlike License", "url": "https:\/\/creativecommons.org\/licenses\/by-sa\/2.0\/"},
# {"id": 7, "name": "No known copyright restrictions", "url": "https:\/\/www.flickr.com\/commons\/usage\/"},
# {"id": 8, "name": "United States Government Work", "url": "http:\/\/www.usa.gov\/copyright.shtml"},
# {"id": 9, "name": "Public Domain Dedication (CC0)", "url": "https:\/\/creativecommons.org\/publicdomain\/zero\/1.0\/"},
# {"id": 10, "name": "Public Domain Mark", "url": "https:\/\/creativecommons.org\/publicdomain\/mark\/1.0\/"}

license_id = 10  # "using public domain mark" license

# Example 1: download 100 square thumbnails matching a free-text query and
# collect the links that were fetched.
link_list = flickr_photos_downloader(api_key,
                                     query_text="david michelangelo",
                                     # tags="",
                                     tag_mode=FlickrTagMode.all,
                                     download_path="michelangelo_download",
                                     image_size=FlickrImageSize.square_150x150,
                                     n_images=100,
                                     verbose=True,
                                     license_id=license_id)

# Example 2: download with the full set of filter options spelled out
# (content type, media kind, filename prefix, error handling).
flickr_photos_downloader(api_key,
                         n_images=10,
                         query_text="Firenze",
                         tags="Art",
                         tag_mode=FlickrTagMode.any,
                         image_size=FlickrImageSize.longedge_1600,
                         content_type=FlickrContentType.photos,
                         media=FlickrMedia.photos,
                         download_path="img_downloads",
                         save_filename_prefix="flickr_downloaded_",
                         forced_extension=None,
                         verbose=True,
                         ignore_errors=False,
                         license_id=license_id)

# Example 3: only collect the image URLs, without downloading anything.
only_link = flickr_photos_links(api_key,
                                n_images=1500,
                                query_text="Firenze",
                                tags="Art",
                                tag_mode=FlickrTagMode.any,
                                image_size=FlickrImageSize.longedge_1600,
                                content_type=FlickrContentType.photos,
                                media=FlickrMedia.photos,
                                verbose=True,
                                ignore_errors=False,
                                license_id=license_id)

for i, link in enumerate(only_link):
    print str(i) + "\t-\t" + link

# Example 4: raw search — returns the API's JSON responses directly.
responsesJson = flickr_photos_search(api_key,
                                     n_images=1500,
                                     query_text="Firenze",
                                     tags="Art",
                                     tag_mode=FlickrTagMode.any,
                                     content_type=FlickrContentType.photos,
                                     media=FlickrMedia.photos,
                                     response_format=FlickrResponseFormat.JSON,
                                     license_id=license_id)
{ "repo_name": "nagash91/python-flickr-image-downloader", "path": "main_usage_examples.py", "copies": "1", "size": "4248", "license": "mit", "hash": -8719619767522959000, "line_mean": 53.4615384615, "line_max": 136, "alpha_frac": 0.5035310734, "autogenerated": false, "ratio": 3.8583106267029974, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.978953837128306, "avg_score": 0.014460665763987506, "num_lines": 78 }
# Gate scheduling: greedy assignment of flight intervals to airport gates,
# random delay injection, and re-assignment of delayed flights.
from FlightInfo import FlightInfo
from Interval import *
from Gate import Gate
from Queue import *
from random import *

def assign(intervals, num_gates=0, start_time=scheduled_start_time, end_time=scheduled_end_time):
    """Greedily assign intervals to gates, opening a new gate on conflict.

    Keeps a min-priority queue of (end time, gate); each interval (in start
    order) goes to the earliest-freeing gate if it is free in time,
    otherwise a new Gate is created.  start_time/end_time are key functions
    (defaults: the scheduled times imported from Interval).
    Returns the list of gates.
    """
    gates = [Gate() for i in xrange(0, num_gates)]

    # initialise priority queue
    # NOTE(review): -9999 marks pre-created gates as "free since forever".
    # Ties in end time make PriorityQueue compare Gate objects — fine under
    # Python 2's arbitrary ordering, would raise under Python 3; confirm.
    pq = PriorityQueue()
    for gate in gates:
        pq.put((-9999, gate))

    sorted_intervals = sorted(intervals, key=start_time)
    while sorted_intervals:
        interval = sorted_intervals.pop(0);
        if not pq.empty():
            end_time_gate = pq.get()
            earliest_end_time = end_time_gate[0]
            gate = end_time_gate[1]
            if earliest_end_time <= start_time(interval):
                # Gate frees up before this interval starts: reuse it.
                gate.append(interval)
                pq.put((end_time(interval), gate))
                continue
            # Gate not free in time; put it back and fall through.
            pq.put(end_time_gate)
        # No gate was available: open a new one holding this interval.
        pq.put((end_time(interval), Gate(interval)))

    gates = []
    while not pq.empty():
        gates.append(pq.get()[1])
    return gates

# delays flights with probability p by a number in [min_delay, max_delay] in minutes
def delay_flight_infos(flight_infos, p, min_delay, max_delay):
    for flight_info in flight_infos:
        if random() < p:
            dx = randint(min_delay, max_delay)
            flight_info.add_delay(dx)
        else:
            # Explicit zero so delayed_* times are always populated.
            flight_info.add_delay(0)

# reassigns intervals in gates if there are collisions
# returns [gates, overflow gates, # of reassignments]
def reassign(gates, intervals):
    """Move delayed intervals that now collide with their gate neighbours.

    A colliding interval is re-placed into the gate slot with the most
    slack (time until the next scheduled interval, or end of day 1439);
    if no gate has a free slot, a new overflow gate is created.
    """
    def get_slack(gate, index, interval):
        # Slack = gap between this interval's delayed end and the next
        # scheduled interval in the gate (1439 = last minute of the day).
        start = gate[index+1].scheduled_start_time if index < len(gate)-1 else 1439
        return start - interval.delayed_end_time

    # Map each interval back to the gate it was assigned to.
    interval_to_gate = {}
    for gate in gates:
        for i, interval in enumerate(gate):
            interval_to_gate[interval] = gate

    reassign_count = 0
    overflow_gates = []
    sorted_intervals = sorted(intervals, key=delayed_start_time)
    while sorted_intervals:
        interval = sorted_intervals.pop(0)
        if interval.delayed():
            gate = interval_to_gate[interval]
            index = gate.index(interval)

            # check for collisions, maybe no need to reassign
            collision = ((index > 0 and not gate[index-1].delayed_end_time <= interval.delayed_start_time) or
                         (index+1 < len(gate) and not interval.delayed_end_time <= gate[index+1].scheduled_start_time))
            if not collision:
                continue

            # find gate with most slack to reassign to
            gate.remove(interval)
            most_slack_gate = None
            most_slack_index = None
            most_slack = None
            for gate2 in gates + overflow_gates:
                index = gate2.free_index(interval)
                if index < 0:  # no free slots
                    continue
                slack = get_slack(gate2, index, interval)
                if most_slack is None or most_slack < slack:
                    most_slack_gate = gate2
                    most_slack_index = index
                    most_slack = slack
            if most_slack is None:  # no gates are free
                overflow_gates.append(Gate(interval))
            else:
                most_slack_gate.insert(most_slack_index, interval)
            reassign_count += 1
    return [gates, overflow_gates, reassign_count]
{ "repo_name": "sozos/RAS", "path": "source/schedule.py", "copies": "1", "size": "2831", "license": "mit", "hash": -1500437533526236700, "line_mean": 30.1098901099, "line_max": 98, "alpha_frac": 0.6976333451, "autogenerated": false, "ratio": 3.0342979635584135, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9036411280287526, "avg_score": 0.03910400567417748, "num_lines": 91 }
from FlightInfo import * from Interval import * # Converts time from 24h format to minutes in the range [0,1439] def time_to_minutes(time_string): t = int(time_string) t_hr = t / 100 t_min = t % 100 return t_hr * 60 + t_min # Add duration to time t def add_time(t, duration): return t + duration # Returns the time difference in minutes, time2 must be equal or later to time1 def time_diff(time1, time2): return int(math.fabs(time2 - time1)) # prints the avg probablility p of being delayed, and the avg delays defined by |depart - actual| def get_stats(): total_delay = 0 total_p = 0 total_flights = 0 for date in dates: airport_to_flight_infos = get_airports_from_date('test_data.csv', date) for airport, flight_infos in airport_to_flight_infos.items(): total_delay += sum([flight_info.delay() for flight_info in flight_infos]) total_p += sum([1 if flight_info.delayed() else 0 for flight_info in flight_infos]) total_flights += len(flight_infos) print 'total_flights', total_flights print 'avg_delay', float(total_delay)/total_flights print 'avg_p', float(total_p)/total_flights # returns the avg delays of an airport def avg_delay(flight_infos): assert isinstance(flight_infos, list) return float(sum([flight_info.delay() for flight_info in flight_infos]))/len(flight_infos) # returns the intervals from flight_infos def intervals(flight_infos): return [Interval(flight_info) for flight_info in flight_infos]
{ "repo_name": "sozos/RAS", "path": "source/util.py", "copies": "1", "size": "1447", "license": "mit", "hash": 6834640540741015000, "line_mean": 34.3170731707, "line_max": 97, "alpha_frac": 0.7277125086, "autogenerated": false, "ratio": 2.9651639344262297, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.419287644302623, "avg_score": null, "num_lines": null }
# Kayak flight-result provider: builds a Kayak search URL from a flightsearch
# request and returns flightresult records.  The actual page scraping is
# currently disabled (commented out), so the price is a placeholder.
from flightsearch import flightsearch, flightresult
from selenium import webdriver
from bs4 import BeautifulSoup
from lxml import html
import uuid
import time

def getresult(flightsearch):
    # NOTE(review): scraping via PhantomJS is commented out below, so this
    # currently returns a single result with a hard-coded '$600' price and
    # only the constructed search URL — presumably a stub; confirm intent.
    PHANTOMJS_PATH = './phantomjs.exe'
    resultuuid = uuid.uuid4()
    resultbegintime = time.time()
    results = []
    link = kayaklink(flightsearch)
    #browser = webdriver.PhantomJS(PHANTOMJS_PATH)
    #browser.get(link)
    #kayak_search = BeautifulSoup(browser.page_source, "html.parser")
    #kayak_tree = html.fromstring(kayak_search.content)
    #result = str(kayak_tree.xpath('//*[@id="priceAnchor264"]/a'))
    #result = soup.find_all('tr', {'class': 'stage-finished'})
    #kayak_search = requests.get(link)
    #if kayak_search.status_code == 200:
    #kayak_tree = html.fromstring(kayak_search.content)
    #result = kayak_search.content
    #str(kayak_tree.xpath('/html/body/table/tbody/tr[1]/td[2]/text()'))
    #//*[@id=""]/a
    #//*[@id="priceAnchor160"]/a
    #//*[@id="content_div"]/div[6]/div/div/div[1]/div[1]
    #//*[@id="infolink478"]/div[2]/div[2]/div[4]/div[1]/div[2]
    #/html/body/table/tbody/tr[2284]/td[2]/text()
    #/html/body/table/tbody/tr[1692]/td[2]/text()
    resultendtime = time.time()
    result = flightresult(searchuuid = flightsearch.searchuuid,
                          resultuuid = resultuuid,
                          flyfrom = flightsearch.flyfrom,
                          flyto = flightsearch.flyto,
                          datefrom = flightsearch.datefrom,
                          dateto = flightsearch.dateto,
                          site = 'Kayak',
                          siteurl = link,
                          price = '$600',
                          resultbegintime = resultbegintime,
                          resultendtime = resultendtime)
    results.append(result)
    return results

def kayaklink(flightsearch):
    # Assemble the Kayak deep-link:
    # /flights/<from>-<to>/<date1>/<date2>/<N>adults[/<class>][/children-...]
    kayak = 'https://www.ca.kayak.com/flights/'
    link = kayak + \
           flightsearch.flyfrom + \
           '-' + \
           flightsearch.flyto + \
           '/' + \
           flightsearch.datefrom + \
           '/' + \
           flightsearch.dateto + \
           '/' + \
           str(flightsearch.adults) + \
           'adults' + \
           getclasslink(flightsearch.fclass) + \
           getchildrenlink(flightsearch.youth, flightsearch.children, flightsearch.infantseat, flightsearch.infantlap)
    return link

def getchildrenlink(youth, children, infantseat, infantlap):
    # Kayak encodes each child as an age code appended to '/children':
    # -17 youth, -11 child, -1S infant in seat, -1l infant on lap.
    # NOTE(review): '-1l' is lower-case while '-1S' is upper — verify this
    # matches Kayak's current URL scheme.
    if youth + children + infantseat + infantlap == 0:
        return ''
    else:
        childlink = '/children'
        for i in range (0, youth):
            childlink = childlink + '-17'
        for i in range (0, children):
            childlink = childlink + '-11'
        for i in range (0, infantseat):
            childlink = childlink + '-1S'
        for i in range (0, infantlap):
            childlink = childlink + '-1l'
        return childlink

def getclasslink(fclass):
    # Map the numeric cabin-class code to Kayak's URL segment;
    # anything unrecognised falls back to economy.
    if fclass == 1:
        return '/first'
    elif fclass == 2:
        return '/business'
    elif fclass == 3:
        return '/premium'
    else:
        return '/economy'
{ "repo_name": "brahul90/cheapflights", "path": "cheapflights/getresults/getkayak.py", "copies": "1", "size": "2930", "license": "apache-2.0", "hash": 5904435456515925000, "line_mean": 32.6781609195, "line_max": 139, "alpha_frac": 0.6102389078, "autogenerated": false, "ratio": 3.2268722466960353, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4337111154496035, "avg_score": null, "num_lines": null }
# Skypicker (Kiwi.com) flight-result provider: builds the REST query URL from
# a flightsearch request and returns flightresult records.
from flightsearch import flightsearch, flightresult
import requests
import lxml
import datetime
import collections
import uuid
import time

def getresult(flightsearch):
    """Build the Skypicker query for *flightsearch* and return a list with a
    single flightresult stub (the API is not actually called yet; price is a
    placeholder)."""
    resultuuid = uuid.uuid4()
    resultbegintime = time.time()
    link = skypickerlink(flightsearch)
    results = []
    resultendtime = time.time()
    result = flightresult(searchuuid = flightsearch.searchuuid,
                          resultuuid = resultuuid,
                          flyfrom = flightsearch.flyfrom,
                          flyto = flightsearch.flyto,
                          datefrom = flightsearch.datefrom,
                          dateto = flightsearch.dateto,
                          siteurl = link,
                          site = 'Kiwi',
                          price = '$500',
                          resultbegintime = resultbegintime,
                          resultendtime = resultendtime)
    results.append(result)
    return results

def skypickerlink(flightsearch):
    """Assemble the Skypicker API URL, skipping parameters left empty.

    Bug fixes vs. the original:
      * 'dtimeto' was mapped to flightsearch.dtimefrom (copy-paste error);
        it now correctly uses flightsearch.dtimeto.
      * The OrderedDict created here used to be immediately discarded by a
        plain dict literal, so the parameter order was arbitrary; it is now
        actually built as an OrderedDict so the URL is deterministic.
    """
    skypicker = 'https://api.skypicker.com/flights?'
    linkdict = collections.OrderedDict([
        ('flyFrom={}&', flightsearch.flyfrom),
        ('to={}&', flightsearch.flyto),
        ('dateFrom={}&', transformdatetime(flightsearch.datefrom, 1)),
        ('dateTo={}&', transformdatetime(flightsearch.dateto, 1)),
        ('longitudeFrom={}&', flightsearch.longitudefrom),
        ('latitudeFrom={}&', flightsearch.latitudefrom),
        ('radiusFrom={}&', flightsearch.radiusfrom),
        ('longitudeTo={}&', flightsearch.longitudeto),
        ('latitudeTo={}&', flightsearch.latitudeto),
        ('radiusTo={}&', flightsearch.radiusto),
        ('daysInDestinationFrom={}&', flightsearch.daysindestfrom),
        ('daysInDestinationTo={}&', flightsearch.daysindestto),
        ('returnFrom={}&', flightsearch.returnfrom),
        ('returnTo={}&', flightsearch.returnto),
        ('typeFlight={}&', ''),
        ('oneforcity={}&', ''),
        ('one_per_date={}&', ''),
        ('passengers={}&', ''),
        ('adults={}&', flightsearch.adults),
        ('children={}&', flightsearch.children + flightsearch.youth),
        ('infants={}&', flightsearch.infantseat + flightsearch.infantlap),
        ('flyDays={}&', ''),
        ('onlyWorkingDays={}&', ''),
        ('onlyWeekends={}&', ''),
        ('directFlights={}&', ''),
        ('curr={}&', flightsearch.curr),
        ('price_from={}&', flightsearch.pricefrom),
        ('price_to={}&', flightsearch.priceto),
        ('dtimefrom={}&', flightsearch.dtimefrom),
        ('dtimeto={}&', flightsearch.dtimeto),
        ('atimefrom={}&', flightsearch.atimefrom),
        ('atimeto={}&', flightsearch.atimeto),
        ('returndtimefrom={}&', flightsearch.returndtimefrom),
        ('returndtimeto={}&', flightsearch.returndtimeto),
        ('returnatimefrom={}&', flightsearch.returnatimefrom),
        ('returnatimeto={}&', flightsearch.returnatimeto),
        ('stopoverfrom={}&', flightsearch.stopoverfrom),
        ('stopoverto={}&', flightsearch.stopoverto),
        ('limit={}&', '100'),
        ('sort={}&', 'price')
    ])
    link = skypicker
    for key, value in linkdict.items():
        if value != '':
            link = link + key.format(value)
    return link

def transformdatetime(dateortime, type):
    """Convert an ISO date (YYYY-MM-DD) to Skypicker's DD%2FMM%2FYYYY form.

    Only type == 1 (dates) is implemented; any other type returns None.
    """
    if type == 1:
        newdate = datetime.datetime.strptime(dateortime, '%Y-%m-%d').date()
        return str(newdate.day).zfill(2) + '%2F' + str(newdate.month).zfill(2) + '%2F' + str(newdate.year)


# Below is an example of a valid URL
#
'''
https://api.skypicker.com/flights?
flyFrom=CZ&
to=porto&
dateFrom=08%2F08%2F2017&
dateTo=08%2F09%2F2017&
longitudeFrom=14.0000&
latitudeFrom=50.2000&
radiusFrom=200&
longitudeTo=14.0000&
latitudeTo=50.2000&
radiusTo=200&
daysInDestinationFrom=2&
daysInDestinationTo=14&
returnFrom=08%2F08%2F2017&
returnTo=08%2F09%2F2017&
typeFlight=oneway&
oneforcity=0&
one_per_date=0&
passengers=1&
adults=1&
children=0&
infants=0&
flyDays=%5B0%2C1%2C2%2C3%2C4%2C5%2C6%5D&
onlyWorkingDays=0&
onlyWeekends=0&
directFlights=0&
partner=picky&
partner_market=us&
v=2&xml=0&
curr=EUR&
locale=en&
price_from=1&
price_to=10000&
dtimefrom=00%3A00&
dtimeto=00%3A00&
atimefrom=00%3A00&
atimeto=00%3A00&
returndtimefrom=00%3A00&
returndtimeto=00%3A00&
returnatimefrom=00%3A00&
returnatimeto=00%3A00&
stopoverfrom=00%3A00&
stopoverto=00%3A00&
booking_token=hashed%20data&
offset=0&
limit=30&
sort=price&
asc=1
'''
{ "repo_name": "brahul90/cheapflights", "path": "cheapflights/getresults/getskypicker.py", "copies": "1", "size": "4184", "license": "apache-2.0", "hash": 2018752031801165300, "line_mean": 29.5401459854, "line_max": 120, "alpha_frac": 0.6477055449, "autogenerated": false, "ratio": 3.023121387283237, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.912031174410229, "avg_score": 0.010103037616189693, "num_lines": 137 }
from FL import *
from dialogKit import *


class GlyphViewDemo(object):
    """Modal dialog demonstrating the GlyphView widget.

    A list on the left selects a glyph from the current FontLab font;
    check boxes on the right toggle the view's individual drawing options.
    """

    # One row per check box: (attribute name on the dialog, label,
    # initial value, GlyphView getter name, GlyphView setter name).
    # Order matters: it fixes both the vertical layout and the sync order.
    _OPTIONS = [
        ('fillCheckBox', 'Fill', True, 'getShowFill', 'setShowFill'),
        ('outlineCheckBox', 'Outline', False, 'getShowOutline', 'setShowOutline'),
        ('pointsCheckBox', 'Points', True, 'getShowOnCurvePoints', 'setShowOnCurvePoints'),
        ('descenderCheckBox', 'Descender', True, 'getShowDescender', 'setShowDescender'),
        ('baselineCheckBox', 'Baseline', True, 'getShowBaseline', 'setShowBaseline'),
        ('xHeightCheckBox', 'X Height', True, 'getShowXHeight', 'setShowXHeight'),
        ('ascenderCheckBox', 'Ascender', True, 'getShowAscender', 'setShowAscender'),
        ('capHeightCheckBox', 'Cap Height', True, 'getShowCapHeight', 'setShowCapHeight'),
        ('upmTopCheckBox', 'UPM Top', False, 'getShowUPMTop', 'setShowUPMTop'),
        ('leftCheckBox', 'Left', True, 'getShowLeftSidebearing', 'setShowLeftSidebearing'),
        ('rightCheckBox', 'Right', True, 'getShowRightSidebearing', 'setShowRightSidebearing'),
    ]

    def __init__(self):
        self.font = fl.font
        self.glyphs = {}
        for glyph in self.font.glyphs:
            self.glyphs[glyph.name] = glyph
        glyphNames = sorted(self.glyphs.keys())
        #
        self.w = ModalDialog((700, 500), 'GlyphView Demo')
        self.w.glyphList = List((10, 10, 150, -60), glyphNames,
                                callback=self.glyphListCallback)
        self.w.view = GlyphView((170, 10, 400, -60), None, None)
        # Stack the option check boxes down the right edge, 25 pts apart,
        # starting at y=10 -- same geometry as creating them one by one.
        top = 10
        for attrName, label, initialValue, _, _ in self._OPTIONS:
            box = CheckBox((580, top, -10, 20), label,
                           value=initialValue,
                           callback=self.viewOptionsCallback)
            setattr(self.w, attrName, box)
            top += 25
        #
        self.w.open()

    def glyphListCallback(self, sender):
        """Show the glyph currently selected in the list, or clear the view."""
        selection = sender.getSelection()
        if selection:
            glyph = self.glyphs[sender[selection[0]]]
            font = self.font
        else:
            font = glyph = None
        self.w.view.set(font, glyph)
        self.w.view.update()

    def viewOptionsCallback(self, sender):
        """Push each changed check box state into the view, then redraw."""
        for attrName, _, _, getterName, setterName in self._OPTIONS:
            state = getattr(self.w, attrName).get()
            # Only call the setter when the value actually changed,
            # mirroring the original one-by-one comparisons.
            if state != getattr(self.w.view, getterName)():
                getattr(self.w.view, setterName)(state)
        self.w.view.update()


GlyphViewDemo()
{ "repo_name": "daltonmaag/dialogKit", "path": "examples/GlyphViewDemo.py", "copies": "3", "size": "3938", "license": "mit", "hash": -3729793378817653000, "line_mean": 56.9264705882, "line_max": 125, "alpha_frac": 0.6521076689, "autogenerated": false, "ratio": 3.295397489539749, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.013332917761128252, "num_lines": 68 }
from flint import Flint


class TestFlint:
    """Test suite for the Flint markdown-templating class.

    All fixture templates and expected outputs live under ``test_resources/``.
    Fixes applied: file handles are closed via ``with`` blocks, ``is None``
    replaces ``== None``, and the ``dict`` builtin is no longer shadowed.
    """

    def test_init(self):
        flint = Flint("test_resources/general_test.md")
        # Compare against the raw template file; close the handle properly.
        with open("test_resources/general_test.md") as f:
            assert flint.text == f.read()
        assert flint.vals == {}
        assert flint.html is None

    def test_get(self):
        flint = Flint("test_resources/general_test.md")
        flint.add("hello", "hi")
        assert flint.get("hello") == "hi"

    def test_get_text(self):
        flint = Flint("test_resources/general_test.md")
        assert flint.getText() == "{key1}, {key2}"

    def test_export_text(self):
        flint = Flint("test_resources/general_test.md")
        flint.exportText("test_resources/general_test_exported.md")
        with open("test_resources/general_test_exported.md") as f:
            assert f.read() == "{key1}, {key2}"

    def test_get_rendered_text(self):
        flint = Flint("test_resources/general_test.md")
        flint.add_dict({"key1": "hello", "key2": "world"})
        flint.render()
        assert flint.getRenderedText() == "hello, world"

    def test_export_rendered_text(self):
        flint = Flint("test_resources/general_test.md")
        flint.add_dict({"key1": "hello", "key2": "world"})
        flint.render()
        flint.exportRenderedText("test_resources/general_test_rendered.md")
        with open("test_resources/general_test_rendered.md") as f:
            assert f.read() == "hello, world"

    """
    Tests for flint.render()
    """

    def test_empty_file(self):
        flint = Flint("test_resources/test_empty_file.md")
        flint.add("key", "value")
        flint.render()
        assert flint.getHTML() == ""

    def test_bare_markdown(self):
        flint = Flint("test_resources/test_bare_markdown.md")
        flint.render()
        with open("test_resources/test_bare_markdown.html") as correctHTML:
            assert correctHTML.read() == flint.getHTML()

    def test_add_individual_keys(self):
        flint = Flint("test_resources/general_test.md")
        flint.add("key1", "Hello")
        flint.add("key2", "world")
        flint.render()
        html = flint.getHTML()
        with open("test_resources/general_test.html") as correctHTML:
            assert correctHTML.read() == html

    def test_add_dict(self):
        flint = Flint("test_resources/general_test.md")
        my_dict = {"key1": "Hello", "key2": "world"}
        flint.add_dict(my_dict)
        flint.render()
        html = flint.getHTML()
        with open("test_resources/general_test.html") as correctHTML:
            assert correctHTML.read() == html

    def test_export_html(self):
        flint = Flint("test_resources/test_bare_markdown.md")
        flint.render()
        flint.exportHTML("test_resources/test_html_export.html")
        with open("test_resources/test_html_export.html") as exported:
            with open("test_resources/test_bare_markdown.html") as html:
                assert exported.read() == html.read()

    def test_iterate(self):
        flint = Flint("test_resources/test_iterate.md")
        flint.add_dict({"key1": [1, 2, 3, 4, 5], "key2": [1, 2, 3, 4, 5]})
        flint.render()
        with open("test_resources/test_iterate.html") as html:
            assert flint.getHTML() == html.read()

    def test_template_dict_mismatch(self):
        # Keys absent from the template are ignored; template keys missing
        # from the dict are left as literal "{...}" placeholders.
        flint = Flint("test_resources/test_mismatch.md")
        flint.add_dict({"key": "hello", "this key is not in the template": "Uh oh"})
        flint.render()
        assert flint.getHTML() == "<p>hello, {world}</p>"

    def test_multiple_keys_in_template(self):
        flint = Flint("test_resources/test_multiple_keys_in_template.md")
        flint.add_dict({"key1": "hello", "list": [1]})
        flint.render()
        correctHTML = "<p>hello hello</p>\n<ul>\n<li>1 1</li>\n</ul>"
        assert flint.getHTML() == correctHTML

    def test_edge_cases(self):
        flint = Flint("test_resources/test_edge_cases.md")
        # Renamed from ``dict`` -- do not shadow the builtin.
        values = {"int_value_key": 1,
                  1: "int key",
                  "iteration_key": [1, 2, 3, 4, 5]}
        flint.add_dict(values)
        flint.render()
        with open("test_resources/test_edge_cases.html") as correctHTML:
            assert flint.getHTML() == correctHTML.read()
{ "repo_name": "dawsonc/Flint", "path": "test_flint.py", "copies": "1", "size": "3718", "license": "mit", "hash": -877435109250433300, "line_mean": 32.8090909091, "line_max": 69, "alpha_frac": 0.6748251748, "autogenerated": false, "ratio": 2.752035529237602, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8790456527344489, "avg_score": 0.027280835338622588, "num_lines": 110 }
from flipsyfat.cores.sd_emulator.linklayer import SDLinkLayer
from migen import *
from misoc.interconnect.csr import *
from misoc.interconnect.csr_eventmanager import *
from misoc.interconnect import wishbone


class SDEmulator(Module, AutoCSR):
    """Core for emulating SD card memory a block at a time,
    with reads and writes backed by software.

    Wraps :class:`SDLinkLayer` and exposes, over CSR/Wishbone:
    read/write interrupt events, the two block buffers, the current
    block-operation parameters, and diagnostic status bits.
    """

    # Read and write buffers, each a single 512 byte block
    mem_size = 1024

    def _connect_event(self, ev, act, done):
        # Event triggered on 'act' positive edge, pulses 'done' on clear
        prev_act = Signal()
        self.sync += prev_act.eq(act)
        self.comb += ev.trigger.eq(act & ~prev_act)
        self.comb += done.eq(ev.clear)

    def __init__(self, platform, pads, **kwargs):
        # Link layer runs in the "local" clock domain so it can be reset
        # independently via the _reset CSR below.
        self.submodules.ll = ClockDomainsRenamer("local")(SDLinkLayer(platform, pads, **kwargs))

        # Event interrupts and acknowledgment
        self.submodules.ev = EventManager()
        self.ev.read = EventSourcePulse()
        self.ev.write = EventSourcePulse()
        self.ev.finalize()
        # Clearing the read event signals "go" (buffer refilled); clearing
        # the write event signals "done" (buffer consumed).
        self._connect_event(self.ev.read, self.ll.block_read_act, self.ll.block_read_go)
        self._connect_event(self.ev.write, self.ll.block_write_act, self.ll.block_write_done)

        # Wishbone access to SRAM buffers
        self.bus = wishbone.Interface()
        self.submodules.wb_rd_buffer = wishbone.SRAM(self.ll.rd_buffer, read_only=False)
        self.submodules.wb_wr_buffer = wishbone.SRAM(self.ll.wr_buffer, read_only=False)
        # Address bit 9 (512-byte boundary) selects read vs. write buffer.
        wb_slaves = [
            (lambda a: a[9] == 0, self.wb_rd_buffer.bus),
            (lambda a: a[9] == 1, self.wb_wr_buffer.bus)
        ]
        self.submodules.wb_decoder = wishbone.Decoder(self.bus, wb_slaves, register=True)

        # Local reset domain: the link layer resets on either the global
        # reset or the software-controlled _reset CSR.
        self._reset = CSRStorage()
        self.clock_domains.cd_local = ClockDomain()
        self.comb += self.cd_local.clk.eq(ClockSignal())
        self.comb += self.cd_local.rst.eq(ResetSignal() | self._reset.storage)

        # Current data operation -- read-only mirrors of the link layer's
        # block transfer parameters, for the software side to inspect.
        self._read_act = CSRStatus()
        self._read_addr = CSRStatus(32)
        self._read_byteaddr = CSRStatus(32)
        self._read_num = CSRStatus(32)
        self._read_stop = CSRStatus()
        self._write_act = CSRStatus()
        self._write_addr = CSRStatus(32)
        self._write_byteaddr = CSRStatus(32)
        self._write_num = CSRStatus(32)
        self._preerase_num = CSRStatus(23)
        self._erase_start = CSRStatus(32)
        self._erase_end = CSRStatus(32)
        self.comb += [
            self._read_act.status.eq(self.ll.block_read_act),
            self._read_addr.status.eq(self.ll.block_read_addr),
            self._read_byteaddr.status.eq(self.ll.block_read_byteaddr),
            self._read_num.status.eq(self.ll.block_read_num),
            self._read_stop.status.eq(self.ll.block_read_stop),
            self._write_act.status.eq(self.ll.block_write_act),
            self._write_addr.status.eq(self.ll.block_write_addr),
            self._write_byteaddr.status.eq(self.ll.block_write_byteaddr),
            self._write_num.status.eq(self.ll.block_write_num),
            self._preerase_num.status.eq(self.ll.block_preerase_num),
            self._erase_start.status.eq(self.ll.block_erase_start),
            self._erase_end.status.eq(self.ll.block_erase_end),
        ]

        # Informational registers, not needed for data transfer.
        # Bit layout (LSB first): 4bit mode, SPI mode, host HC support,
        # 4 reserved bits, card deselected, and three error flags.
        self._info_bits = CSRStatus(16)
        self.comb += self._info_bits.status.eq(Cat(
            self.ll.mode_4bit,
            self.ll.mode_spi,
            self.ll.host_hc_support,
            Constant(False),  # Reserved bit 3
            Constant(False),  # Reserved bit 4
            Constant(False),  # Reserved bit 5
            Constant(False),  # Reserved bit 6
            Constant(False),  # Reserved bit 7
            self.ll.info_card_desel,
            self.ll.err_op_out_range,
            self.ll.err_unhandled_cmd,
            self.ll.err_cmd_crc,
        ))
        self._most_recent_cmd = CSRStatus(len(self.ll.cmd_in_cmd))
        self.comb += self._most_recent_cmd.status.eq(self.ll.cmd_in_cmd)
        self._card_status = CSRStatus(len(self.ll.card_status))
        self.comb += self._card_status.status.eq(self.ll.card_status)
{ "repo_name": "scanlime/flipsyfat", "path": "flipsyfat/cores/sd_emulator/core.py", "copies": "1", "size": "4333", "license": "mit", "hash": -581239820399453800, "line_mean": 43.6701030928, "line_max": 96, "alpha_frac": 0.6069697669, "autogenerated": false, "ratio": 3.3878029710711495, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9488879027606514, "avg_score": 0.0011787420729271639, "num_lines": 97 }
from flist.chat import opcode


def account_login(account, password):
    """Log in to an f-list account.

    :param account: F-list account name.
    :param password: Password for the account.
    :return: the result of ``Account.login()`` on the freshly created
        :class:`flist.account.Account` instance.
    """
    # Imported lazily to avoid a circular import at package load time --
    # TODO confirm that is the reason for the local import.
    from flist.account import Account
    account = Account(account, password)
    return account.login()


def start_chat(character, url="wss://chat.f-list.net/chat2"):
    """Start an instance of fchat using the specified character.

    :param character: Character instance.
    :param url: Websocket URL of the chat server; defaults to the public
        f-list chat endpoint.
    :return: deferred which fires with the chat instance once the connection
        has been established and introduction fired.
    """
    from flist.fchat import Connection
    from flist.chat.protocol import FChatProtocol
    from flist.chat.transport import DefaultFChatTransport
    transport = DefaultFChatTransport(url)
    protocol = FChatProtocol(transport)
    chat = Connection(protocol, character).connect()
    return chat
{ "repo_name": "StormyDragon/python-flist", "path": "flist/__init__.py", "copies": "1", "size": "1086", "license": "bsd-2-clause", "hash": -2558520193816301600, "line_mean": 37.7857142857, "line_max": 120, "alpha_frac": 0.7329650092, "autogenerated": false, "ratio": 4.396761133603239, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5629726142803239, "avg_score": null, "num_lines": null }
from flock import Flock
from shapely.geometry import Polygon
import numpy as np
from obstacle import Obstacle, obstacle_from_point
from goal import Goal
from perimeter import Perimeter


class World(object):
    """
    contains flock, perimeter and obstacles.
    doesn't have ros stuff other serialization to messages.
    """
    def __init__(self, perimeter_points, num_boids=30):
        self.perimeter = Perimeter(perimeter_points)
        bounds = self.perimeter.get_bounds()
        self.minx, self.miny, self.maxx, self.maxy = bounds
        self.num_boids = num_boids
        self.flock = Flock(num_boids, self.perimeter)
        self.static_obstacles = []
        self.dynamic_obstacles = []
        self.goals = []
        self.agents = []
        self.set_static_obstacles()

    def update(self):
        """Advance every agent by one step, then update the whole flock."""
        for member in self.agents:
            member.update()
        obstacles = self.static_obstacles + self.dynamic_obstacles
        self.flock.update(obstacles, self.goals, self.agents)

    def set_dynamic_obstacles(self, polygon_perimeters):
        """Replace the dynamic obstacle set: one Obstacle per polygon."""
        self.dynamic_obstacles = [Obstacle(points)
                                  for points in polygon_perimeters]

    def set_static_obstacles(self):
        """Build the fixed obstacles: one per bounding-box corner, plus a
        hard-coded barrier polygon."""
        corners = [[self.minx, self.miny],
                   [self.minx, self.maxy],
                   [self.maxx, self.maxy],
                   [self.maxx, self.miny]]
        self.static_obstacles = [obstacle_from_point(corner)
                                 for corner in corners]
        barrier = Obstacle([[-65, 190], [55, 190], [55, 210], [-65, 230]])
        self.static_obstacles.append(barrier)

    def set_goals(self, xys):
        """Replace the goal list; each (x, y) pair becomes a Goal."""
        self.goals = [Goal(*pair) for pair in xys]


if __name__ == '__main__':
    demo_perimeter = [[-100.0, -100.0], [-100.0, 100.0],
                      [100.0, 100.0], [100.0, -100.0]]
    demo_world = World(demo_perimeter)
    demo_world.update()
{ "repo_name": "light-swarm/lightswarm_sim", "path": "scripts/world.py", "copies": "1", "size": "1602", "license": "mit", "hash": 2124005957515773700, "line_mean": 31.04, "line_max": 108, "alpha_frac": 0.6997503121, "autogenerated": false, "ratio": 2.665557404326123, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.3865307716426123, "avg_score": null, "num_lines": null }
from ..boids import Flock, Boid
from nose.tools import assert_almost_equal, assert_raises
from numpy.testing import assert_array_equal, assert_allclose
from numpy import array
import os
import yaml


def test_boid_init():
    """A freshly built Flock must hand its configuration down to every Boid."""
    flock = Flock(50, 20.0, 30.0, 40.0, 50.0)
    # Check boid links properly to flock
    boid_flock = flock.boids[0].flock
    assert_almost_equal(boid_flock.number_of_boids, 50, delta=0.01)
    assert_almost_equal(boid_flock.group_flying_dist * flock.group_flying_dist,
                        400, delta=0.01)
    assert_almost_equal(boid_flock.group_flying_weight, 30, delta=0.01)
    assert_almost_equal(boid_flock.nearby_boid_dist * flock.nearby_boid_dist,
                        1600, delta=0.01)
    assert_almost_equal(boid_flock.mid_strength, 50, delta=0.01)
    for boid in flock.boids:
        # Check that position and velocity are correct
        assert_allclose(boid.position, (-200, 450), atol=250)
        assert_allclose(boid.velocity, (5, 0), atol=20)


def test_bad_boids_regression():
    """Regression test against recorded before/after data in fixture.yml."""
    fixture_path = os.path.join(os.path.dirname(__file__), 'fixture.yml')
    # yaml.safe_load avoids arbitrary object construction and the
    # Loader-less yaml.load call removed in PyYAML >= 6; the file handle
    # is now closed deterministically.
    with open(fixture_path) as fixture_file:
        regression_data = yaml.safe_load(fixture_file)
    boid_data = regression_data["before"]
    # NOTE(review): update_boids is never imported in this module (only
    # Flock and Boid are), so this call raises NameError as written.
    # Presumably it should be imported from ..boids -- confirm and fix
    # the import list; left unchanged to avoid guessing the module path.
    update_boids(boid_data)
    for after, before in zip(regression_data["after"], boid_data):
        for after_value, before_value in zip(after, before):
            assert_almost_equal(after_value, before_value, delta=0.01)
{ "repo_name": "garethsion/Bad_Boids", "path": "boids/tests/test_boids.py", "copies": "1", "size": "1470", "license": "mit", "hash": -4352804826292466000, "line_mean": 39.8611111111, "line_max": 90, "alpha_frac": 0.7013605442, "autogenerated": false, "ratio": 3.0497925311203318, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9108904105623794, "avg_score": 0.028449793939307674, "num_lines": 36 }
from flood_mapper.models.rw import RW, RWSerializer
from flood_mapper.models.rt import RT, RTSerializer
from flood_mapper.models.village import Village, VillageSerializer
from flood_mapper.models.flood_status import FloodStatus
from flood_mapper.utilities.utilities import get_time_slice
from rest_framework.response import Response
from rest_framework.decorators import api_view


@api_view(['GET'])
def boundary_api(request, village=None, rw=None, rt=None):
    """
    API endpoint that allows users to be viewed or edited.
    """
    if rt:
        # Most specific request: one RT within the given RW and village.
        try:
            record = RT.objects.get(
                id=int(rt), rw__id=int(rw), rw__village__id=int(village))
            return Response(RTSerializer(record).data)
        except RT.DoesNotExist:
            return Response(None)
    if rw:
        # An RW is only meaningful together with its village.
        if not village:
            return Response(None)
        rt_queryset = RT.objects.filter(
            rw__id=int(rw), rw__village__id=int(village)).order_by('name')
        return Response(RTSerializer(rt_queryset, many=True).data)
    if village:
        rw_queryset = RW.objects.filter(
            village__id=int(village)).order_by('name')
        return Response(RWSerializer(rw_queryset, many=True).data)
    # No filter at all: list every village.
    return Response(VillageSerializer(Village.objects.all(), many=True).data)


@api_view(['GET'])
def get_village_api(request, rw_id):
    """Return the Village owning the given RW, or None if the RW is unknown."""
    try:
        record = RW.objects.get(id=int(rw_id))
        return Response(VillageSerializer(record.village).data)
    except RW.DoesNotExist:
        return Response(None)


@api_view(['GET'])
def get_rw_by_id(request, rw_id):
    """Return a single serialized RW by id, or None if it does not exist."""
    try:
        record = RW.objects.get(id=int(rw_id))
        return Response(RWSerializer(record).data)
    except RW.DoesNotExist:
        return Response(None)


@api_view(['GET'])
def boundary_flooded_api(request, time_slice='current', village=None):
    """List flooded RWs of one village, or flooded villages everywhere,
    within the requested time slice."""
    start_date_time, end_date_time = get_time_slice(time_slice)
    if village:
        statuses = FloodStatus.objects.filter(
            date_time__gte=start_date_time,
            date_time__lte=end_date_time,
            rt__rw__village__id=village)
        flooded_rws = [status.rt.rw for status in statuses]
        return Response(RWSerializer(flooded_rws, many=True).data)
    statuses = FloodStatus.objects.filter(
        date_time__gte=start_date_time,
        date_time__lte=end_date_time)
    flooded_villages = [status.rt.rw.village for status in statuses]
    return Response(VillageSerializer(flooded_villages, many=True).data)


@api_view(['GET'])
def all_flooded_rw(request, time_slice='current'):
    """Return the set of RW ids with a flood status in the time slice."""
    start_date_time, end_date_time = get_time_slice(time_slice)
    statuses = FloodStatus.objects.filter(date_time__gte=start_date_time)
    # 'current' is a bounded window; other slices are open-ended.
    if time_slice == 'current':
        statuses = statuses.filter(date_time__lte=end_date_time)
    flooded_ids = {status.rt.rw.id for status in statuses}
    return Response(flooded_ids)
{ "repo_name": "kartoza/jakarta-flood-maps", "path": "django_project/flood_mapper/views/boundary_api.py", "copies": "1", "size": "3055", "license": "bsd-2-clause", "hash": -770701089704617100, "line_mean": 34.523255814, "line_max": 79, "alpha_frac": 0.6510638298, "autogenerated": false, "ratio": 3.3534577387486277, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9504521568548627, "avg_score": 0, "num_lines": 86 }
from floppyforms.__future__ import ModelForm, TextInput

from package.models import Category, Package, PackageExample


def package_help_text():
    """Build the HTML help text for the category field.

    Returns an unordered list with one ``<li>`` per Category: the plural
    title in bold followed by the category description.
    """
    items = [
        """<li><strong>{title_plural}</strong> {description}</li>""".format(
            title_plural=category.title_plural,
            description=category.description,
        )
        for category in Category.objects.all()
    ]
    # str.join avoids the quadratic cost of repeated += concatenation.
    return "<ul>{0}</ul>".format("".join(items))


class PackageForm(ModelForm):
    """Create/edit form for a Package; repo URL is mandatory."""

    def __init__(self, *args, **kwargs):
        super(PackageForm, self).__init__(*args, **kwargs)
        self.fields['category'].help_text = package_help_text()
        self.fields['repo_url'].required = True
        self.fields['repo_url'].widget = TextInput(attrs={
            'placeholder': 'ex: https://github.com/django/django'
        })

    def clean_slug(self):
        # Slugs are stored lowercase to keep lookups case-insensitive.
        return self.cleaned_data['slug'].lower()

    class Meta:
        model = Package
        fields = ['repo_url', 'title', 'slug', 'pypi_url', 'category', ]


class PackageExampleForm(ModelForm):
    """Public form for submitting a usage example."""

    class Meta:
        model = PackageExample
        fields = ['title', 'url']


class PackageExampleModeratorForm(ModelForm):
    """Moderator variant: additionally toggles the 'active' flag."""

    class Meta:
        model = PackageExample
        fields = ['title', 'url', 'active']


class DocumentationForm(ModelForm):
    """Form for setting only a package's documentation URL."""

    class Meta:
        model = Package
        fields = ["documentation_url", ]
{ "repo_name": "pydanny/djangopackages", "path": "package/forms.py", "copies": "2", "size": "1487", "license": "mit", "hash": 9144155056368933000, "line_mean": 27.0566037736, "line_max": 89, "alpha_frac": 0.5877605918, "autogenerated": false, "ratio": 4.212464589235127, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0005091344713986223, "num_lines": 53 }
from floppyforms import ModelForm, TextInput

from package.models import Category, Package, PackageExample


def package_help_text():
    """Build the HTML help text for the category field.

    Returns an unordered list with one ``<li>`` per Category: the plural
    title in bold followed by the category description.
    """
    items = [
        """<li><strong>{title_plural}</strong> {description}</li>""".format(
            title_plural=category.title_plural,
            description=category.description,
        )
        for category in Category.objects.all()
    ]
    # str.join avoids the quadratic cost of repeated += concatenation.
    return "<ul>{0}</ul>".format("".join(items))


class PackageForm(ModelForm):
    """Create/edit form for a Package; repo URL is mandatory."""

    def __init__(self, *args, **kwargs):
        super(PackageForm, self).__init__(*args, **kwargs)
        self.fields['category'].help_text = package_help_text()
        self.fields['repo_url'].required = True
        self.fields['repo_url'].widget = TextInput(attrs={
            'placeholder': 'ex: https://github.com/django/django'
        })

    def clean_slug(self):
        # Slugs are stored lowercase to keep lookups case-insensitive.
        return self.cleaned_data['slug'].lower()

    class Meta:
        model = Package
        fields = ['repo_url', 'title', 'slug', 'pypi_url', 'category', ]


class PackageExampleForm(ModelForm):
    """Public form for submitting a usage example."""

    class Meta:
        model = PackageExample
        fields = ['title', 'url']


class PackageExampleModeratorForm(ModelForm):
    """Moderator variant: additionally toggles the 'active' flag."""

    class Meta:
        model = PackageExample
        fields = ['title', 'url', 'active']
{ "repo_name": "miketheman/opencomparison", "path": "package/forms.py", "copies": "1", "size": "1356", "license": "mit", "hash": -3700806605712444400, "line_mean": 28.4782608696, "line_max": 89, "alpha_frac": 0.5855457227, "autogenerated": false, "ratio": 4.172307692307692, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0005866114561766736, "num_lines": 46 }
from floppyforms.models import ModelChoiceField, ModelMultipleChoiceField

from .widgets import Select2Widget, Select2MultipleWidget, AjaxSelect2Widget, AjaxSelect2MultipleWidget


class Select2ModelChoiceField(ModelChoiceField):
    """
    Select2ModelChoiceField

    Field which use Select2Widget as widget rendering
    Basic use of Select2, no Ajax inside
    """
    widget = Select2Widget


class Select2ModelMultipleChoiceField(ModelMultipleChoiceField):
    """
    Select2ModelMultipleChoiceField

    Field which use Select2MultipleWidget as widget rendering
    Basic use of Select2, no Ajax inside
    """
    widget = Select2MultipleWidget


class AjaxSelect2ModelChoiceField(ModelChoiceField):
    """
    AjaxSelect2ModelChoiceField

    Field which use AjaxSelect2Widget as widget rendering
    Use Select2 with Ajax calls for datas

    Requires 'resource_name' and 'api_name' keyword arguments; accepts an
    optional 'label_key' (default 'name').
    """
    def __init__(self, *args, **kwargs):
        if 'resource_name' not in kwargs:
            raise KeyError("AjaxSelect2ModelChoiceField must define a 'resource_name' key argument")
        if 'api_name' not in kwargs:
            raise KeyError("AjaxSelect2ModelChoiceField must define a 'api_name' key argument")
        self.resource_name = kwargs.pop('resource_name')
        self.api_name = kwargs.pop('api_name')
        self.label_key = kwargs.pop('label_key', 'name')
        self.widget = AjaxSelect2Widget(resource_name=self.resource_name,
                                        api_name=self.api_name,
                                        label_key=self.label_key)
        # BUG FIX: the remaining arguments must be unpacked. The old call
        # super().__init__(args, kwargs) passed the whole tuple as the
        # first positional parameter and the dict as the second, breaking
        # the parent field's initialization. __init__ also must not
        # return a value.
        super(AjaxSelect2ModelChoiceField, self).__init__(*args, **kwargs)


class AjaxSelect2ModelMultipleChoiceField(ModelMultipleChoiceField):
    """
    AjaxSelect2ModelMultipleChoiceField

    Field which use AjaxSelect2MultipleWidget as widget rendering
    Use Select2 with Ajax calls for datas

    Requires 'resource_name' and 'api_name' keyword arguments; accepts an
    optional 'label_key' (default 'name').
    """
    def __init__(self, *args, **kwargs):
        if 'resource_name' not in kwargs:
            raise KeyError("AjaxSelect2ModelMultipleChoiceField must define a 'resource_name' key argument")
        if 'api_name' not in kwargs:
            raise KeyError("AjaxSelect2ModelMultipleChoiceField must define a 'api_name' key argument")
        self.resource_name = kwargs.pop('resource_name')
        self.api_name = kwargs.pop('api_name')
        self.label_key = kwargs.pop('label_key', 'name')
        self.widget = AjaxSelect2MultipleWidget(resource_name=self.resource_name,
                                                api_name=self.api_name,
                                                label_key=self.label_key)
        # BUG FIX: unpack *args/**kwargs (see AjaxSelect2ModelChoiceField).
        super(AjaxSelect2ModelMultipleChoiceField, self).__init__(*args, **kwargs)
{ "repo_name": "ouhouhsami/django-select2light", "path": "select2light/models.py", "copies": "1", "size": "2530", "license": "apache-2.0", "hash": 7098673126789506000, "line_mean": 41.1666666667, "line_max": 131, "alpha_frac": 0.7177865613, "autogenerated": false, "ratio": 4.093851132686084, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5311637693986084, "avg_score": null, "num_lines": null }
from floppy.graph import Graph
from floppy.floppyUi import Painter2D, MainWindow
import sys
from PyQt5.QtWidgets import QApplication
import argparse
import logging

# Module-level logger writing DEBUG and above to floppy.log.
logger = logging.getLogger('Floppy')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('floppy.log')
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)


def run():
    """Entry point: create the Qt application and painter, then start the UI."""
    logger.info('Starting Floppy Application with ' + ' '.join(sys.argv))
    app = QApplication(sys.argv)
    startUI(app, initializePainter())


def initializePainter():
    """Create a Painter2D with a fresh Graph attached and return it."""
    painter = Painter2D()
    Graph(painter=painter)
    return painter


def startUI(app, painter):
    """Show the main window, run the Qt event loop, and exit the process.

    The window may override Qt's return code with its own exit value.
    """
    win = MainWindow(painter=painter)
    win.setArgs(parseArgv())
    win.show()
    logger.debug('Startup successful. Handing main thread control to Qt main loop.')
    qtReturnValue = app.exec_()
    override, value = win.getFloppyReturnValue()
    if override:
        sys.exit(value)
    sys.exit(qtReturnValue)


def parseArgv():
    """Parse Floppy's command line: an '-i' flag and an optional '--test' name."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', action='store_true', required=False)
    parser.add_argument('--test', nargs=1, required=False, default=False)
    return parser.parse_args()
from floppy.node import Node, abstractNode, Input, Output, Tag
from math import sin, cos, pi


def norm(v: list):
    """Return *v* (a 3-vector) scaled to unit length (Euclidean norm)."""
    d = (v[0]**2 + v[1]**2 + v[2]**2)**.5
    return [v[0]/d, v[1]/d, v[2]/d]


@abstractNode
class MathNode(Node):
    """Abstract base for all math-related nodes."""
    Tag('Math')


class Add(MathNode):
    """Emit the sum of two float inputs."""
    Input('F1', float)
    Input('F2', float)
    Output('Sum', float)

    def run(self):
        self._Sum(self._F1 + self._F2)


@abstractNode
class VectorNode(MathNode):
    """Abstract base for 3-vector operations."""
    Tag('Vector')


class CreateVector(MathNode):
    """Pack three floats into a single vector output."""
    Input('X', float)
    Input('Y', float)
    Input('Z', float)
    Output('V', float, list=True)

    def run(self):
        # BUG FIX: the output is named 'V'; the old code called the
        # non-existent self._Output accessor.
        self._V(self._X, self._Y, self._Z)


class CrossProduct(VectorNode):
    """Emit the cross product of two 3-vectors."""
    Input('Vector1', float, list=True)
    Input('Vector2', float, list=True)
    Output('XProduct', float, list=True)

    def run(self):
        super(CrossProduct, self).run()
        v1 = self._Vector1
        v2 = self._Vector2
        self._XProduct(v1[1]*v2[2]-v1[2]*v2[1],
                       v1[2]*v2[0]-v1[0]*v2[2],
                       v1[0]*v2[1]-v1[1]*v2[0])


class DotProduct(VectorNode):
    """
    Compute Dot product of two input Vectors.
    """
    Input('Vector1', float, list=True)
    Input('Vector2', float, list=True)
    Output('DotProduct', float, list=False)

    def run(self):
        super(DotProduct, self).run()
        v1 = self._Vector1
        v2 = self._Vector2
        self._DotProduct(v1[0]*v2[0] + v1[1]*v2[1] + v1[2]*v2[2])


class Distance(VectorNode):
    """Emit the Euclidean distance between two positions."""
    Input('Position1', float, list=True)
    Input('Position2', float, list=True)
    Output('Distance', float, )

    def run(self):
        super(Distance, self).run()
        v1 = self._Position1
        v2 = self._Position2
        d = (v1[0]-v2[0])**2 + (v1[1]-v2[1])**2 + (v1[2]-v2[2])**2
        self._Distance(d**.5)


class Difference(VectorNode):
    """Emit Vector1 - Vector2 component-wise."""
    Input('Vector1', float, list=True)
    Input('Vector2', float, list=True)
    Output('Difference', float, list=True)

    def run(self):
        super(Difference, self).run()
        # BUG FIX: inputs are named Vector1/Vector2; the old code read the
        # non-existent _Position1/_Position2 accessors.
        v1 = self._Vector1
        v2 = self._Vector2
        self._Difference((v1[0]-v2[0]), (v1[1]-v2[1]), (v1[2]-v2[2]))


class VectorSum(VectorNode):
    """Emit Vector1 + Vector2 component-wise."""
    Input('Vector1', float, list=True)
    Input('Vector2', float, list=True)
    Output('Sum', float, list=True)

    def run(self):
        super(VectorSum, self).run()
        # BUG FIX: read the Vector inputs and write the 'Sum' output; the
        # old code referenced Position inputs and a 'Difference' output,
        # neither of which exists on this node.
        v1 = self._Vector1
        v2 = self._Vector2
        self._Sum((v1[0]+v2[0]), (v1[1]+v2[1]), (v1[2]+v2[2]))


class Normalize(VectorNode):
    """Emit the input vector scaled to unit length."""
    Input('Vector', float, list=True)
    Output('NVector', float, list=True)

    def run(self):
        super(Normalize, self).run()
        self._NVector(norm(self._Vector))


class RotateAbout(VectorNode):
    """Rotate a point about an arbitrary axis.

    The axis is given by a point on it and a direction vector; the angle
    is in degrees. Uses the standard closed-form rotation about an
    arbitrary line.
    """
    Input('Point', float, list=True)
    Input('PointOnAxis', float, list=True)
    Input('AxisDirection', float, list=True)
    Input('Degree', float)
    Output('RotatedPoint', float, list=True)

    def run(self):
        super(RotateAbout, self).run()
        point = self._Point
        angle = self._Degree
        axisDirection = self._AxisDirection
        axisOrigin = self._PointOnAxis
        t = angle * (pi/180)
        x, y, z = point[0], point[1], point[2]
        a, b, c = axisOrigin[0], axisOrigin[1], axisOrigin[2]
        # BUG FIX: the old code did ``axisDirection /= norm(axisDirection)``
        # which divides a list by a list (TypeError); normalize directly.
        axisDirection = norm(axisDirection)
        u, v, w = axisDirection[0], axisDirection[1], axisDirection[2]
        xx = (a*(v**2+w**2)-u*(b*v+c*w-u*x-v*y-w*z)) * (1-cos(t)) + x*cos(t) + (-1*c*v+b*w-w*y+v*z) * sin(t)
        yy = (b*(u**2+w**2)-v*(a*u+c*w-u*x-v*y-w*z)) * (1-cos(t)) + y*cos(t) + ( 1*c*u-a*w+w*x-u*z) * sin(t)
        zz = (c*(u**2+v**2)-w*(a*u+b*v-u*x-v*y-w*z)) * (1-cos(t)) + z*cos(t) + (-1*b*u+a*v-v*x+u*y) * sin(t)
        self._RotatedPoint([xx, yy, zz])
{ "repo_name": "JLuebben/Floppy", "path": "floppy/CustomNodes/mathNodes.py", "copies": "1", "size": "3830", "license": "bsd-3-clause", "hash": -7975068092925885000, "line_mean": 26.7536231884, "line_max": 108, "alpha_frac": 0.5584856397, "autogenerated": false, "ratio": 2.7221037668798864, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.3780589406579886, "avg_score": null, "num_lines": null }
from floppy.node import Node, Input, Output, Tag, abstractNode from floppy.CustomNodes.crystNodes import CrystNode import subprocess import os @abstractNode class ShelxNode(CrystNode): Tag('Shelx') class RunShelxl(ShelxNode): Input('INS', str) Input('HKL', str) Input('List', int, optional=True) Input('Cycles', int) Input('DAMP', int) Input('Type', str, select=('CGLS', 'L.S.')) Output('RES', str) Output('LST', str) Output('FCF', str) Output('R1', float) def __init__(self, *args, **kwargs): super(RunShelxl, self).__init__(*args, **kwargs) self.p = None self.stdout = '' def run(self): super(RunShelxl, self).run() with open('__tmp__.ins', 'w') as fp: fp.write(self._INS) with open('__tmp__.hkl', 'w') as fp: fp.write(self._HKL) self.p = subprocess.Popen('shelxl {}'.format('__tmp__'), shell=True, stdout=subprocess.PIPE) while True: line = self.p.stdout.readline() if not line: break self.stdout += str(line)[1:] #os.waitpid(self.p.pid, 0) output = '' with open('__tmp__.res', 'r') as fp: output = fp.read() self._RES(output) with open('__tmp__.lst', 'r') as fp: output = fp.read() for line in output.splitlines(): if line.startswith(' R1 ='): line = [i for i in line.split() if i] R1 = float(line[2]) break self._R1(R1) self._LST(output) with open('__tmp__.fcf', 'r') as fp: output = fp.read() self._FCF(output) for file in os.listdir(): if file.startswith('__tmp__'): os.remove(file) def report(self): r = super(RunShelxl, self).report() r['template'] = 'ProgramTemplate' r['stdout'] = self.stdout return r
{ "repo_name": "JLuebben/Floppy", "path": "floppy/CustomNodes/shelxNodes.py", "copies": "1", "size": "1966", "license": "bsd-3-clause", "hash": 4628891048269450000, "line_mean": 26.6901408451, "line_max": 100, "alpha_frac": 0.513733469, "autogenerated": false, "ratio": 3.5044563279857397, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9437956504393846, "avg_score": 0.016046658518378765, "num_lines": 71 }
from floppy.node import Node, Input, Output, Tag, abstractNode from floppy.FloppyTypes import StructureInfo, Atom import time import random import subprocess class AMyNode(Node): Input('Inta', int) Input('Intb', int) Input('Int1', int, select=[1,2,3,4]) Input('Int3', int, select=[1,2,3,4]) Output('Int2', int) def run(self): self._Int1 self._Int2(self._Int1 + 1) class FakeWorkNode(Node): Input('inp', object) Output('out', object) def run(self): print('Working @ {}'.format(str(self._inp))) time.sleep(random.randrange(1,5)) print('Done') # self._return('Test Return Value') class IncrementNode(Node): Output('Integer', int) def setup(self): self.i = 0 def run(self): self._Integer(self.i) self.i += 1 class RandomFloat(Node): Output('Float', float) def run(self): self._Float(random.random()) class RunProgram(Node): """ Node for calling an external program of given name and given command line arguments. Returns the program's return value and its stdout output. """ Input('ProgramName', str) Input('Arguments', str) Output('ReturnValue', int) Output('StdOut', str) def run(self): programName = self._ProgramName args = [programName] + self._Arguments.split() r = 0 try: out = subprocess.check_output(args, shell=True) except subprocess.CalledProcessError as e: out = '' r = e[-1] self._ReturnValue(r) self._StdOut(out) class Range(Node): Input('EndValue', int) Output('ValueList', int, list=True) def run(self): self._ValueList(list(range(self._EndValue))) class Int2Str(Node): Input('Int', int) Output('Str', str) def run(self): self._Str(str(self._Int)) class PlotNode2(Node): Input('XX', str) Output('YY', str) def __init__(self, *args, **kwargs): super(PlotNode2, self).__init__(*args, **kwargs) self.time = time.time() self.points = [] self.counts = 0 def check(self): t = time.time() if t - self.time > 3: self.time = t return True def run(self): super(PlotNode2, self).run() self.counts += 1 self.points.append( (self.counts, (random.randint(5, 20), random.randint(5, 20), 
random.randint(5, 20), random.randint(5, 20)))) def report(self): r = super(PlotNode2, self).report() r['template'] = 'PlotTemplate' r['points'] = self.points[:] r['keep'] = 'points' self.points = [] return r
{ "repo_name": "JLuebben/Floppy", "path": "floppy/CustomNodes/myNodes.py", "copies": "1", "size": "2718", "license": "bsd-3-clause", "hash": -1618138531851928000, "line_mean": 20.234375, "line_max": 120, "alpha_frac": 0.5665930831, "autogenerated": false, "ratio": 3.57161629434954, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.463820937744954, "avg_score": null, "num_lines": null }
from floppy.node import Node, Input, Output, Tag, abstractNode @abstractNode class PlotNode(Node): Tag('plot') # class PlotA_vs_B(PlotNode): # pass # # # class PlotA_and_B(PlotNode): # Input('A', float) # Input('B', float) # Output('Trigger', object) # # def __init__(self,*args, **kwargs): # super(PlotA_and_B, self).__init__(*args, **kwargs) # self.data = [] # # def run(self): # super(PlotA_and_B, self).run() # self.data.append((self._A, self._B)) # # def report(self): # r = super(PlotA_and_B, self).report() # r['template'] = 'plotTemplate' # r['points'] = self.data[:] # r['keep'] = 'points' # self.data = [] # return r class PlotBarsGrouped(PlotNode): Input('A', float) Input('B', float) Output('Trigger', object) def __init__(self,*args, **kwargs): super(PlotBarsGrouped, self).__init__(*args, **kwargs) self.data = [] def run(self): super(PlotBarsGrouped, self).run() self.data.append((self._A, self._B)) def report(self): r = super(PlotBarsGrouped, self).report() r['template'] = 'plotBarsGroupedTemplate' r['points'] = self.data[:] # r['keep'] = 'points' # self.data = [] return r class PairedLinePlot(PlotNode): Input('A', float) Input('B', float) Output('Trigger', object) def setup(self): self.points = [] def run(self): super(PairedLinePlot, self).run() self.points.append((None, (self._A, self._B))) def report(self): r = super(PairedLinePlot, self).report() r['template'] = 'PlotTemplate' r['points'] = self.points[:] self.points = [] return r class LinePlot(PlotNode): Input('Value', float) Output('Trigger', object) Output('Trigger', object) def setup(self): self.points = [] def run(self): super(LinePlot, self).run() self.points.append((None, (self._Value,))) def report(self): r = super(LinePlot, self).report() r['template'] = 'PlotTemplate' r['points'] = self.points[:] self.points = [] return r
{ "repo_name": "JLuebben/Floppy", "path": "floppy/CustomNodes/infoNodes.py", "copies": "1", "size": "2230", "license": "bsd-3-clause", "hash": -5107497262924894000, "line_mean": 22.9892473118, "line_max": 62, "alpha_frac": 0.5403587444, "autogenerated": false, "ratio": 3.293943870014771, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9325758067563772, "avg_score": 0.0017089093701996927, "num_lines": 93 }
from flopy.utils.flopy_io import get_url_text class acdd: """Translate ScienceBase global metadata attributes to CF and ACDD global attributes. see: https://www.sciencebase.gov/catalog/ http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#description-of-file-contents http://wiki.esipfed.org/index.php/Attribute_Convention_for_Data_Discovery Parameters ---------- sciencebase_id : str Unique identifier for ScienceBase record (e.g. 582da7efe4b04d580bd37be8) model : flopy model object """ def __init__(self, sciencebase_id, model): self.id = sciencebase_id self.model = model self.sciencebase_url = 'https://www.sciencebase.gov/catalog/item/{}'.format(sciencebase_id) self.sb = self.get_sciencebase_metadata(sciencebase_id) # stuff Jeremy mentioned self.abstract = self.sb['summary'] self.authors = [c['name'] for c in self.sb['contacts'] if 'Originator' in c['type']] # report image? # keys that are the same in sbjson and acdd; # or additional attributes to carry over for k in ['title', 'summary', 'id', 'citation']: self.__dict__[k] = self.sb.get(k, None) # highly recommended global attributes # http://wiki.esipfed.org/index.php/Attribute_Convention_for_Data_Discovery self.keywords = [t['name'] for t in self.sb['tags']] # recommended global attributes self.naming_authority = 'ScienceBase' # org. that provides the id #self.history = None # This is a character array with a line for each invocation of a program that has modified the dataset. # Well-behaved generic netCDF applications should append a line containing: # date, time of day, user name, program name and command arguments. self.source = model.model_ws # The method of production of the original data. # If it was model-generated, source should name the model and its version. #self.processing_level = None # A textual description of the processing (or quality control) level of the data. #self.comment = None # Miscellaneous information about the data, not captured elsewhere. 
# This attribute is defined in the CF Conventions. self.acknowledgement = self._get_xml_attribute('datacred') #self.license = None # #self.standard_name_vocabulary = None self.date_created = self.sb['provenance']['linkProcess'].get('dateCreated') self.creator_name = self.creator.get('name') self.creator_email = self.creator.get('email') #self.creator_url = self.sb['webLinks'][0].get('uri') self.creator_institution = self.creator['organization'].get('displayText') self.institution = self.creator_institution # also in CF convention for global attributes self.project = self.sb['title'] self.publisher_name = [d.get('name') for d in self.sb['contacts'] if 'publisher' in d.get('type').lower()][0] self.publisher_email = self.sb['provenance']['linkProcess'].get('processedBy') self.publisher_url = 'https://www2.usgs.gov/water/'#self.sb['provenance']['linkProcess'].get('linkReference') self.geospatial_bounds_crs = 'EPSG:4326' self.geospatial_lat_min = self.bounds.get('minY') self.geospatial_lat_max = self.bounds.get('maxY') self.geospatial_lon_min = self.bounds.get('minX') self.geospatial_lon_max = self.bounds.get('maxX') self.geospatial_vertical_min = self.model.dis.botm.array.min() self.geospatial_vertical_max = self.model.dis.top.array.max() self.geospatial_vertical_positive = 'up' # assumed to always be up for GW models self.time_coverage_start = self.time_coverage.get('start') self.time_coverage_end = self.time_coverage.get('end') self.time_coverage_duration = self.time_coverage.get('duration') # because the start/end date formats aren't consistent between models self.time_coverage_resolution = self.time_coverage.get('resolution') self.metadata_link = self.sciencebase_url def _get_xml_attribute(self, attr): try: return list(self.xmlroot.iter(attr))[0].text except: return None @property def bounds(self): return self.sb['spatial']['boundingBox'] @property def creator(self): return [d for d in self.sb['contacts'] if 'point of contact' in d['type'].lower()][0] @property def 
creator_url(self): urlname = '-'.join(self.creator.get('name').replace('.', '').split()) url = 'https://www.usgs.gov/staff-profiles/' + urlname.lower() # check if it exists txt = get_url_text(url) if txt is not None: return url else: return 'unknown' @property def geospatial_bounds(self): """Describes the data's 2D or 3D geospatial extent in OGC's Well-Known Text (WKT) Geometry format""" return 'POLYGON (({0} {2}, {0} {3}, {1} {3}, {1} {2}, {0} {2}))'.format( self.geospatial_lon_min, self.geospatial_lon_max, self.geospatial_lat_min, self.geospatial_lat_max ) @property def geospatial_bounds_vertical_crs(self): """The vertical coordinate reference system (CRS) for the Z axis of the point coordinates in the geospatial_bounds attribute. """ epsg = {'NGVD29': 'EPSG:5702', 'NAVD88': 'EPSG:5703'} return epsg.get(self.vertical_datum) @property def references(self): r = [self.citation] links = [d.get('uri') for d in self.sb['webLinks'] if 'link' in d.get('type').lower()] return r + links @property def time_coverage(self): l = self.sb['dates'] tc = {} for t in ['start', 'end']: tc[t] = [d.get('dateString') for d in l if t in d['type'].lower()][0] if not self.model.dis.steady: pass return tc @property def vertical_datum(self): """try to parse the vertical datum from the xml info""" altdatum = self._get_xml_attribute('altdatum') if altdatum is not None: if '88' in altdatum: return 'NAVD88' elif '29' in altdatum: return 'NGVD29' else: return None @property def xmlroot(self): """ElementTree root element object for xml metadata""" try: return self.get_sciencebase_xml_metadata() except: None @property def xmlfile(self): return self.sb['identifiers'][0].get('key') def get_sciencebase_metadata(self, id): """Gets metadata json text for given ID from sciencebase.gov; loads into python dictionary. Fetches the reference text using the url: https://www.sciencebase.gov/catalog/item/<ID>?format=json Parameters ---------- ID : str ScienceBase ID string; e.g. 
582da7efe4b04d580bd37be8 for Dane County Model Returns ------- metadata : dict Dictionary of metadata """ urlbase = 'https://www.sciencebase.gov/catalog/item/{}?format=json' url = urlbase.format(id) import json from flopy.utils.flopy_io import get_url_text text = get_url_text(url, error_msg='Need an internet connection to get metadata from ScienceBase.') d = json.loads(text) return d def get_sciencebase_xml_metadata(self): """Gets xml from sciencebase.gov, using XML url obtained from json using get_sciencebase_metadata(). Parameters ---------- ID : str ScienceBase ID string; e.g. 582da7efe4b04d580bd37be8 for Dane County Model Returns ------- metadata : dict Dictionary of metadata """ import xml.etree.ElementTree as ET from flopy.utils.flopy_io import get_url_text url = self.xmlfile text = get_url_text(url, error_msg='Need an internet connection to get metadata from ScienceBase.') return ET.fromstring(text)
{ "repo_name": "brclark-usgs/flopy", "path": "flopy/export/metadata.py", "copies": "1", "size": "8389", "license": "bsd-3-clause", "hash": 1287916476329692000, "line_mean": 38.3849765258, "line_max": 132, "alpha_frac": 0.6062701156, "autogenerated": false, "ratio": 3.816651501364877, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4922921616964877, "avg_score": null, "num_lines": null }
from floreal import models as m def allocate(limit, wishes): """ Resources allocation in case of penury: When a list of consumers want some resources, and that resource exists in insufficient quantity to satisfy the total demand, a way must be found to allocate existing resources while minimizing unsatisfaction. This heuristic finds a ceiling, a maximal authorized quantity of resources allowed per consumer. Those who asked less than the ceiling will receive what they asked for, others will receive the ceiling quantity. resources are attributed by integral values, so there might be some resources left, although less than 1unit per unsatisfied consumer. The couple of remaining resource units beyond the ceiling are attributed to the consumers who asked for the most resources, i.e. presumably the most unsatisfied ones. :param limit: total quantity allocated. :param wishes: quantities wished by each customer (dictionary, arbitrary key types). :return: quantities allocated to each customer (dictionary, same keys as above). """ # TODO Maybe remove those who wish 0? wish_values = list(wishes.values()) if sum(wish_values) <= limit: # print "There's enough for everyone!" 
return wishes unallocated = limit # resources left to attribute granted = {k: 0 for k in wishes.keys()} # what consumers have been granted so far n_unsatisfied = len(wishes) - wish_values.count(0) # nb of consumers still unsatisfied ceiling = 0 # current limit (increases until everything is allocated) # first stage: find a ceiling that leaves less than one unit per unsatisfied buyer while unallocated >= n_unsatisfied: lot = unallocated // n_unsatisfied # We can safely distribute at least this much ceiling += lot # print ("%i units left; allocating %i units to %i unsatisfied people" % (unallocated, lot, n_unsatisfied)) for k, wish_k in wishes.items(): wish_more_k = wish_k - granted[k] if wish_more_k > 0: # this consumer isn't satisfied yet, give him some more lot_k = min(wish_more_k, lot) # don't give more than what he asked for, though. # print ("person %i wishes %i more unit, receives %i"%(i, wish_i, lot_i)) granted[k] += lot_k unallocated -= lot_k if granted[k] == wishes[k]: n_unsatisfied -= 1 # He's satisfied now! # 2nd stage: give the remaining units, one by one, to biggest unsatisfied buyers got_leftover = sorted(wishes.keys(), key=lambda k: granted[k]-wishes[k])[0:unallocated] # print ("%i more units to distribute, they will go to %s" % (unallocated, got_leftover)) for k in got_leftover: granted[k] += 1 unallocated -= 1 # Some invariant checks if True: assert unallocated == 0 assert sum(granted.values()) == limit for k in wishes.keys(): assert granted[k] <= wishes[k] assert granted[k] <= ceiling+1 return granted def set_limit(pd, last_pc=None, reallocate=False): """ Use `allocate()` to ensure that product `pd` hasn't been granted in amount larger than `limit`. 
:param pd: product featuring the quantity limit """ # TODO: in case of limitation, first cancel extra users' orders if pd.quantity_limit is None: # No limit, granted==ordered for everyone return purchases = m.Purchase.objects.filter(product=pd) wishes = {pc.user_id: int(pc.quantity) for pc in purchases} formerly_granted = {pc.user_id: int(pc.quantity) for pc in purchases} if last_pc is not None: # First limit the last purchase wished = sum(wishes.values()) excess = wished - pd.quantity_limit if excess <= 0: return # No penury elif last_pc.quantity > excess: # Fixing the last purchase is enough to cancel the excess last_pc.quantity -= excess last_pc.save() return else: # The last purchase must be canceled, but that won't be enough del wishes[last_pc.user_id] last_pc.delete() # Then go on to penury re-allocation if not reallocate: return # Call the algorithm granted = allocate(int(pd.quantity_limit), wishes) # Save changed purchases into DB for pc in purchases: uid = pc.user_id if formerly_granted[uid] != granted[uid]: # Save some DB accesses pc.quantity = granted[uid] # TODO logging # print("%s %s had their purchase of %s modified: ordered %s, formerly granted %s, now granted %s" % # pc.user.first_name, pc.user.last_name, pc.product.name, pc.quantity, formerly_granted[uid], pc.quantity # ) pc.save(force_update=True)
{ "repo_name": "fab13n/caracole", "path": "floreal/penury.py", "copies": "1", "size": "5048", "license": "mit", "hash": 5160140034679438000, "line_mean": 43.8909090909, "line_max": 121, "alpha_frac": 0.6263866878, "autogenerated": false, "ratio": 4.009531374106434, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0025872044275323117, "num_lines": 110 }
from floto.api import SwfType class WorkflowType(SwfType): """ Attributes ---------- default_child_policy : Optional[str] Specify the default policy to use for the child workflow executions when a workflow execution of this type is terminated. Valid values: TERMINATE | REQUEST_CANCEL | ABANDON If not assigned, then TERMINATE will be used default_execution_start_to_close_timeout : Optional[str] Default maximum duration in seconds for executions of this workflow type. Default can be overridden when starting an execution through the StartWorkflowExecution action or StartChildWorkflowExecution decision. Duration in seconds; An integer: 0 <= timeout < 60*60*24*356 (one year) If not assigned, then str(60 * 60 * 24) (one day) will be used default_lambda_role : Optional[str] The ARN of the default IAM role to use when a workflow execution of this type invokes AWS Lambda functions """ def __init__(self, *, domain, name, version, description=None, default_task_list='default', default_task_start_to_close_timeout=None, default_task_priority='0', default_child_policy='TERMINATE', default_execution_start_to_close_timeout=None, default_lambda_role=None ): if default_task_start_to_close_timeout is None: default_task_start_to_close_timeout = str(60 * 60 * 6) super().__init__(domain=domain, name=name, version=version, description=description, default_task_list=default_task_list, default_task_start_to_close_timeout=default_task_start_to_close_timeout, default_task_priority=default_task_priority) self.default_child_policy = default_child_policy self.default_execution_start_to_close_timeout = default_execution_start_to_close_timeout or str(60 * 60 * 24) self.default_lambda_role = default_lambda_role @property def swf_attributes(self): """Class attributes as wanted by the AWS SWF API """ a = {'domain': self.domain, 'name': self.name, 'version': self.version} if self.description is not None: a['description'] = self.description if self.default_task_list is not None: a['defaultTaskList'] = 
{'name': self.default_task_list} if self.default_task_start_to_close_timeout is not None: a['defaultTaskStartToCloseTimeout'] = self.default_task_start_to_close_timeout if self.default_task_priority is not None: a['defaultTaskPriority'] = self.default_task_priority if self.default_child_policy is not None: a['defaultChildPolicy'] = self.default_child_policy if self.default_execution_start_to_close_timeout is not None: a['defaultExecutionStartToCloseTimeout'] = self.default_execution_start_to_close_timeout if self.default_lambda_role is not None: a['defaultLambdaRole'] = self.default_lambda_role return a
{ "repo_name": "babbel/floto", "path": "floto/api/workflow_type.py", "copies": "1", "size": "3222", "license": "mit", "hash": -2115189985443326000, "line_mean": 43.75, "line_max": 117, "alpha_frac": 0.6319056487, "autogenerated": false, "ratio": 4.261904761904762, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.001094588300167192, "num_lines": 72 }
from FlowCytometryTools.core import docstring ############################### # Programmable Documentation # ############################### _doc_dict = dict( _graph_grid_layout="""\ xlim : None | 2-tuple If None automatic, otherwise specifies the xmin and xmax for the plot ylim : None | 2-tuple If None automatic, otherwise specifies the ymin and ymax for the plot row_label_xoffset : float Additional offset for the row labels in the x direction. col_label_yoffset : float Additional offset for the col labels in the y direction. hide_tick_labels : True | False Hides the tick mark labels. hide_tick_lines : True | False Hides the tick marks. hspace : float Horizontal space between subplots. wspace : float Vertical space between subplots. row_labels_kwargs : dict This dict is unpacked into the pylab.text function that draws the row labels. col_labels_kwargs : dict This dict is unpacked into the pylab.text function that draws the column labels.""", _graph_grid_layout_returns="""\ (ax_main, ax_subplots) ax_main : reference to the main axes ax_subplots : matrix of references to the subplots (e.g., ax_subplots[0, 3] references the subplot in row 0 and column 3.)""", _bases_filename_parser="""\ parser : ['name' | 'number' | 'read' | mapping | callable] Extracts a key from a filename. Later, this key is used by the position mapper to determine the location of the measurement in the measurement collection. * 'name' : Use the measurement name given in the file name. For example, '[whatever]_Well_C9_[blah].fcs' will get key 'C9'. The filename must look **exactly** like the template above. * 'number' : Use the number given in the file name. For example, '[some name].001.fcs' will get key 001. The filename must look **exactly** like the template above. * 'read' : Use the measurement ID specified in the metadata. * mapping : mapping (dict-like) from datafiles to keys. 
* callable : takes datafile name and returns key.""", _bases_position_mapper="""\ position_mapper : [None, callable, mapping, 'name', 'row_first_enumerator', 'col_first_enumerator'] Returns the coordinates (row, col) which correspond to the key. (The key is the key extracted from the filename by the parser.) For example, the key 'A1' corresponds to the matrix coordinates (0, 0). * None : if None, then uses the same value as the parser * callable : gets key and returns position * mapping : key:pos * 'name' : parses things like 'A1', 'G12' * 'row_first_enumerator', 'name' : converts number to positions, going over rows first * 'col_first_enumerator' : converts number to positions, going over columns first.""", _bases_ID= """\ ID : hashable Collection ID""", _bases_data_files="""\ datafiles : str | iterable A set of data files containing the measurements.""", _bases_ID_kwargs="""\ ID_kwargs: dict Additional parameters to be used when assigning IDs. Passed to '_assign_IDS_to_datafiles' method.""", _gate_available_classes="""\ [:class:`~FlowCytometryTools.ThresholdGate` | :class:`~FlowCytometryTools.IntervalGate` | \ :class:`~FlowCytometryTools.QuadGate` | :class:`~FlowCytometryTools.PolyGate` | \ :class:`~FlowCytometryTools.core.gates.CompositeGate`] """, FCMeasurement_plot_pars="""\ gates : [None | Gate | iterable of Gate] Gate should be in {_gate_available_classes}. When supplied, the gates are drawn on the plot. The gates are applied by default.""", FCMeasurement_transform_pars="""\ transform : ['hlog' | 'tlog' | 'glog' | callable] Specifies the transformation to apply to the data. * callable : a callable that does a transformation (should accept a number or array), or one of the supported named transformations. direction : ['forward' | 'inverse'] Direction of transformation. channels : str | list of str | None Names of channels to transform. If None is given, all channels will be transformed. .. 
warning:: Remember that transforming all channels does not always make sense. For example, when working with the time channel, one should probably keep the data as is. return_all : bool True - return all columns, with specified ones transformed. False - return only specified columns. auto_range : bool If True data range (machine range) is automatically extracted from $PnR field of metadata. .. warning:: If the data has been previously transformed its range may not match the $PnR value. In this case, auto_range should be set to False. use_spln : bool If True th transform is done using a spline. See Transformation.transform for more details. get_transformer : bool If True the transformer is returned in addition to the new Measurement. args : Additional positional arguments to be passed to the Transformation. kwargs : Additional keyword arguments to be passed to the Transformation.""", FCMeasurement_transform_examples="""\ >>> trans = original.transform('hlog') >>> trans = original.transform('tlog', th=2) >>> trans = original.transform('hlog', d=log10(2**18), auto_range=False) >>> trans = original.transform('hlog', r=1000, use_spln=True, get_transformer=True) >>> trans = original.transform('hlog', channels=['FSC-A', 'SSC-A'], b=500).transform('hlog', channels='B1-A', b=100)""", FCMeasurement_subsample_parameters="""\ key : [int | float | tuple | slice] When key is a single number, it specifies a number/fraction of events to use. Use the parameter 'order' to specify how to subsample the requested number/fraction of events. * int : specifies a number of events to use * float : specifies a fraction of events to use (a number between 0 and 1) * tuple : consists of two floats, each between 0 and 1. For example, key = (0.66666, 1.0) returns the last one third of events. * slice : applies a slice. For example, key = slice(10, 1000, 20) returns events with indexes [10, 30, 50, ...] .. note: When key is a tuple (2 floats) or a slice, the 'order' parameter is irrelevant. 
order : ['random' | 'start' | 'end'] Specifies which events to choose. This is only relevant when key is either an int or a float. * 'random' : chooses the events randomly (without replacement) * 'start' : subsamples starting from the start * 'end' : subsamples starting from the end auto_resize : [False | True] If True, attempts to automatically control indexing errors. For example, if there are only 1000 events in the fcs sample, but the key is set to subsample 2000 events, then an error will be raised. However, with auto_resize set to True, the key will be adjusted to 1000 events.""", graph_plotFCM_pars = """\ channel_names : [str | iterable of str] The name (or names) of the channels to plot. When one channel is specified, then a 1d histogram is plotted. kind : ['scatter' | 'histogram'] Specifies the kind of plot to use for plotting the data (only applies to 2D plots). autolabel : [False | True] If True the x and y axes are labeled automatically. colorbar : [False | True] Adds a colorbar. Only relevant when plotting a 2d histogram. xlabel_kwargs : dict kwargs to be passed to the xlabel() command ylabel_kwargs : dict kwargs to be passed to the ylabel() command bins : int | ndarray | [ndarray] specifies how to bin histograms. * int : number of bins (autopilot!) * ndarray : for 1d histograms, e.g., linspace(-1000, 10000, 100) * [ndarray] : for 2d histograms, e.g., [linspace(-1000, 10000, 100), linspace(-1000, 10000, 100)] **CAUTION** when bins=int, the bin locations are determined automatically based on the data. This means that the bins can have different widths, depending on the range of the data. When plotting using FCCollection (FCPlate), the bin locations are set according to minimum and maximum values of data from across all the FCMeasurements. If this is confusing for you, just specify the bin locations explicitely. """, common_plot_ax="""\ ax : [None | ax] Specifies which axis to plot on. If None, will plot on the current axis. 
""", bases_OrderedCollection_grid_plot_pars="""\ ids : [None, list of IDs] If a list of IDs is provided, then only those measurements whose ID is in the list are plotted. col_labels : [list of str, None] Labels for the columns. If None default labels are used. row_labels : [list of str, None] Labels for the rows. If None default labels are used. xlabel : str If not None, this is used to label the x axis of the top right most subplot. ylabel : str If not None, this is used to label the y axis of the top right most subplot. xlim : 2-tuple min and max x value for each subplot if None, the limits are automatically determined for each subplot ylim : 2-tuple min and max y value for each subplot if None, the limits are automatically determined for each subplot""", _containers_held_in_memory_warning="""\ .. warning:: The new Collection will hold the data for **ALL** Measurements in memory! When analyzing multiple collections (e.g., multiple 96-well plates), it may be necessary to only work one collection at a time. Please refer to the tutorials to see how this can be done.""" ) doc_replacer = docstring.DocReplacer(**_doc_dict) doc_replacer.replace()
{ "repo_name": "eyurtsev/FlowCytometryTools", "path": "FlowCytometryTools/core/common_doc.py", "copies": "1", "size": "9509", "license": "mit", "hash": -1610907865344615400, "line_mean": 40.1645021645, "line_max": 136, "alpha_frac": 0.6950257651, "autogenerated": false, "ratio": 3.8748981255093726, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5069923890609372, "avg_score": null, "num_lines": null }
"""Tests for elsewhere-bundle generation and view-graph augmentation."""
from floweaver.layered_graph import LayeredGraph
from floweaver.augment_view_graph import augment, elsewhere_bundles
from floweaver.sankey_definition import SankeyDefinition, Ordering, ProcessGroup, Waypoint, Bundle, Elsewhere
from floweaver.view_graph import view_graph


def test_elsewhere_bundles_are_added_when_no_bundles_defined():
    # make it easier to get started
    nodes = {'a': ProcessGroup(selection=['a1'])}
    bundles = {}
    order = [['a']]
    vd = SankeyDefinition(nodes, bundles, order)
    new_waypoints, new_bundles = elsewhere_bundles(vd)
    assert len(new_bundles) == 2
    assert new_waypoints == {
        '__>a': Waypoint(title='→'),
        '__a>': Waypoint(title='→'),
    }

    # when direction is to left, the elsewhere waypoints inherit it
    nodes['a'] = ProcessGroup(selection=['a1'], direction='L')
    vd = SankeyDefinition(nodes, bundles, order)
    new_waypoints, new_bundles = elsewhere_bundles(vd)
    assert new_waypoints == {
        '__>a': Waypoint(direction='L', title='←'),
        '__a>': Waypoint(direction='L', title='←'),
    }


def test_elsewhere_bundles_are_not_added_when_disabled():
    nodes = {'a': ProcessGroup(selection=['a1'])}
    bundles = {}
    order = [['a']]
    vd = SankeyDefinition(nodes, bundles, order)
    # Bundles to Elsewhere are still reported, but no waypoints are created.
    new_waypoints, new_bundles = elsewhere_bundles(vd, add_elsewhere_waypoints=False)
    assert len(new_bundles) == 2
    assert new_waypoints == {}


def test_elsewhere_bundles_not_added_at_minmax_rank_when_one_bundle_defined():
    nodes = {'a': ProcessGroup(selection=['a1'])}
    bundles = {0: Bundle('a', Elsewhere)}
    order = [['a']]
    vd = SankeyDefinition(nodes, bundles, order)
    new_waypoints, new_bundles = elsewhere_bundles(vd)
    assert len(new_waypoints) == 0
    assert len(new_bundles) == 0


def test_elsewhere_bundles_not_added_to_waypoints():
    # Waypoints have no selection, so flows cannot enter/leave "elsewhere".
    nodes = {'waypoint': Waypoint(), }
    bundles = {}
    order = [[], ['waypoint'], []]
    vd = SankeyDefinition(nodes, bundles, order)
    new_waypoints, new_bundles = elsewhere_bundles(vd)
    assert new_waypoints == {}
    assert new_bundles == {}


def test_elsewhere_bundles():
    nodes = {'a': ProcessGroup(selection=['a1']), }
    bundles = {}
    order = [[], ['a'], []]  # not at min/max rank
    vd = SankeyDefinition(nodes, bundles, order)
    new_waypoints, new_bundles = elsewhere_bundles(vd)
    assert set(new_waypoints.keys()) == {'__a>', '__>a'}
    assert set(new_bundles.values()) == {
        Bundle('a', Elsewhere, waypoints=['__a>']),
        Bundle(Elsewhere, 'a', waypoints=['__>a']),
    }


def test_elsewhere_bundles_does_not_duplicate():
    nodes = {
        # FIX: was selection=('a1'), which is just the string 'a1' — a
        # parenthesised expression, not a tuple. Use a list like the
        # other tests so the selection is a sequence of process ids.
        'a': ProcessGroup(selection=['a1']),
        'in': Waypoint(),
        'out': Waypoint()
    }
    bundles = {
        0: Bundle(Elsewhere, 'a', waypoints=['in']),
        1: Bundle('a', Elsewhere, waypoints=['out']),
    }
    order = [['in'], ['a'], ['out']]  # not at min/max rank
    vd = SankeyDefinition(nodes, bundles, order)
    new_waypoints, new_bundles = elsewhere_bundles(vd)
    assert new_bundles == {}


def test_augment_waypoint_alignment():
    # j -- a -- x
    #      b
    # k -- c -- y
    #
    # should insert "from b" betwen x and y
    # and "to b" between j and k
    G = LayeredGraph()
    G.add_nodes_from([
        ('a', {'node': ProcessGroup()}),
        ('b', {'node': ProcessGroup(selection=['b1'])}),
        ('c', {'node': ProcessGroup()}),
        ('x', {'node': ProcessGroup()}),
        ('y', {'node': ProcessGroup()}),
        ('j', {'node': ProcessGroup()}),
        ('k', {'node': ProcessGroup()}),
    ])
    G.add_edges_from([
        ('a', 'x', {'bundles': [2]}),
        ('k', 'c', {'bundles': [1]}),
        ('j', 'a', {'bundles': [0]}),
        ('c', 'y', {'bundles': [3]}),
    ])
    G.ordering = Ordering([[['j', 'k']], [['a', 'b', 'c']], [['x', 'y']]])

    new_waypoints = {
        'from b': Waypoint(),
        'to b': Waypoint(),
    }
    new_bundles = {
        'b>': Bundle('b', Elsewhere, waypoints=['from b']),
        '>b': Bundle(Elsewhere, 'b', waypoints=['to b']),
    }

    G2 = augment(G, new_waypoints, new_bundles)

    assert set(G2.nodes()).difference(G.nodes()) == {'from b', 'to b'}
    assert G2.ordering == Ordering([
        [['j', 'to b', 'k']],
        [['a', 'b', 'c']],
        [['x', 'from b', 'y']]
    ])
{ "repo_name": "ricklupton/sankeyview", "path": "test/test_augment_view_graph.py", "copies": "1", "size": "4285", "license": "mit", "hash": 4898106426458951000, "line_mean": 32.4140625, "line_max": 109, "alpha_frac": 0.5639466916, "autogenerated": false, "ratio": 3.172848664688427, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4236795356288427, "avg_score": null, "num_lines": null }
# Chaco demo: 4x4 scatter-plot matrix of the flower dataset with lasso
# selection on each panel and an "Export" action that renders the plots
# to static HTML/JS via jinja2 templates. Python 2 code (print statements).
from flowersdata import FlowerData
import chaco.api as chacoapi
import chaco.tools.api as toolsapi
from traits.api import HasTraits, Instance
from enable.api import Component, ComponentEditor
from traitsui.api import Item, Group, View, Handler, Action

size=(700,700)

# Shared plot-data container; filled in _create_plot_component and read
# again by DemoHandler.do_export (keyed there by id(pd)).
pd = chacoapi.ArrayPlotData()

def _create_plot_component():
    """Build the 4x4 grid of colormapped scatter plots with lasso tools."""
    varnames = FlowerData['traits']
    # Map species name -> integer index, used as the color dimension.
    species_map = {}
    for idx, spec in enumerate(FlowerData['species']):
        species_map[spec] = idx
    container = chacoapi.GridContainer(
        padding=40, fill_padding=True,
        bgcolor="lightgray", use_backbuffer=True,
        shape=(4,4), spacing=(20,20))
    # One data column per trait, plus the numeric species column.
    for varname in varnames:
        pd.set_data(varname, [x[varname] for x in FlowerData['values']])
    pd.set_data('species', [species_map[x['species']] for x in FlowerData['values']])
    for x in range(4):
        for y in range(4):
            # NOTE: xname/yname are assigned but the code below keeps
            # using varnames[x]/varnames[y] directly.
            xname = varnames[x]
            yname = varnames[y]
            plot = chacoapi.Plot(pd, use_backbuffer=True, unified_draw=True, backbuffer_padding=True)
            # TODO: Why is backbuffer_padding not working with grid plot container?!
            plot.padding = 20
            plot._pid = x*4 + y
            plot.plot((varnames[x], varnames[y], 'species'),
                      type="cmap_scatter",
                      color_mapper=chacoapi.jet,
                      name='hello',
                      marker = "circle")
            plot.border_width = 1
            # padding=20 above is immediately overridden here; only
            # padding_top survives.
            plot.padding = 0
            plot.padding_top = 30
            my_plot = plot.plots["hello"][0]
            # Record names/ids used later by the JSON export.
            my_plot.index_name = varnames[x]
            my_plot.value_name = varnames[y]
            my_plot.color_name = 'species'
            my_plot.data_source = id(pd)
            # Lasso selection tool + overlay drawn on each scatter plot.
            lasso_selection = toolsapi.LassoSelection(
                component=my_plot,
                selection_datasource=my_plot.index)
            lasso_overlay = chacoapi.LassoOverlay(lasso_selection=lasso_selection, component=my_plot)
            my_plot.tools.append(lasso_selection)
            my_plot.overlays.append(lasso_overlay)
            my_plot.active_tool = lasso_selection
            container.add(plot)
    return container


class DemoHandler(Handler):
    """TraitsUI handler providing the Export-to-HTML action."""

    def do_export(self, obj):
        objs = {}
        demo.plot.add_json(objs)  # hack to add plot data to serialized json
        pd.add_json(objs)
        # hack to tell us to add 'selection tool'
        objs[str(id(pd))]['tools'] = ['select'];
        print objs
        self.render_html_objs(str(id(demo.plot)), objs)

    def render_html_objs(self, main_id, objs):
        """Render export.html and main.js next to this script from templates.

        main_id is the JSON key of the top-level plot; objs is the
        serialized plot dictionary produced by do_export.
        """
        import jinja2
        import os
        import os.path
        import simplejson
        fpath = os.path.join(os.path.dirname(__file__), 'export_template.html')
        template = jinja2.Template(open(fpath).read())
        html_output = template.render(title='graph')
        fpath = os.path.join(os.path.dirname(__file__), 'main_template.js')
        template = jinja2.Template(open(fpath).read())
        main_js = template.render(export_data=simplejson.dumps(objs), main_id = main_id)
        fpath = os.path.join(os.path.dirname(__file__), 'export.html')
        with open(fpath, "w+") as f:
            f.write(html_output)
        fpath = os.path.join(os.path.dirname(__file__), 'main.js')
        with open(fpath, "w+") as f:
            f.write(main_js)


class Demo(HasTraits):
    """Top-level window: the grid container plus the Export button."""
    plot = Instance(Component)
    traits_view = View(
        Group(
            Item('plot', editor=ComponentEditor(size=size), show_label=False),
            orientation = "vertical"
        ),
        handler=DemoHandler,
        buttons=[
            Action(name='Export', action='do_export')
        ],
        resizable=True, title='hello'
    )

    def _plot_default(self):
        # Lazily build the grid of plots on first access of `plot`.
        plot = _create_plot_component()
        return plot

demo = Demo()

if __name__ == "__main__":
    demo.configure_traits()
{ "repo_name": "ContinuumIO/chaco", "path": "examples/continuum/flowers.py", "copies": "1", "size": "4113", "license": "bsd-3-clause", "hash": 4199635980249128000, "line_mean": 34.7652173913, "line_max": 85, "alpha_frac": 0.5572574763, "autogenerated": false, "ratio": 3.8948863636363638, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4952143839936364, "avg_score": null, "num_lines": null }
# Feature pipeline for the sine_fix experiment: synthetic sine mixtures
# plus real speech snippets loaded from .npz/.wav pairs.
from flowfairy.feature import Feature, FeatureError
from flowfairy.conf import settings
import numpy as np
import soundfile as sf

# Number of samples generated per example; a 2000-sample margin is added
# on top of the configured output length.
samplerate = settings.OUTPUTLEN + 2000
outputlen = samplerate
duration = settings.DURATION
frequency_count = settings.CLASS_COUNT
frq_min, frq_max = settings.FREQUENCY_LIMIT
step = (frq_max - frq_min) / frequency_count
max_amp = settings.MAX_AMP


def classify(val):
    """ Assuming normalized input """
    # Maps [0, 1] onto integer bins 0..255.
    return np.floor(val*255)


class SineGen(Feature):
    # Time axis in radians for one full sweep of `samplerate` samples.
    arr = np.arange(samplerate, dtype=np.float32) * 2 * np.pi / samplerate

    def feature(self, frequencies, blends, **kwargs):
        # frequencies/blends are (class-id, frequency) pairs.
        frq1, frq2 = frequencies, blends
        # NOTE(review): rand()*max_amp + 1 spans [1, max_amp+1), not
        # [1, max_amp) as the inline comment claims — confirm intent.
        amp = np.random.rand(2,1) * max_amp + 1 # [1;max_amp)
        phase = np.random.rand(2,1) * np.pi * 2
        # NOTE(review): amp scales the time axis here AND the sine output
        # below, so it also multiplies the effective frequency — looks
        # like a double application of amplitude; confirm this is intended.
        sines = np.tile(self.arr, (2,1)) * amp
        x = (np.sin(sines * np.array([[ frq1[1] ], [frq2[1]]]) + phase) * amp).astype('float32')
        x = x[:,:,None] # add channel
        # Target is the clean(ish) first signal; the second is the "blend".
        y = x[0]
        # NOTE(review): frqid is cast to int32 but frqid2 keeps the default
        # dtype — confirm the asymmetry is intended.
        return {'y': y, 'x': x[0], 'blend': x[1], 'frqid': np.array(frq1[0], dtype=np.int32), 'frqid2': np.array(frq2[0])}

    class Meta:
        ignored_fields = ('frequencies', 'blends')


class NoisySineGen(Feature):
    """Add independent uniform noise to both the input and blend signals."""

    def feature(self, x, blend, **kwargs):
        noise = np.random.uniform(-0.5, 0.5, (2, samplerate, 1)).astype('float32')
        return {'x': noise[0]+x, 'blend': blend+noise[1]}


class Mask(Feature):
    # All-ones mask over the output window (no samples masked by default).
    def feature(self, **kwargs):
        return {'m': np.ones(samplerate * duration, dtype=np.float32)}


class Dropout(Feature):
    # Constant keep-probability fed to the model's dropout layers.
    def feature(self, **kwargs):
        return {'keep_prob': np.array(0.50, dtype=np.float32)}


class ConvertToClasses(Feature):
    # Quantise the float target into 256 integer classes for classification.
    def feature(self, x, y, **kwargs):
        return {'y': classify(y).astype('int64')}


class Speech(Feature):
    def load(self, npz):
        """Load a random `outputlen` window of audio referenced by an .npz file.

        Retries up to 5 random windows, rejecting near-silent ones; raises
        FeatureError if every attempt is too quiet. Returns
        (speaker_class int32, audio float32 of shape (outputlen, 1)).
        """
        l = np.load(npz)
        af = str(l['audio_file']).replace('//', '/')
        original, _ = sf.read(af)

        maxidx = original.shape[0] - outputlen
        retries = 5
        for i in range(retries):
            lidx = np.random.randint(maxidx)
            ridx = lidx + outputlen
            audio = original[lidx:ridx]
            # NOTE(review): shifts by |min| — this zeroes the floor when
            # min <= 0 but adds extra offset for all-positive audio; the
            # subsequent /max then normalises to [0, 1]. Confirm intent.
            audio += np.abs(audio.min())
            amax = audio.max()
            if amax <= 5e-2:
                # Window is (near-)silent; try another random window.
                #print(f'{amax} audio, file: {npz} at idx {lidx}')
                if i+1 == retries:
                    raise FeatureError('Too silent')
                continue
            audio /= amax
            audio = audio[:,None] # None for channel dim
            break

        return l['speaker_class'].astype('int32'), audio.astype('float32')

    def feature(self, speaker, blend):
        # Input, target and blend for speech separation: x == y (identity
        # target), blend is a second speaker's audio.
        sid, saudio = self.load(speaker)
        bid, baudio = self.load(blend)
        return {'x': saudio, 'y': saudio, 'blend': baudio, 'spkid': sid}
{ "repo_name": "WhatDo/FlowFairy", "path": "examples/sine_fix/feature.py", "copies": "1", "size": "2809", "license": "mit", "hash": 7002497622524830000, "line_mean": 26.2718446602, "line_max": 122, "alpha_frac": 0.576005696, "autogenerated": false, "ratio": 3.1956769055745164, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.42716826015745163, "avg_score": null, "num_lines": null }
# Feature pipeline for the regression denoising experiment: a clean sine
# target, a noisy input, a random zeroed-out chunk, and dropout config.
from flowfairy.feature import Feature
from flowfairy.conf import settings
import numpy as np

samplerate = settings.SAMPLERATE
duration = settings.DURATION
chunk = settings.CHUNK


class SineGen(Feature):
    """Emit a pure sine wave at the sampled frequency as the target 'y'."""

    # Time axis in radians, shared by all instances.
    arr = np.arange(samplerate, dtype=np.float32) * 2 * np.pi / samplerate

    def feature(self, frequencies, **kwargs):
        # `frequencies` is a (class-id, frequency) pair; only the value matters here.
        wave = np.sin(frequencies[1] * self.arr).astype('float32')
        return {'y': wave}

    class Meta:
        ignored_fields = ('frequencies', 'blends')


class NoisySineGen(Feature):
    """Produce the network input 'x' by adding uniform noise to the clean wave."""

    def feature(self, y, **kwargs):
        jitter = np.random.uniform(-0.5, 0.5, samplerate).astype('float32')
        return {'x': y + jitter}

    def fields(self):
        return ('x',)


class ConvertToClasses(Feature):
    """Pass-through: the regression variant keeps raw float inputs/targets."""

    def feature(self, x, y, **kwargs):
        return {'x': x, 'y': y}


class Chunk(Feature):
    """Zero out a random contiguous span of the input and record its offset."""

    def feature(self, x, **kwargs):
        mask = np.ones(samplerate * duration, dtype=np.float32)
        start = np.random.randint(chunk, samplerate * duration)
        mask[start:start + chunk] = 0
        return {'x': x * mask, 'chunk': np.array(start)}

    def fields(self):
        return ('chunk',)


class Dropout(Feature):
    """Constant keep-probability fed to the model's dropout layers."""

    def feature(self, **kwargs):
        return {'keep_prob': np.array(0.50, dtype=np.float32)}

    def fields(self):
        return ('keep_prob',)
{ "repo_name": "WhatDo/FlowFairy", "path": "examples/denoise_reg/feature.py", "copies": "1", "size": "1300", "license": "mit", "hash": 7591637366353842000, "line_mean": 22.6363636364, "line_max": 74, "alpha_frac": 0.6153846154, "autogenerated": false, "ratio": 3.3248081841432224, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9417686511364998, "avg_score": 0.004501257635644987, "num_lines": 55 }
# Feature pipeline for the classification denoising experiment: like the
# regression variant, but both input and target are quantised to 256 bins.
from flowfairy.feature import Feature
from flowfairy.conf import settings
import numpy as np

samplerate = settings.SAMPLERATE
duration = settings.DURATION
chunk = settings.CHUNK


def classify(val):
    """Min-max normalise `val` and bucket it into integer levels 0..255."""
    lo = np.min(val)
    scaled = (val - lo) / (np.max(val) - lo)
    return np.floor(scaled * 255)


class SineGen(Feature):
    """Emit a pure sine wave at the sampled frequency as the target 'y'."""

    # Time axis in radians, shared by all instances.
    arr = np.arange(samplerate, dtype=np.float32) * 2 * np.pi / samplerate

    def feature(self, frequencies, **kwargs):
        # `frequencies` is a (class-id, frequency) pair; only the value matters here.
        wave = np.sin(frequencies[1] * self.arr).astype('float32')
        return {'y': wave}

    class Meta:
        ignored_fields = ('frequencies', 'blends')


class NoisySineGen(Feature):
    """Produce the network input 'x' by adding uniform noise to the clean wave."""

    def feature(self, y, **kwargs):
        jitter = np.random.uniform(-0.5, 0.5, samplerate).astype('float32')
        return {'x': y + jitter}

    def fields(self):
        return ('x',)


class ConvertToClasses(Feature):
    """Quantise input and target; the target becomes int64 class labels."""

    def feature(self, x, y, **kwargs):
        labels = classify(y).astype('int64')
        return {'x': classify(x), 'y': labels}


class Chunk(Feature):
    """Zero out a random contiguous span of the input and record its offset."""

    def feature(self, x, **kwargs):
        mask = np.ones(samplerate * duration, dtype=np.float32)
        start = np.random.randint(chunk, samplerate * duration)
        mask[start:start + chunk] = 0
        return {'x': x * mask, 'chunk': np.array(start)}

    def fields(self):
        return ('chunk',)


class Dropout(Feature):
    """Constant keep-probability fed to the model's dropout layers."""

    def feature(self, **kwargs):
        return {'keep_prob': np.array(0.50, dtype=np.float32)}

    def fields(self):
        return ('keep_prob',)
{ "repo_name": "WhatDo/FlowFairy", "path": "examples/denoise_class/feature.py", "copies": "1", "size": "1439", "license": "mit", "hash": -2890331589388228600, "line_mean": 23.3898305085, "line_max": 74, "alpha_frac": 0.6184850591, "autogenerated": false, "ratio": 3.240990990990991, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4359476050090991, "avg_score": null, "num_lines": null }
# Feature pipeline for the multi-frequency denoising experiment: the
# target is the sum of two sines; the target is quantised, the input not.
from flowfairy.feature import Feature
from flowfairy.conf import settings
import numpy as np

samplerate = settings.SAMPLERATE
duration = settings.DURATION
chunk = settings.CHUNK

frequency_count = settings.CLASS_COUNT
frq_min, frq_max = settings.FREQUENCY_LIMIT
step = (frq_max - frq_min) / frequency_count


def classify(val):
    """Min-max normalise `val` and bucket it into integer levels 0..255."""
    lo = np.min(val)
    scaled = (val - lo) / (np.max(val) - lo)
    return np.floor(scaled * 255)


class SineGen(Feature):
    """Emit the sum of two sine waves (main + blend) as the target 'y'."""

    # Time axis in radians, shared by all instances.
    arr = np.arange(samplerate, dtype=np.float32) * 2 * np.pi / samplerate

    def feature(self, frequencies, blends, **kwargs):
        # Both inputs are (class-id, frequency) pairs; only the values matter.
        f_main, f_blend = frequencies, blends
        freq_col = np.array([[f_main[1]], [f_blend[1]]])
        waves = np.sin(np.tile(self.arr, (2, 1)) * freq_col).astype('float32')
        return {'y': waves.sum(axis=0)}

    class Meta:
        ignored_fields = ('frequencies', 'blends')


class NoisySineGen(Feature):
    """Produce the network input 'x' by adding uniform noise to the mixture."""

    def feature(self, y, **kwargs):
        jitter = np.random.uniform(-0.5, 0.5, samplerate).astype('float32')
        return {'x': y + jitter}

    def fields(self):
        return ('x',)


class ConvertToClasses(Feature):
    """Keep the raw input; quantise the target into int64 class labels."""

    def feature(self, x, y, **kwargs):
        return {'x': x, 'y': classify(y).astype('int64')}


class Mask(Feature):
    """All-ones mask over the output window (nothing masked by default)."""

    def feature(self, **kwargs):
        return {'m': np.ones(samplerate * duration, dtype=np.float32)}

    def fields(self):
        return ('m',)


class Chunk(Feature):
    """Zero out a random contiguous span of the input and record its offset."""

    def feature(self, x, **kwargs):
        mask = np.ones(samplerate * duration, dtype=np.float32)
        start = np.random.randint(chunk, samplerate * duration)
        mask[start:start + chunk] = 0
        return {'x': x * mask, 'chunk': np.array(start)}

    def fields(self):
        return ('chunk',)


class Dropout(Feature):
    """Constant keep-probability fed to the model's dropout layers."""

    def feature(self, **kwargs):
        return {'keep_prob': np.array(0.50, dtype=np.float32)}

    def fields(self):
        return ('keep_prob',)
{ "repo_name": "WhatDo/FlowFairy", "path": "examples/denoise_reg_mult/feature.py", "copies": "2", "size": "1846", "license": "mit", "hash": 916664380585521400, "line_mean": 23.9459459459, "line_max": 80, "alpha_frac": 0.6159263272, "autogenerated": false, "ratio": 3.1235194585448394, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9714626205729078, "avg_score": 0.004963916003152482, "num_lines": 74 }
from flow import Flow
import logging


LOG = logging.getLogger(__name__)


class Server(object):
    """A connection to Flow.

    On construction, tries progressively heavier bootstrap steps:
    start an existing local account; else create a device for an
    existing remote account; else create the account from scratch.
    Finally ensures org membership and pushes the profile.
    """

    def __init__(self, config):
        """Initialize a flow server instance."""
        self.config = config
        self.flow = Flow(
            server_uri=config.uri,
            flowappglue=config.flowappglue,
            host=config.host,
            port=config.port,
            schema_dir=config.schema_dir,
            db_dir=config.db_dir,
            attachment_dir=config.attachment_dir,
            use_tls=config.use_tls,
            decrement_file=config.decrement_file,
            extra_config=config.extra_config,
        )
        # Fallback chain: start_up -> create_device -> create_account.
        # Org join and profile setup only run when start_up failed (i.e.
        # this is a fresh device/account).
        if not self._start_server():
            if not self._setup_device():
                self._setup_account()
            self._setup_org()
            self._set_profile()

    def _start_server(self):
        """Attempt to start the flow server.

        Returns True on success; returns None (falsy) on failure, which
        is logged at debug level since failure is an expected branch.
        """
        try:
            self.flow.start_up(username=self.config.username)
            LOG.info("local account '%s' started", self.config.username)
            return True
        except Flow.FlowError as start_up_err:
            LOG.debug("start_up failed: '%s'", str(start_up_err))

    def _setup_device(self):
        """Create a device for an existing account.

        Returns True on success, None (falsy) if the account likely does
        not exist yet (caller then creates the account).
        """
        try:
            self.flow.create_device(
                username=self.config.username,
                password=self.config.password
            )
            return True
        except Flow.FlowError as create_device_err:
            LOG.debug("Create device failed: '%s'", str(create_device_err))

    def _setup_account(self):
        """Create an account, if it doesn't already exist."""
        try:
            self.flow.create_account(
                username=self.config.username,
                password=self.config.password,
                email_confirm_code=self.config.email_confirm_code,
            )
        except Flow.FlowError as create_account_err:
            LOG.error("Create account failed: '%s'", str(create_account_err))

    def _setup_org(self):
        """"Join the org if not already a member."""
        try:
            self.flow.new_org_join_request(oid=self.config.org_id)
        except Flow.FlowError as org_join_err:
            # Already being a member is fine; only other errors are real.
            if "Member Already" in str(org_join_err):
                LOG.debug("already member of org %s", str(self.config.org_id))
            else:
                LOG.error("org join failed: '%s'", str(org_join_err))

    def _set_profile(self):
        """Set the user profile based on the items passed in the config."""
        # All profile fields are optional on the config object.
        profile = self.flow.get_profile_item_json(
            display_name=getattr(self.config, 'display_name', None),
            biography=getattr(self.config, 'biography', None),
            photo=getattr(self.config, 'photo', None),
        )
        self.flow.set_profile('profile', profile)
{ "repo_name": "SpiderOak/flowbot", "path": "src/server.py", "copies": "1", "size": "2883", "license": "mpl-2.0", "hash": 4281699192069432000, "line_mean": 34.1585365854, "line_max": 78, "alpha_frac": 0.5660770031, "autogenerated": false, "ratio": 4.049157303370786, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5115234306470786, "avg_score": null, "num_lines": null }
"""Semabot: a Flow (Semaphor) bot answering status queries per channel."""
from flow import Flow
from config import ORG_ID, CHANNEL_MAP, BOTNAME, BOTPW, NICE_NAMES

try:
    flow = Flow(BOTNAME)
except Flow.FlowError:
    # FIX: was `except flow.FlowError` — if Flow(BOTNAME) raises, the name
    # `flow` is never bound, so evaluating the except clause itself died
    # with NameError. The exception type lives on the Flow class.
    flow = Flow()
    flow.create_device(BOTNAME, BOTPW)


class MessageHandler(object):
    """Dispatches a single incoming chat message to the matching command."""

    def __init__(self, message, sender_id, channel_id):
        self.message = message.lower().strip()
        self.sender_id = sender_id
        self.channel_id = channel_id
        self.sender_name = flow.get_peer_from_id(sender_id)["username"]

    def respond(self):
        """Match the message against known commands and reply in-channel."""
        response = None
        if self.message == 'help':
            response = self.helpText()
        elif self.message == 'last deployment':
            response = self.lastDeployment()
        elif self.message == 'last error':
            response = self.lastError()
        elif self.message == 'last commit':
            response = self.lastCommit()
        elif self.message == 'last comment':
            response = self.lastComment()
        elif self.message == 'last issue':
            response = self.lastIssue()
        elif self.message == 'last branch':
            response = self.lastBranch()
        elif self.message == 'last build':
            response = self.lastBuild()
        elif self.message.startswith('stats since'):
            response = self.stats()
        # Unrecognised messages are silently ignored (no response).
        if response:
            flow.send_message(ORG_ID, self.channel_id, response)

    def helpText(self):
        """Build the help message listing this channel's projects and commands."""
        # Projects whose notifications are routed into this channel.
        projects = []
        for slug, channel_id in CHANNEL_MAP.items():
            if channel_id == self.channel_id:
                projects.append(NICE_NAMES[slug])
        message = '**Hi there {}!**\n'.format(self.sender_name)
        message += 'The projects associated with this channel are: \n{}'.format('\n '.join(projects))
        message += '\n{}\n'.format('*' * 50)
        message += '''
Here are the things you can do:

`last error`: Returns the last error message logged in this channel from sentry
`last commit`: Returns the last commit logged in this channel from Github
`last comment`: Returns the last comment logged in this channel from Github
`last issue`: Returns the last issue logged in this channel from Github
`last deployment`: Returns the last deployment logged in this channel from CodeDeploy
`last build`: Returns the last build from Travis
`stats since <datetime>`: Returns error and deployment counts since a given time for this channel
        '''
        return message

    def lastError(self):
        return "Not yet implemented"

    def lastCommit(self):
        return "Not yet implemented"

    def lastComment(self):
        return "Not yet implemented"

    def lastIssue(self):
        return "Not yet implemented"

    def lastBranch(self):
        return "Not yet implemented"

    def lastDeployment(self):
        """Return the most recent CodeDeploy message seen in this channel."""
        response = 'No deployment found'
        for message in flow.enumerate_messages(ORG_ID, self.channel_id):
            if 'aws codedeploy' in message['text']:
                response = message['text']
                break
        return response

    def stats(self):
        return "Not yet implemented"

    def lastBuild(self):
        return "Not yet implemented"


@flow.message
def respond(notif_type, data):
    """Flow notification callback: answer every message not sent by the bot."""
    regular_messages = data["regularMessages"]
    for message in regular_messages:
        sender_id = message["senderAccountId"]
        if sender_id != flow.account_id():
            channel_id = message["channelId"]
            message = message["text"]
            handler = MessageHandler(message, sender_id, channel_id)
            handler.respond()


if __name__ == "__main__":
    import sys
    import signal

    # Optional CodeDeploy deployment id, recorded as a liveness marker.
    try:
        deployment_id = sys.argv[1]
    except IndexError:
        deployment_id = ''

    with open('/tmp/bot_running.txt', 'w') as f:
        f.write(deployment_id)

    def signalHandler(signum, frame):
        # Stop processing cleanly on SIGINT/SIGTERM.
        flow.set_processing_notifications(value=False)
        sys.exit(0)

    signal.signal(signal.SIGINT, signalHandler)
    signal.signal(signal.SIGTERM, signalHandler)

    print('Listening for notifications ...')
    flow.process_notifications()
{ "repo_name": "datamade/semabot", "path": "bot.py", "copies": "1", "size": "4097", "license": "mit", "hash": -2204259351594323000, "line_mean": 28.9051094891, "line_max": 102, "alpha_frac": 0.6185013424, "autogenerated": false, "ratio": 4.236814891416753, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0003158211399922157, "num_lines": 137 }
from flowirc.message.irc import UserMessage, NickMessage
from flowirc.middleware import MiddleWareBase

__author__ = 'Olle Lundberg'


class _BaseDescriptor:
    """Data descriptor that sends an IRC message whenever its value is set.

    Subclasses declare `_message` (the message class to instantiate) and
    `_fields` (the attribute names of the owner to pass as arguments).
    """

    def __init__(self, what):
        self._what = what

    def __get__(self, obj, objtype):
        return self._what

    def __set__(self, obj, val):
        # Setting None is a no-op; any other value stores it and fires
        # the corresponding IRC message on the owner instance.
        if self._should_send(val):
            self._what = val
            message = self._message(
                *[getattr(obj, field) for field in self._fields])
            obj.trigger(message)

    def _should_send(self, val):
        return val is not None


class _Nick(_BaseDescriptor):
    # FIX: was `class _Nick(BaseDescriptor)` — an undefined name (the base
    # class is `_BaseDescriptor`), which raised NameError at import time.
    _message = NickMessage
    _fields = ['nick']


class _User(_BaseDescriptor):
    # FIX: same undefined-name bug as _Nick above.
    _message = UserMessage
    _fields = ['user', 'full_name']


# Full name shares USER-message semantics with the user field.
_FullName = _User


class IRCUser(MiddleWareBase):
    """Middleware holding the bot's IRC identity (nick/user/full name).

    Assigning to any of these attributes sends the matching NICK/USER
    message via the descriptors above.
    """
    _full_name_template = "{name} a Flowirc bot"
    nick = _Nick('flowirc')
    user = _User('flowirc')
    full_name = _FullName(None)

    def __init__(self, full_name=None, user="flowirc", nick="flowirc"):
        # When no full name is given, derive one from the calling module's
        # name (or its file name when run as __main__).
        if full_name is None:
            import inspect

            form = inspect.stack()[1]
            module = inspect.getmodule(form[0])
            full_name = module.__name__
            if full_name == '__main__':
                import os

                full_name = os.path.splitext(
                    os.path.basename(
                        module.__file__))[0]
            else:
                _, _, full_name = full_name.rpartition('.')
            del module
            del form
        self.nick = nick
        self.full_name = self._full_name_template.format(name=full_name)
        self.user = user
{ "repo_name": "lndbrg/flowirc", "path": "flowirc/user.py", "copies": "1", "size": "1641", "license": "mit", "hash": -8355621344897073000, "line_mean": 24.640625, "line_max": 72, "alpha_frac": 0.5460085314, "autogenerated": false, "ratio": 3.852112676056338, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.989378092967856, "avg_score": 0.0008680555555555555, "num_lines": 64 }
from flowlight.tasks.meta import TaskMeta
from flowlight.tasks.task import _Task
from flowlight.tasks.future import TaskFuture
from flowlight.tasks.state import TaskState


def task(func=None, *args, **kwargs):
    """Decorate a procedure so it can be executed on a machine cluster.

    :param func: the callable to wrap as a task. When omitted (i.e.
        ``task(...)`` is used as a parameterised decorator), a temporary
        class is returned; instantiating it with a function yields the
        real `_Task`. When a callable is given, a `_Task` wrapping it is
        returned directly.

    Usage::

        >>> deferred = task()
        >>> isinstance(deferred, _Task)
        False
        >>> t = deferred(lambda: None)
        >>> isinstance(t, _Task)
        True
        >>> t2 = task(lambda: None)
        >>> isinstance(t2, _Task)
        True
    """
    if func is not None:
        return _Task(func, *args, **kwargs)

    class _DeferredTask:
        # Instantiating the placeholder with a function builds the real
        # task, forwarding the decorator's extra arguments.
        def __new__(placeholder_cls, fn):
            return _Task(fn, *args, **kwargs)

    return _DeferredTask
{ "repo_name": "tonnie17/flowlight", "path": "flowlight/tasks/__init__.py", "copies": "1", "size": "1101", "license": "mit", "hash": -7625494689734637000, "line_mean": 30.4571428571, "line_max": 86, "alpha_frac": 0.6049046322, "autogenerated": false, "ratio": 4.284046692607004, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5388951324807003, "avg_score": null, "num_lines": null }
from flowlight.utils.signal import Signal
from flowlight.utils.trigger import Trigger
from flowlight.tasks.meta import TaskMeta
import threading


class _Task:
    """ A task wrapper on function.

    :param func: task function called by `Node` lately.
    :param run_after: run the task when `run_after` is finish stage.
    :param run_only: only run the task when `run_only` condition is True.
    """
    def __init__(self, func, run_after=None, run_only=None):
        self.func = func
        # Trigger holds signals fired when this task finishes (success or not);
        # dependent tasks register on it below.
        self.trigger = Trigger()
        self.on_start = Signal.func_signal(self.start)
        self.on_complete = Signal.func_signal(self.complete)
        self.on_error = Signal.func_signal(self.error)
        # Event a dependent task waits on until its prerequisite fires it.
        self.event = threading.Event()
        self.run_only = run_only
        self.run_after = run_after
        if run_after is not None and isinstance(run_after, _Task):
            # When the prerequisite task finishes, release this task's event.
            run_after.trigger.add(Signal.func_signal(lambda: self.event.set()))

    def __call__(self, node, *args, **kwargs):
        """Run the task on `node`; returns (result_or_exception, success_flag)."""
        self.meta = TaskMeta(self, run_after=self.run_after)
        # Only an explicit False (literal or returned by the callable)
        # blocks execution; other falsy values do not.
        if self.run_only is False or (callable(self.run_only) and self.run_only() is False):
            return Exception('Run condition check is failed.'), False
        if self.meta.run_after is not None and isinstance(self.meta.run_after, _Task):
            # Block until the prerequisite task's trigger sets our event,
            # then clear it so a subsequent run waits again.
            self.event.wait()
            self.event.clear()
        try:
            self.on_start.send(self.meta)
            result = self.func(self.meta, node, *args, **kwargs)
            self.on_complete.send(self.meta)
            return result, True
        except Exception as e:
            self.on_error.send(e)
            return e, False
        finally:
            # Fire dependents whether we succeeded or failed.
            for signal in self.trigger:
                signal.send()

    def start(self, *args):
        # Hook invoked just before the task function runs; no-op by default.
        pass

    def complete(self, *args):
        # Hook invoked after the task function returns; no-op by default.
        pass

    def error(self, exception):
        # Default error hook: print the traceback of the failed task.
        import traceback
        traceback.print_exc()

    def __repr__(self):
        return '<Task func={}>'.format(self.func.__name__)
{ "repo_name": "tonnie17/flowlight", "path": "flowlight/tasks/task.py", "copies": "1", "size": "2009", "license": "mit", "hash": -1536842284638693400, "line_mean": 32.4833333333, "line_max": 92, "alpha_frac": 0.6077650572, "autogenerated": false, "ratio": 3.776315789473684, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9840560268253217, "avg_score": 0.008704115684093437, "num_lines": 60 }
# Python 2 module (print statements, py2 `Cookie` stdlib module).
from flow_plugin import FlowPlugin
from ghost import Ghost
import Cookie


class ACSFlowPlugin(FlowPlugin):
    """Proxy plugin that logs in through ACS/Windows Live using a headless
    browser and replays the resulting FedAuth cookies on every request."""

    # Class-level cookie store shared across instances: rebuilt by
    # start_session() and read by request().
    fed_auth_cookies = []

    def __init__(self):
        super(ACSFlowPlugin, self).__init__()
        # Credentials and target URL come from the plugin configuration.
        self.url = self.get_conf_option('url')
        self.username = self.get_conf_option('username')
        self.password = self.get_conf_option('password')

    def start_session(self):
        """Drive the Windows Live sign-in flow and capture FedAuth cookies."""
        print 'Login to {0} as {1}'.format(self.url, self.username)

        ACSFlowPlugin.fed_auth_cookies = []

        g = Ghost()
        with g.start() as session:
            session.show()
            session.open(self.url)
            # Click the "Windows Live" identity-provider button.
            session.wait_for_selector('div.windows-live-label.unselectable.tappable')
            session.evaluate('windowsLiveSignin();', expect_loading=True)
            # Fill and submit the credentials form.
            session.wait_for_selector('form[name=f1]')
            session.evaluate("document.querySelector('input[name=loginfmt]').value = '{0}';".format(self.username))
            session.set_field_value('input[name=passwd]', '{0}'.format(self.password))
            page, resources = session.evaluate("document.querySelector('input[type=submit]').click()",
                                               expect_loading=True)
            # Parse Set-Cookie from the post-login response and flatten it
            # into a single "k=v; k=v; " Cookie header value.
            cookie = Cookie.SimpleCookie(str(page.headers['Set-Cookie']).encode('ascii'))

            tmp = ''
            for key in cookie:
                tmp += '{0}={1}; '.format(key, cookie[key].value)
            print 'Using FedAuth cookie: {0}\r\n'.format(tmp)
            ACSFlowPlugin.fed_auth_cookies.append(tmp)

    def request(self, f):
        # Inject the captured FedAuth cookies into each proxied request.
        f.request.headers.set_all('Cookie', ACSFlowPlugin.fed_auth_cookies)
{ "repo_name": "stillinsecure/acl_audit", "path": "plugins/acs_flow_plugin.py", "copies": "2", "size": "1601", "license": "mit", "hash": 2707695558491299300, "line_mean": 37.119047619, "line_max": 123, "alpha_frac": 0.6152404747, "autogenerated": false, "ratio": 3.6889400921658986, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5304180566865898, "avg_score": null, "num_lines": null }
from flowp.testing import Behavior, skip, only, slow from flowp.files import cd, touch, mkdir, cp, sh, exists, \ isfile, isdir, pwd, Watch, rm, mv from flowp import testing import os import time class expect(testing.expect): def to_be_file(self): assert isfile(self._context) def to_be_dir(self): assert isdir(self._context) def not_exists(self): assert not exists(self._context) class Cd(Behavior): def before_each(self): self.tmpdir.enter() os.mkdir('testdir') def after_each(self): self.tmpdir.exit() def it_changes_working_directory(self): cd('testdir') expect(pwd().endswith('testdir')).to_be(True) def it_changes_working_directory_in_context(self): with cd('testdir'): expect(pwd().endswith('testdir')).to_be(True) expect(pwd().endswith('testdir')).to_be(False) class Touch(Behavior): def before_each(self): self.tmpdir.enter() def after_each(self): self.tmpdir.exit() def it_creates_empty_file(self): expect('testfile').not_exists() touch('testfile') expect('testfile').to_be_file() class Mkdir(Behavior): def before_each(self): self.tmpdir.enter() def after_each(self): self.tmpdir.exit() def it_creates_empty_directory(self): mkdir('testdir') expect('testdir').to_be_dir() class WhenPOptionGiven(Behavior): def it_creates_directories_recursivly(self): mkdir('td1/td2', p=True) class FilesBehavior(Behavior): def before_each(self): self.tmpdir.enter() touch('file0.py') mkdir('testdir1') touch('testdir1/file1.py') touch('testdir1/file2.py') mkdir('testdir2') def after_each(self): self.tmpdir.exit() class Cp(FilesBehavior): def it_copy_single_file(self): cp('testdir1/file1.py', 'testdir2/file1b.py') expect('testdir1/file1.py').to_be_file() expect('testdir2/file1b.py').to_be_file() def it_copy_group_of_files_by_glob_pattern(self): cp('testdir1/*.py', 'testdir2') expect('testdir2/file1.py').to_be_file() expect('testdir2/file2.py').to_be_file() def it_copy_group_of_files_by_file_names_list(self): cp(['testdir1/file1.py', 'testdir1/file2.py'], 'testdir2') 
expect('testdir2/file1.py').to_be_file() expect('testdir2/file2.py').to_be_file() def it_raise_error_if_trying_to_copy_directory(self): with expect.to_raise(IsADirectoryError): cp('testdir1', 'testdir3') class WhenROptionGiven(Behavior): def it_copy_whole_directories(self): cp('testdir1', 'testdir3', r=True) expect('testdir3/file1.py').to_be_file() expect('testdir3/file2.py').to_be_file() class Mv(FilesBehavior): def it_move_single_file(self): mv('testdir1/file1.py', 'testdir2/file1b.py') expect('testdir1/file1.py').not_exists() expect('testdir2/file1b.py').to_be_file() def it_move_group_of_files_by_glob_pattern(self): mv('testdir1/*.py', 'testdir2') expect('testdir2/file1.py').to_be_file() expect('testdir2/file2.py').to_be_file() expect('testdir1/file1.py').not_exists() expect('testdir1/file2.py').not_exists() def it_copy_group_of_files_by_file_names_list(self): mv(['testdir1/file1.py', 'testdir1/file2.py'], 'testdir2') expect('testdir2/file1.py').to_be_file() expect('testdir2/file2.py').to_be_file() expect('testdir1/file1.py').not_exists() expect('testdir1/file2.py').not_exists() def it_move_directories(self): mv('testdir1', 'testdir3') expect('testdir1').not_exists() expect('testdir3/file1.py').to_be_file() expect('testdir3/file2.py').to_be_file() @slow class WatchClass(FilesBehavior): def before_each(self): FilesBehavior.before_each(self) self.filename = False self.event = False def callback(filename, event): self.filename = filename self.event = event self.callback = callback with open('testdir1/file2.py', 'w') as f: f.write('1') with open('file0.py', 'w') as f: f.write('1') class WhenGlobPatternGiven(Behavior): def before_each(self): self.wp = Watch('testdir1/*.py', self.callback, sleep=0) expect(self.filename).to_be(False) expect(self.event).to_be(False) self.wp.wait_for_files_registered() def after_each(self): expect(self.wp.is_alive()).to_be(False) def it_monitor_files_changes(self): time.sleep(1) with open('testdir1/file2.py', 'w') as f: f.write('2') 
self.wp.stop_when(lambda: self.event, 1) expect(self.filename) == 'testdir1/file2.py' expect(self.event) == Watch.CHANGE def it_monitor_new_files(self): touch('testdir1/file3.py') self.wp.stop_when(lambda: self.event, 1) expect(self.filename) == 'testdir1/file3.py' expect(self.event) == Watch.NEW def it_monitor_deleted_files(self): rm('testdir1/file2.py') self.wp.stop_when(lambda: self.event, 1) expect(self.filename) == 'testdir1/file2.py' expect(self.event) == Watch.DELETE class WhenListOfGlobPatternsGiven(Behavior): def before_each(self): self.wp = Watch(['file0.py', 'testdir1/*.py'], self.callback, sleep=0) expect(self.filename).to_be(False) expect(self.event).to_be(False) self.wp.wait_for_files_registered() def after_each(self): expect(self.wp.is_alive()).to_be(False) def it_monitor_files_changes(self): time.sleep(1) with open('file0.py', 'w') as f: f.write('2') self.wp.stop_when(lambda: self.event, 1) expect(self.filename) == 'file0.py' expect(self.event) == Watch.CHANGE def it_monitor_new_files(self): touch('testdir1/file3.py') self.wp.stop_when(lambda: self.event, 1) expect(self.filename) == 'testdir1/file3.py' expect(self.event) == Watch.NEW def it_monitor_deleted_files(self): rm('testdir1/file2.py') self.wp.stop_when(lambda: self.event, 1) expect(self.filename) == 'testdir1/file2.py' expect(self.event) == Watch.DELETE class Sh(Behavior): def it_executes_shell_commands(self): ccall = self.mock('subprocess.check_call') sh('test shell command') expect(ccall).to_have_been_called_with('test shell command', shell=True)
{ "repo_name": "localmed/flowp", "path": "spec/spec_files.py", "copies": "2", "size": "6895", "license": "bsd-3-clause", "hash": 8847581021213938000, "line_mean": 30.628440367, "line_max": 80, "alpha_frac": 0.5868020305, "autogenerated": false, "ratio": 3.4066205533596836, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.49934225838596835, "avg_score": null, "num_lines": null }
from flow.renderer.pyglet_renderer import PygletRenderer as Renderer import numpy as np import os import unittest import ctypes class TestPygletRenderer(unittest.TestCase): """Tests pyglet_renderer""" def setUp(self): path = os.path.dirname(os.path.abspath(__file__))[:-11] self.data = np.load( '{}/data/renderer_data/replay.npy'.format(path), allow_pickle=True ) # Default renderer parameters self.network = self.data[0] self.mode = "drgb" self.save_render = False self.sight_radius = 25 self.pxpm = 3 self.show_radius = True self.alpha = 0.9 def tearDown(self): self.renderer.close() def test_init(self): # Initialize a pyglet renderer self.renderer = Renderer( self.network, mode=self.mode, save_render=self.save_render, sight_radius=self.sight_radius, pxpm=self.pxpm, show_radius=self.show_radius, alpha=self.alpha ) # Ensure that the attributes match their correct values self.assertEqual(self.renderer.mode, self.mode) self.assertEqual(self.renderer.save_render, self.save_render) self.assertEqual(self.renderer.sight_radius, self.sight_radius) self.assertEqual(self.renderer.pxpm, self.pxpm) self.assertEqual(self.renderer.show_radius, self.show_radius) self.assertEqual(self.renderer.alpha, self.alpha) def test_render_drgb(self): # Initialize a pyglet renderer self.renderer = Renderer( self.network, mode=self.mode, save_render=self.save_render, sight_radius=self.sight_radius, pxpm=self.pxpm, show_radius=self.show_radius, alpha=self.alpha ) _human_orientations, _machine_orientations, \ _human_dynamics, _machine_dynamics, \ _human_logs, _machine_logs = self.data[100] frame = self.renderer.render( _human_orientations, _machine_orientations, _human_dynamics, _machine_dynamics, _human_logs, _machine_logs ) self.assertEqual(self.renderer.mode, 'drgb') self.assertEqual(frame.shape, (378, 378, 3)) def test_render_rgb(self): self.mode = 'rgb' # Initialize a pyglet renderer self.renderer = Renderer( self.network, mode=self.mode, save_render=self.save_render, 
sight_radius=self.sight_radius, pxpm=self.pxpm, show_radius=self.show_radius, alpha=self.alpha ) _human_orientations, _machine_orientations, \ _human_dynamics, _machine_dynamics, \ _human_logs, _machine_logs = self.data[100] frame = self.renderer.render( _human_orientations, _machine_orientations, _human_dynamics, _machine_dynamics, _human_logs, _machine_logs ) self.assertEqual(self.renderer.mode, 'rgb') self.assertEqual(frame.shape, (378, 378, 3)) def test_render_dgray(self): self.mode = 'dgray' # Initialize a pyglet renderer self.renderer = Renderer( self.network, mode=self.mode, save_render=self.save_render, sight_radius=self.sight_radius, pxpm=self.pxpm, show_radius=self.show_radius, alpha=self.alpha ) _human_orientations, _machine_orientations, \ _human_dynamics, _machine_dynamics, \ _human_logs, _machine_logs = self.data[100] frame = self.renderer.render( _human_orientations, _machine_orientations, _human_dynamics, _machine_dynamics, _human_logs, _machine_logs ) self.assertEqual(self.renderer.mode, 'dgray') self.assertEqual(frame.shape, (378, 378)) def test_render_gray(self): self.mode = 'gray' # Initialize a pyglet renderer self.renderer = Renderer( self.network, mode=self.mode, save_render=self.save_render, sight_radius=self.sight_radius, pxpm=self.pxpm, show_radius=self.show_radius, alpha=self.alpha ) _human_orientations, _machine_orientations, \ _human_dynamics, _machine_dynamics, \ _human_logs, _machine_logs = self.data[100] frame = self.renderer.render( _human_orientations, _machine_orientations, _human_dynamics, _machine_dynamics, _human_logs, _machine_logs ) self.assertEqual(self.renderer.mode, 'gray') self.assertEqual(frame.shape, (378, 378)) def test_get_sight(self): # Initialize a pyglet renderer self.renderer = Renderer( self.network, mode=self.mode, save_render=self.save_render, sight_radius=self.sight_radius, pxpm=self.pxpm, show_radius=self.show_radius, alpha=self.alpha ) _human_orientations, _machine_orientations, \ _human_dynamics, 
_machine_dynamics, \ _human_logs, _machine_logs = self.data[101] self.renderer.render( _human_orientations, _machine_orientations, _human_dynamics, _machine_dynamics, _human_logs, _machine_logs ) orientation = self.data[101][0][0] id = self.data[101][4][0][-1] sight = self.renderer.get_sight(orientation, id) self.assertEqual(sight.shape, (150, 150, 3)) def test_save_renderer(self): self.save_render = True # Initialize a pyglet renderer self.renderer = Renderer( self.network, mode=self.mode, save_render=self.save_render, path='/tmp', sight_radius=self.sight_radius, pxpm=self.pxpm, show_radius=self.show_radius, alpha=self.alpha ) _human_orientations, _machine_orientations, \ _human_dynamics, _machine_dynamics, \ _human_logs, _machine_logs = self.data[101] self.renderer.render( _human_orientations, _machine_orientations, _human_dynamics, _machine_dynamics, _human_logs, _machine_logs ) save_path = self.renderer.close() saved_data = np.load(save_path, allow_pickle=True) self.assertEqual(self.data[0], saved_data[0]) self.assertEqual(self.data[101], saved_data[1]) def test_close(self): # Initialize a pyglet renderer self.renderer = Renderer( self.network, mode=self.mode, save_render=self.save_render, sight_radius=self.sight_radius, pxpm=self.pxpm, show_radius=self.show_radius, alpha=self.alpha ) self.renderer.close() _human_orientations, _machine_orientations, \ _human_dynamics, _machine_dynamics, \ _human_logs, _machine_logs = self.data[1] self.assertRaises( ctypes.ArgumentError, self.renderer.render, _human_orientations, _machine_orientations, _human_dynamics, _machine_dynamics, _human_logs, _machine_logs ) if __name__ == '__main__': unittest.main()
{ "repo_name": "cathywu/flow", "path": "tests/fast_tests/test_pyglet_renderer.py", "copies": "1", "size": "7576", "license": "mit", "hash": -2775970626385162000, "line_mean": 32.0829694323, "line_max": 71, "alpha_frac": 0.5681098205, "autogenerated": false, "ratio": 3.8811475409836067, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.49492573614836066, "avg_score": null, "num_lines": null }
from flows.builtin.helpers.parser import flow_parser from flows.builtin.webserver.post import run_webserver from jobrunner.cli.parse import parse_arguments from jobrunner.plugins import register_job def parse_webserver_arguments(args=None): """ Parse the commandline options for posting a job that runs a simple HTTP webserver :param list args: Args to pass to the arg parser. Will use argv if none specified. :return obj args: parsed arguments """ parser = flow_parser( prog="jobrunner post simple_http_webserver", description='Post a job that runs a simple HTTP webserver' ) parser.add_argument( '--port', '-p', type=int, default=8080, help="The port to use to run the webserver on. Defaults to 8080" ) return parse_arguments(parser, args=args) @register_job() def simple_http_webserver(args=None): """ Post a job that runs the simple http webserver :param list args: Args to pass to the arg parser. Will use argv if none specified. :return None: """ args = parse_webserver_arguments(args=args) run_webserver(port=args.port, hierarchy=args.hierarchy)
{ "repo_name": "vdloo/jobrunner", "path": "flows/builtin/webserver/cli.py", "copies": "1", "size": "1169", "license": "apache-2.0", "hash": 8406853849376708000, "line_mean": 32.4, "line_max": 72, "alpha_frac": 0.6997433704, "autogenerated": false, "ratio": 4.0034246575342465, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5203168027934246, "avg_score": null, "num_lines": null }
from flows.builtin.helpers.parser import flow_parser from flows.simulacra.youtube_dl.post import download_videos from jobrunner.cli.parse import parse_arguments from jobrunner.plugins import register_job def parse_youtube_dl_arguments(args=None): """ Parse the commandline options for posting a job that downloads videos from YouTube to the local disk of the conductor running the flow :param list args: Args to pass to the arg parser. Will use argv if none specified. :return obj args: parsed arguments """ parser = flow_parser( prog="jobrunner post youtube_dl", description='Post a job that downloads videos from YouTube' ) parser.add_argument( 'channels_file', help="Path to a file containing a list of channels to " "download the videos of" ) return parse_arguments(parser, args=args) @register_job() def youtube_dl_latest(args=None): """ Post a job that downloads the latest youtube videos of a list of channels provided as an argument. :param list args: Args to pass to the arg parser. Will use argv if none specified. :return None: """ args = parse_youtube_dl_arguments(args=args) download_videos(channels_file=args.channels_file, hierarchy=args.hierarchy)
{ "repo_name": "vdloo/simulacra", "path": "simulacra/youtube_dl/cli.py", "copies": "1", "size": "1301", "license": "apache-2.0", "hash": 4512428490089989000, "line_mean": 33.2368421053, "line_max": 79, "alpha_frac": 0.7033051499, "autogenerated": false, "ratio": 4.143312101910828, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 38 }
from flowsieve.acl.acl_result import ACLResult, PacketMatch from flowsieve.acl.base_acl import BaseACL from flowsieve.acl.user_set import UserSet from ryu.lib.packet import ethernet class UserACL(BaseACL): def __init__(self, **kwargs): super(UserACL, self).__init__(**kwargs) self.allowed_user_names = kwargs.get("allowed_users", []) self.allowed_users = [] self.allowed_role_names = kwargs.get("allowed_roles", []) self.allowed_roles = [] self.denied_user_names = kwargs.get("denied_users", []) self.denied_users = [] self.denied_role_names = kwargs.get("denied_roles", []) self.denied_roles = [] self.is_family = kwargs.get("family", True) self.default = kwargs.get("default", "") self.user_set = UserSet.empty() def set_default(self): if self.default != "": return elif self.parent is None: self.default = "allow" else: self.default = "inherit" def load_relations(self, user_store): self.set_default() for user_name in self.allowed_user_names: user = user_store.get_user(user_name) if user is None: self._logger.warning("Unknwon user %s in section" " allowed_users of an ACL", user_name) continue self.allowed_users.append(user) for role_name in self.allowed_role_names: role = user_store.get_role(role_name) if role is None: self._logger.warning("Unknown role %s in section" " allowed_roles of an ACL", role_name) continue self.allowed_roles.append(role) for user_name in self.denied_user_names: user = user_store.get_user(user_name) if user is None: self._logger.warning("Unknwon user %s in section" " denied_users of an ACL", user_name) continue self.denied_users.append(user) for role_name in self.denied_role_names: role = user_store.get_role(role_name) if role is None: self._logger.warning("Unknown role %s in section" " denied_roles of an ACL", role_name) continue self.denied_roles.append(role) self.build_user_set() def build_user_set(self): self.user_set = UserSet.whole() default_str_low = self.default.lower() if default_str_low == "deny": self.user_set = UserSet.empty() elif 
default_str_low == "allow": self.user_set = UserSet.whole() elif default_str_low == "inherit" and self.parent is not None: self.parent.build_user_set() self.user_set = self.parent.user_set else: self._logger.warning("Unknown default value %s", self.default) if self.user is None and self.role is not None: my_family = UserSet(roles=[self.role]) if self.is_family: self.user_set += my_family else: self.user_set -= my_family if self.user is not None: self.user_set += UserSet(users=[self.user]) self.user_set += UserSet(users=self.allowed_users) self.user_set += UserSet(roles=self.allowed_roles) self.user_set -= UserSet(users=self.denied_users) self.user_set -= UserSet(roles=self.denied_roles) def allows_packet(self, pkt, src_user): if pkt is None: return ACLResult(src_user in self.user_set, PacketMatch()) eth = pkt.get_protocol(ethernet.ethernet) return ACLResult(src_user in self.user_set, PacketMatch(dl_dst=eth.dst)) def __repr__(self): repr_family = "" if self.is_family: repr_family = " family" return "<UserACL{0} allowed_users={1}>".format( repr_family, self.allowed_user_names )
{ "repo_name": "shimojo-lab/flowsieve", "path": "flowsieve/acl/user_acl.py", "copies": "1", "size": "4107", "license": "apache-2.0", "hash": -8682949947190889000, "line_mean": 35.3451327434, "line_max": 75, "alpha_frac": 0.5534453372, "autogenerated": false, "ratio": 3.9226361031518624, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4976081440351863, "avg_score": null, "num_lines": null }
from flows.simulacra.youtube_dl.cli import youtube_dl_latest from tests.testcase import TestCase class TestYoutubeDLLatest(TestCase): def setUp(self): self.parse_youtube_dl_arguments = self.set_up_patch( 'flows.simulacra.youtube_dl.cli.parse_youtube_dl_arguments' ) self.download_videos = self.set_up_patch( 'flows.simulacra.youtube_dl.cli.download_videos' ) def test_youtube_dl_latest_parses_youtube_dl_args(self): youtube_dl_latest() self.parse_youtube_dl_arguments.assert_called_once_with( args=None ) def test_youtube_dl_latest_parses_specified_args(self): expected_args = ['these', '--are', 'some_args'] youtube_dl_latest(args=expected_args) self.parse_youtube_dl_arguments.assert_called_once_with( args=expected_args ) def test_youtube_dl_latest_runs_youtube_dl(self): youtube_dl_latest() parsed_arguments = self.parse_youtube_dl_arguments.return_value expected_channels_file = parsed_arguments.channels_file self.download_videos.assert_called_once_with( channels_file=expected_channels_file, hierarchy=parsed_arguments.hierarchy )
{ "repo_name": "vdloo/simulacra", "path": "simulacra/tests/unit/youtube_dl/cli/test_youtube_dl_latest.py", "copies": "1", "size": "1263", "license": "apache-2.0", "hash": -1876619708943889400, "line_mean": 32.2368421053, "line_max": 71, "alpha_frac": 0.6547901821, "autogenerated": false, "ratio": 3.692982456140351, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9847772638240351, "avg_score": 0, "num_lines": 38 }
from flows.statestore.base import StateStoreBase, StateNotFound from django.db import models from django.utils import timezone from flows import config from datetime import timedelta class StateModelManager(models.Manager): def get_query_set(self): timeout = timezone.now() - timedelta(seconds=config.FLOWS_TASK_IDLE_TIMEOUT) return super(StateModelManager, self).get_query_set().filter( last_access__gte=timeout ) def remove_expired_state(self): timeout = config.FLOWS_TASK_IDLE_TIMEOUT cutoff = timezone.now() - timedelta(seconds=timeout) # directly call the superclass's queryset method because # in our own we filter out expired state! qs = super(StateModelManager, self).get_query_set() expired = qs.filter(last_access__lte=cutoff) count = expired.count() expired.delete() return count class StateModel(models.Model): objects = StateModelManager() class Meta: app_label = 'flows' task_id = models.CharField(max_length=32, unique=True) state = models.TextField(null=True) last_access = models.DateTimeField(auto_now_add=True) def __unicode__(self): return 'State for task %s' % self.task_id class StateStore(StateStoreBase): def get_state(self, task_id): try: state_model = StateModel.objects.get(task_id=task_id) except StateModel.DoesNotExist: raise StateNotFound else: state_model.last_access = timezone.now() state_model.save() return self._deserialise(state_model.state) def put_state(self, task_id, state): state_model, _ = StateModel.objects.get_or_create(task_id=task_id) state_model.state = self._serialise(state) state_model.save() def delete_state(self, task_id): StateModel.objects.filter(task_id=task_id).delete()
{ "repo_name": "maikelwever/django-flows", "path": "flows/statestore/django_store.py", "copies": "3", "size": "1944", "license": "bsd-2-clause", "hash": 4430224664881322000, "line_mean": 30.3709677419, "line_max": 96, "alpha_frac": 0.6538065844, "autogenerated": false, "ratio": 3.9114688128772634, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.013183310817936205, "num_lines": 62 }
# from flow.visualize import visualizer_rllab as vs_rllab # from flow.visualize.visualizer_rllab import visualizer_rllab from flow.visualize import visualizer_rllib as vs_rllib from flow.visualize.visualizer_rllib import visualizer_rllib import flow.visualize.capacity_diagram_generator as cdg import flow.visualize.time_space_diagram as tsd import flow.visualize.plot_ray_results as prr import os import unittest import ray import numpy as np import contextlib from io import StringIO os.environ['TEST_FLAG'] = 'True' class TestVisualizerRLlib(unittest.TestCase): """Tests visualizer_rllib""" def test_visualizer_single(self): """Test for single agent""" try: ray.init(num_cpus=1) except Exception: pass # current path current_path = os.path.realpath(__file__).rsplit('/', 1)[0] # run the experiment and check it doesn't crash arg_str = '{}/../data/rllib_data/single_agent 1 --num_rollouts 1 ' \ '--render_mode no_render ' \ '--horizon 10'.format(current_path).split() parser = vs_rllib.create_parser() pass_args = parser.parse_args(arg_str) visualizer_rllib(pass_args) # FIXME(ev) set the horizon so that this runs faster def test_visualizer_multi(self): """Test for multi-agent visualization""" try: ray.init(num_cpus=1) except Exception: pass # current path current_path = os.path.realpath(__file__).rsplit('/', 1)[0] # run the experiment and check it doesn't crash arg_str = '{}/../data/rllib_data/multi_agent 1 --num_rollouts 1 ' \ '--render_mode no_render ' \ '--horizon 10'.format(current_path).split() parser = vs_rllib.create_parser() pass_args = parser.parse_args(arg_str) visualizer_rllib(pass_args) # class TestVisualizerRLlab(unittest.TestCase): # """Tests visualizer_rllab""" # # def test_visualizer(self): # # current path # current_path = os.path.realpath(__file__).rsplit('/', 1)[0] # arg_str = '{}/../data/rllab_data/itr_0.pkl --num_rollouts 1 ' \ # '--no_render'.format(current_path).split() # parser = vs_rllab.create_parser() # pass_args = parser.parse_args(arg_str) # 
visualizer_rllab(pass_args) class TestPlotters(unittest.TestCase): def test_capacity_diagram_generator(self): # import the csv file dir_path = os.path.dirname(os.path.realpath(__file__)) data = cdg.import_data_from_csv( os.path.join(dir_path, 'test_files/inflows_outflows.csv')) # compute the mean and std of the outflows for all unique inflows unique_inflows, mean_outflows, std_outflows = cdg.get_capacity_data( data) # test that the values match the expected from the expected_unique_inflows = np.array([ 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1600, 1700, 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500, 2600, 2700, 2800, 2900]) expected_means = np.array([ 385.2, 479.52, 575.28, 668.16, 763.2, 856.8, 900.95668831, 1029.6705856, 1111.62035833, 1187.87297462, 1258.81962238, 1257.30378783, 1161.28280975, 1101.85671862, 1261.26596639, 936.91255623, 1039.90127834, 1032.13903881, 937.70410361, 934.85669105, 837.58808324, 889.17167643, 892.78528048, 937.85757297, 934.86027655, 804.14440138]) expected_stds = np.array([ 1.60996894, 1.44, 1.44, 2.38796985, 2.78854801, 3.6, 149.57165793, 37.82554569, 67.35786443, 135.35337939, 124.41794128, 221.64466355, 280.88707947, 199.2875712, 258.72510896, 194.0785382, 239.71034056, 182.75627664, 331.37899239, 325.82943015, 467.54641633, 282.15049541, 310.36329236, 92.61828854, 229.6155371, 201.29461492]) np.testing.assert_array_almost_equal(unique_inflows, expected_unique_inflows) np.testing.assert_array_almost_equal(mean_outflows, expected_means) np.testing.assert_array_almost_equal(std_outflows, expected_stds) def test_time_space_diagram_figure_eight(self): # check that the exported data matches the expected emission file data fig8_emission_data = { 'idm_3': {'pos': [27.25, 28.25, 30.22, 33.17], 'time': [1.0, 2.0, 3.0, 4.0], 'vel': [0.0, 0.99, 1.98, 2.95], 'edge': ['upper_ring', 'upper_ring', 'upper_ring', 'upper_ring']}, 'idm_4': {'pos': [56.02, 57.01, 58.99, 61.93], 'time': [1.0, 2.0, 3.0, 4.0], 'vel': [0.0, 
0.99, 1.98, 2.95], 'edge': ['upper_ring', 'upper_ring', 'upper_ring', 'upper_ring']}, 'idm_5': {'pos': [84.79, 85.78, 87.76, 90.7], 'time': [1.0, 2.0, 3.0, 4.0], 'vel': [0.0, 0.99, 1.98, 2.95], 'edge': ['upper_ring', 'upper_ring', 'upper_ring', 'upper_ring']}, 'idm_2': {'pos': [28.77, 29.76, 1.63, 4.58], 'time': [1.0, 2.0, 3.0, 4.0], 'vel': [0.0, 0.99, 1.97, 2.95], 'edge': ['top', 'top', 'upper_ring', 'upper_ring']}, 'idm_13': {'pos': [106.79, 107.79, 109.77, 112.74], 'time': [1.0, 2.0, 3.0, 4.0], 'vel': [0.0, 0.99, 1.98, 2.96], 'edge': ['lower_ring', 'lower_ring', 'lower_ring', 'lower_ring']}, 'idm_9': {'pos': [22.01, 23.0, 24.97, 27.92], 'time': [1.0, 2.0, 3.0, 4.0], 'vel': [0.0, 0.99, 1.97, 2.95], 'edge': ['left', 'left', 'left', 'left']}, 'idm_6': {'pos': [113.56, 114.55, 116.52, 119.47], 'time': [1.0, 2.0, 3.0, 4.0], 'vel': [0.0, 0.99, 1.97, 2.95], 'edge': ['upper_ring', 'upper_ring', 'upper_ring', 'upper_ring']}, 'idm_8': {'pos': [29.44, 0.28, 2.03, 4.78], 'time': [1.0, 2.0, 3.0, 4.0], 'vel': [0.0, 0.84, 1.76, 2.75], 'edge': ['right', ':center_0', ':center_0', ':center_0']}, 'idm_12': {'pos': [78.03, 79.02, 80.99, 83.94], 'time': [1.0, 2.0, 3.0, 4.0], 'vel': [0.0, 0.99, 1.98, 2.95], 'edge': ['lower_ring', 'lower_ring', 'lower_ring', 'lower_ring']}, 'idm_10': {'pos': [20.49, 21.48, 23.46, 26.41], 'time': [1.0, 2.0, 3.0, 4.0], 'vel': [0.0, 0.99, 1.98, 2.95], 'edge': ['lower_ring', 'lower_ring', 'lower_ring', 'lower_ring']}, 'idm_11': {'pos': [49.26, 50.25, 52.23, 55.17], 'time': [1.0, 2.0, 3.0, 4.0], 'vel': [0.0, 0.99, 1.98, 2.95], 'edge': ['lower_ring', 'lower_ring', 'lower_ring', 'lower_ring']}, 'idm_1': {'pos': [0.0, 0.99, 2.97, 5.91], 'time': [1.0, 2.0, 3.0, 4.0], 'vel': [0.0, 0.99, 1.98, 2.95], 'edge': ['top', 'top', 'top', 'top']}, 'idm_7': {'pos': [0.67, 1.66, 3.64, 6.58], 'time': [1.0, 2.0, 3.0, 4.0], 'vel': [0.0, 0.99, 1.97, 2.94], 'edge': ['right', 'right', 'right', 'right']}, 'idm_0': {'pos': [0.0, 1.0, 2.98, 5.95], 'time': [1.0, 2.0, 3.0, 4.0], 
'vel': [0.0, 1.0, 1.99, 2.97], 'edge': ['bottom', 'bottom', 'bottom', 'bottom']} } dir_path = os.path.dirname(os.path.realpath(__file__)) actual_emission_data = tsd.import_data_from_emission( os.path.join(dir_path, 'test_files/fig8_emission.csv')) self.assertDictEqual(fig8_emission_data, actual_emission_data) # test get_time_space_data for figure eight networks flow_params = tsd.get_flow_params( os.path.join(dir_path, 'test_files/fig8.json')) pos, speed, _ = tsd.get_time_space_data( actual_emission_data, flow_params) expected_pos = np.array( [[60, 23.8, 182.84166941, 154.07166941, 125.30166941, 96.54166941, -203.16166941, -174.40166941, -145.63166941, -116.86166941, -88.09166941, -59.33, -30.56, -1.79], [59, 22.81, 181.85166941, 153.08166941, 124.31166941, 95.54166941, -202.17166941, -173.40166941, -144.64166941, -115.87166941, -87.10166941, -58.34, -29.72, -0.8], [57.02, 20.83, 179.87166941, 151.10166941, 122.34166941, 93.56166941, -200.02166941, -171.43166941, -142.66166941, -113.89166941, -85.13166941, -56.36, -27.97, 208.64166941]] ) expected_speed = np.array([ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0.99, 0.99, 0.99, 0.99, 0.99, 0.99, 0.99, 0.99, 0.99, 0.99, 0.99, 0.84, 0.99], [1.99, 1.98, 1.98, 1.98, 1.98, 1.98, 1.97, 1.98, 1.98, 1.98, 1.97, 1.97, 1.76, 1.97] ]) np.testing.assert_array_almost_equal(pos[:-1, :], expected_pos) np.testing.assert_array_almost_equal(speed[:-1, :], expected_speed) def test_time_space_diagram_merge(self): dir_path = os.path.dirname(os.path.realpath(__file__)) emission_data = tsd.import_data_from_emission( os.path.join(dir_path, 'test_files/merge_emission.csv')) flow_params = tsd.get_flow_params( os.path.join(dir_path, 'test_files/merge.json')) pos, speed, _ = tsd.get_time_space_data(emission_data, flow_params) expected_pos = np.array( [[4.86, 180.32, 361.32, 547.77, 0], [4.88, 180.36, 361.36, 547.8, 0], [4.95, 180.43, 361.44, 547.87, 0], [5.06, 180.54, 361.56, 547.98, 0], [5.21, 180.68, 361.72, 548.12, 0], [5.4, 180.86, 0, 
0, 0]] ) expected_speed = np.array( [[0, 0, 0, 0, 0], [0.15, 0.17, 0.19, 0.14, 0], [0.35, 0.37, 0.39, 0.34, 0], [0.54, 0.57, 0.59, 0.54, 0], [0.74, 0.7, 0.79, 0.71, 0], [0.94, 0.9, 0, 0, 0]] ) np.testing.assert_array_almost_equal(pos, expected_pos) np.testing.assert_array_almost_equal(speed, expected_speed) def test_time_space_diagram_ring_road(self): dir_path = os.path.dirname(os.path.realpath(__file__)) emission_data = tsd.import_data_from_emission( os.path.join(dir_path, 'test_files/loop_230_emission.csv')) flow_params = tsd.get_flow_params( os.path.join(dir_path, 'test_files/loop_230.json')) pos, speed, _ = tsd.get_time_space_data(emission_data, flow_params) expected_pos = np.array( [[0.0000e+00, 9.5500e+00, 9.5550e+01, 1.0510e+02, 1.1465e+02, 1.2429e+02, 1.3384e+02, 1.4338e+02, 1.5293e+02, 1.6247e+02, 1.7202e+02, 1.8166e+02, 1.9090e+01, 1.9121e+02, 2.0075e+02, 2.8640e+01, 3.8180e+01, 4.7730e+01, 5.7270e+01, 6.6920e+01, 7.6460e+01, 8.6010e+01], [1.0000e-02, 9.5500e+00, 9.5560e+01, 1.0511e+02, 1.1465e+02, 1.2430e+02, 1.3384e+02, 1.4339e+02, 1.5294e+02, 1.6248e+02, 1.7203e+02, 1.8167e+02, 1.9100e+01, 1.9122e+02, 2.0076e+02, 2.8640e+01, 3.8190e+01, 4.7740e+01, 5.7280e+01, 6.6930e+01, 7.6470e+01, 8.6020e+01], [2.0000e-02, 9.5700e+00, 9.5580e+01, 1.0512e+02, 1.1467e+02, 1.2431e+02, 1.3386e+02, 1.4341e+02, 1.5295e+02, 1.6250e+02, 1.7204e+02, 1.8169e+02, 1.9110e+01, 1.9123e+02, 2.0078e+02, 2.8660e+01, 3.8210e+01, 4.7750e+01, 5.7300e+01, 6.6940e+01, 7.6490e+01, 8.6030e+01], [5.0000e-02, 9.5900e+00, 9.5600e+01, 1.0515e+02, 1.1469e+02, 1.2434e+02, 1.3388e+02, 1.4343e+02, 1.5297e+02, 1.6252e+02, 1.7207e+02, 1.8171e+02, 1.9140e+01, 1.9126e+02, 2.0081e+02, 2.8680e+01, 3.8230e+01, 4.7770e+01, 5.7320e+01, 6.6970e+01, 7.6510e+01, 8.6060e+01], [8.0000e-02, 9.6200e+00, 9.5630e+01, 1.0518e+02, 1.1472e+02, 1.2437e+02, 1.3391e+02, 1.4346e+02, 1.5301e+02, 1.6255e+02, 1.7210e+02, 1.8174e+02, 1.9170e+01, 1.9129e+02, 2.0085e+02, 2.8710e+01, 3.8260e+01, 4.7810e+01, 5.7350e+01, 
6.7000e+01, 7.6540e+01, 8.6090e+01], [1.2000e-01, 9.6600e+00, 9.5670e+01, 1.0522e+02, 1.1476e+02, 1.2441e+02, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00]] ) expected_speed = np.array([ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.1, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08], [0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.2, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16], [0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.29, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23], [0.31, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31, 0.39, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31], [0.41, 0.41, 0.41, 0.41, 0.41, 0.41, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ]) np.testing.assert_array_almost_equal(pos, expected_pos) np.testing.assert_array_almost_equal(speed, expected_speed) def test_plot_ray_results(self): dir_path = os.path.dirname(os.path.realpath(__file__)) file_path = os.path.join(dir_path, 'test_files/progress.csv') parser = prr.create_parser() # test with one column args = parser.parse_args([file_path, 'episode_reward_mean']) prr.plot_progress(args.file, args.columns) # test with several columns args = parser.parse_args([file_path, 'episode_reward_mean', 'episode_reward_min', 'episode_reward_max']) prr.plot_progress(args.file, args.columns) # test with non-existing column name with self.assertRaises(KeyError): args = parser.parse_args([file_path, 'episode_reward']) prr.plot_progress(args.file, args.columns) # test with column containing non-float values with self.assertRaises(ValueError): args = parser.parse_args([file_path, 'info']) prr.plot_progress(args.file, args.columns) # test that script outputs available 
column names if none is given column_names = [ 'episode_reward_max', 'episode_reward_min', 'episode_reward_mean', 'episode_len_mean', 'episodes_this_iter', 'policy_reward_mean', 'custom_metrics', 'sampler_perf', 'off_policy_estimator', 'num_metric_batches_dropped', 'info', 'timesteps_this_iter', 'done', 'timesteps_total', 'episodes_total', 'training_iteration', 'experiment_id', 'date', 'timestamp', 'time_this_iter_s', 'time_total_s', 'pid', 'hostname', 'node_ip', 'config', 'time_since_restore', 'timesteps_since_restore', 'iterations_since_restore' ] temp_stdout = StringIO() with contextlib.redirect_stdout(temp_stdout): args = parser.parse_args([file_path]) prr.plot_progress(args.file, args.columns) output = temp_stdout.getvalue() for column in column_names: self.assertTrue(column in output) if __name__ == '__main__': ray.init(num_cpus=1) unittest.main() ray.shutdown()
{ "repo_name": "cathywu/flow", "path": "tests/fast_tests/test_visualizers.py", "copies": "1", "size": "16891", "license": "mit", "hash": -6031935260031142000, "line_mean": 45.0245231608, "line_max": 79, "alpha_frac": 0.5002072109, "autogenerated": false, "ratio": 2.7831603229527104, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.378336753385271, "avg_score": null, "num_lines": null }
from flowy import finish_order
from flowy import first
from flowy import parallel_reduce
from flowy import restart
from flowy import SWFWorkflowConfig
from flowy import wait

# Shared registry: the @w(...) decorator registers each workflow under the
# given version (and optional explicit name).
w = SWFWorkflowConfig()


@w(version=1)
class NoTask(object):
    """Returns its single argument unchanged; schedules no tasks."""

    def __call__(self, n):
        return n


@w(version=1)
def Closure():
    """Workflow defined as a factory function returning the run callable."""
    def run(n):
        return n
    return run


@w(name='Named', version=1)
class Arguments(object):
    """Echoes its positional and defaulted arguments back as a tuple."""

    def __call__(self, a, b, c=1, d=2):
        return a, b, c, d


class ArgsStructErrors(object):
    """Passes task results nested inside a data structure to another task."""

    def __init__(self, task):
        self.task = task

    def __call__(self):
        a = self.task()
        b = self.task()
        return self.task([[b], a])


class ArgsStructErrorsHandled(object):
    """Like ArgsStructErrors, but converts a raised TaskError into 8."""

    def __init__(self, task):
        self.task = task

    def __call__(self):
        from flowy import TaskError
        a = self.task()
        b = self.task()
        try:
            return wait(self.task([[b], a]))
        except TaskError:
            return 8


class Dependency(object):
    """Chains n sequential task calls, each fed the previous result."""

    def __init__(self, task):
        self.task = task

    def __call__(self, n):
        accumulator = self.task(0)
        for _ in range(n):
            accumulator = self.task(accumulator)
        return accumulator


class Parallel(object):
    """Fans out n independent task calls."""

    def __init__(self, task):
        self.task = task

    def __call__(self, n):
        return list(map(self.task, range(n)))


class UnhandledException(object):
    """Raises from the workflow body itself (no task involved)."""

    def __call__(self):
        raise RuntimeError('err!')


class SingleTask(object):
    """Runs a single task and returns its result proxy."""

    def __init__(self, task):
        self.task = task

    def __call__(self):
        return self.task()


class WaitTask(object):
    """Waits on the first task's result before scheduling the second."""

    def __init__(self, task):
        self.task = task

    def __call__(self):
        a = self.task()
        wait(a)
        b = self.task(a)
        return b


class Restart(object):
    """Restarts the workflow; the pending result `a` is passed to restart."""

    def __init__(self, task):
        self.task = task

    def __call__(self, r=True):
        a = self.task()
        return restart(a, 2)


class PreRun(object):
    """Schedules tasks from __init__, before the workflow body runs."""

    def __init__(self, task):
        self.a = task()
        self.b = task(self.a)

    def __call__(self):
        return self.b


class PreRunError(object):
    """Raises during __init__ so the workflow body is never reached."""

    def __init__(self):
        raise RuntimeError('err!')

    def __call__(self):
        pass


class PreRunWait(object):
    """Waits on a task result inside __init__."""

    def __init__(self, task):
        a = task()
        wait(a)
        self.b = task(a)

    def __call__(self):
        return self.b


class DoubleDep(object):
    """Feeds two task results into a third call via keyword arguments."""

    def __init__(self, task):
        self.task = task

    def __call__(self):
        a = self.task()
        b = self.task()
        c = self.task(a=a, b=b, c=3, d=4)
        return c


class First(object):
    """Returns whichever of two task results finishes first."""

    def __init__(self, task):
        self.task = task

    def __call__(self):
        a = self.task()
        b = self.task()
        return first(a, b)


class First2(object):
    """Returns the first two of four task results, in finish order."""

    def __init__(self, task):
        self.task = task

    def __call__(self):
        a = finish_order([self.task() for _ in range(4)])
        return next(a), next(a)


class ParallelReduce(object):
    """Reduces three parallel task results with the given reducer task."""

    def __init__(self, task, red):
        self.task = task
        self.red = red

    def __call__(self):
        a = self.task()
        b = self.task()
        c = self.task()
        return parallel_reduce(self.red, (a, b, c))


class ParallelReduceCombined(object):
    """Reduces a mix of one task result and plain constant values."""

    def __init__(self, task, red):
        self.task = task
        self.red = red

    def __call__(self):
        a = self.task()
        return parallel_reduce(self.red, (a, u'a', u'b', u'c'))
{ "repo_name": "severb/flowy", "path": "tests/workflows.py", "copies": "1", "size": "3508", "license": "mit", "hash": 5507322535065888000, "line_mean": 18.2747252747, "line_max": 63, "alpha_frac": 0.5344925884, "autogenerated": false, "ratio": 3.49402390438247, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.452851649278247, "avg_score": null, "num_lines": null }
from flowy.local.decision import ActivityDecision
from flowy.local.decision import WorkflowDecision
from flowy.proxy import Proxy
from flowy.swf.history import SWFTaskExecutionHistory as TaskHistory
from flowy.tracer import TracingProxy


class ActivityProxy(object):
    """Callable factory binding an activity identity to its function.

    Calling an instance builds a task proxy scoped to this identity's
    slice of the execution history; when a tracer is supplied the proxy
    is wrapped so calls are recorded.
    """

    def __init__(self, identity, f):
        self.identity = identity
        self.f = f

    def __call__(self, decision, history, tracer):
        history_view = TaskHistory(history, self.identity)
        task_decision = ActivityDecision(decision, self.identity, self.f)
        if tracer is not None:
            return TracingProxy(tracer, self.identity, history_view,
                                task_decision)
        return Proxy(history_view, task_decision)


class WorkflowProxy(object):
    """Same shape as ActivityProxy, but produces workflow decisions."""

    def __init__(self, identity, f):
        self.identity = identity
        self.f = f

    def __call__(self, decision, history, tracer):
        history_view = TaskHistory(history, self.identity)
        task_decision = WorkflowDecision(decision, self.identity, self.f)
        if tracer is not None:
            return TracingProxy(tracer, self.identity, history_view,
                                task_decision)
        return Proxy(history_view, task_decision)
{ "repo_name": "severb/flowy", "path": "flowy/local/proxy.py", "copies": "1", "size": "1043", "license": "mit", "hash": 2443761466914922000, "line_mean": 32.6451612903, "line_max": 68, "alpha_frac": 0.6701821668, "autogenerated": false, "ratio": 3.9358490566037734, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 31 }
from flowy.swf.decision import SWFActivityTaskDecision
from flowy.swf.decision import SWFWorkflowTaskDecision
from flowy.swf.history import SWFTaskExecutionHistory
from flowy.proxy import Proxy
from flowy.utils import DescCounter


class SWFActivityProxyFactory(object):
    """A proxy factory for activities.

    Holds the SWF registration data (name/version/task list) and the
    timeout/retry/serialization configuration for one activity, and
    builds a Proxy bound to one workflow execution on each call.
    """

    def __init__(self, identity, name, version, task_list=None, heartbeat=None,
                 schedule_to_close=None, schedule_to_start=None,
                 start_to_close=None, retry=(0, 0, 0), serialize_input=None,
                 deserialize_result=None):
        # This is a unique name used to generate unique identifiers
        self.identity = identity
        self.name = name
        self.version = version
        self.task_list = task_list
        self.heartbeat = heartbeat
        self.schedule_to_close = schedule_to_close
        self.schedule_to_start = schedule_to_start
        self.start_to_close = start_to_close
        self.retry = retry
        self.serialize_input = serialize_input
        self.deserialize_result = deserialize_result

    def __call__(self, decision, execution_history, rate_limit=DescCounter()):
        """Instantiate Proxy."""
        # NOTE(review): the DescCounter() default is evaluated once, at class
        # definition time, so a single counter is shared by every call that
        # omits rate_limit -- confirm that this sharing is intended.
        task_exec_hist = SWFTaskExecutionHistory(execution_history,
                                                 self.identity)
        task_decision = SWFActivityTaskDecision(decision, execution_history,
                                                self, rate_limit)
        return Proxy(task_exec_hist, task_decision, self.retry,
                     self.serialize_input, self.deserialize_result)


class SWFWorkflowProxyFactory(object):
    """Same as SWFActivityProxy but for sub-workflows."""

    def __init__(self, identity, name, version, task_list=None,
                 workflow_duration=None, decision_duration=None,
                 child_policy=None, retry=(0, 0, 0), serialize_input=None,
                 deserialize_result=None):
        self.identity = identity
        self.name = name
        self.version = version
        self.task_list = task_list
        self.workflow_duration = workflow_duration
        self.decision_duration = decision_duration
        self.child_policy = child_policy
        self.retry = retry
        self.serialize_input = serialize_input
        self.deserialize_result = deserialize_result

    def __call__(self, decision, execution_history, rate_limit):
        """Instantiate Proxy."""
        # NOTE(review): unlike the activity factory, rate_limit has no
        # default here -- callers must always pass one.
        task_exec_hist = SWFTaskExecutionHistory(execution_history,
                                                 self.identity)
        task_decision = SWFWorkflowTaskDecision(decision, execution_history,
                                                self, rate_limit)
        return Proxy(task_exec_hist, task_decision, self.retry,
                     self.serialize_input, self.deserialize_result)
{ "repo_name": "severb/flowy", "path": "flowy/swf/proxy.py", "copies": "1", "size": "2806", "license": "mit", "hash": -2434183517089217000, "line_mean": 40.2647058824, "line_max": 94, "alpha_frac": 0.6325730577, "autogenerated": false, "ratio": 4.357142857142857, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5489715914842856, "avg_score": null, "num_lines": null }
from flowy.swf.decision import task_key, timer_key


class SWFExecutionHistory(object):
    """Query interface over one workflow execution's event state.

    All five dicts/lists are keyed by stringified call keys; every query
    method coerces its argument with str() before the lookup.
    """

    def __init__(self, running, timedout, results, errors, order):
        self.running = running
        self.timedout = timedout
        self.results = results
        self.errors = errors
        self.order_ = order

    def is_running(self, call_key):
        return str(call_key) in self.running

    def order(self, call_key):
        # Position of the call in completion order; raises ValueError if the
        # key is not present in the order list.
        return self.order_.index(str(call_key))

    def has_result(self, call_key):
        return str(call_key) in self.results

    def result(self, call_key):
        return self.results[str(call_key)]

    def is_error(self, call_key):
        return str(call_key) in self.errors

    def error(self, call_key):
        return self.errors[str(call_key)]

    def is_timeout(self, call_key):
        return str(call_key) in self.timedout

    def is_timer_ready(self, call_key):
        # Timers reuse the results/running maps under a timer-specific key.
        return timer_key(call_key) in self.results

    def is_timer_running(self, call_key):
        return timer_key(call_key) in self.running


class SWFTaskExecutionHistory(object):
    """View of an SWFExecutionHistory scoped to one task identity.

    Callers use (call_number, retry_number) pairs; this adapter combines
    them with the identity into the full task key before delegating.
    """

    def __init__(self, exec_history, identity):
        self.exec_history = exec_history
        self.identity = identity

    def __getattr__(self, fname):
        """Compute the key and delegate to exec_history."""
        # Only the known query methods are adapted; anything else falls
        # through to normal attribute resolution (and will ultimately raise
        # AttributeError for unknown names).
        if fname not in ['is_running', 'is_timeout', 'is_error', 'has_result',
                         'result', 'order', 'error']:
            return getattr(super(SWFTaskExecutionHistory, self), fname)
        delegate_to = getattr(self.exec_history, fname)

        # Closure translating (call_number, retry_number) -> full task key.
        def clos(call_number, retry_number):
            return delegate_to(task_key(self.identity, call_number,
                                        retry_number))
        # Bind the closure onto the instance so __getattr__ (which only fires
        # on missing attributes) is skipped on subsequent lookups.
        setattr(self, fname, clos)  # cache it
        return clos
{ "repo_name": "severb/flowy", "path": "flowy/swf/history.py", "copies": "1", "size": "1766", "license": "mit", "hash": -396395748719365900, "line_mean": 29.9824561404, "line_max": 82, "alpha_frac": 0.6262740657, "autogenerated": false, "ratio": 3.694560669456067, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4820834735156067, "avg_score": null, "num_lines": null }
from flowy.swf.scanner import workflow
from flowy.swf.task import ActivityProxy, WorkflowProxy
from flowy.task import TaskError, Workflow


@workflow('SimpleReturnExample', 77, 'example_list')
class SimpleReturn(Workflow):
    """ Does nothing, just returns the argument it receives. """
    def run(self, value='hello'):
        return value


@workflow('ActivityReturnExample', 77, 'example_list')
class ActivityReturn(Workflow):
    """ Returns the value of the activity. """
    identity = ActivityProxy('Identity', 77)

    def run(self):
        return self.identity('activity return')


@workflow('SimpleDependencyExample', 77, 'example_list')
class SimpleDependency(Workflow):
    """ Some tasks that depend on other task results. """
    identity = ActivityProxy('Identity', 77)
    double = ActivityProxy('Double', 77)
    sum = ActivityProxy('Sum', 77)

    def run(self):
        a = self.identity(10)
        b = self.double(a)
        c = self.double(b)
        d = self.identity(100)
        return self.sum(a, b, c, d).result()


@workflow('SequenceExample', 77, 'example_list')
class Sequence(Workflow):
    """ A sequential set of operations. """
    double = ActivityProxy('Double', 77)

    def run(self, n=5):
        n = int(n)  # when starting a workflow from cmdline this is a string
        double = self.double(n)
        while double.result() < 100:
            double = self.double(double)
        return double.result() - 100


@workflow('MapReduceExample', 77, 'example_list')
class MapReduce(Workflow):
    """ A toy map reduce example. """
    square = ActivityProxy('Square', 77)
    sum = ActivityProxy('Sum', 77)

    def run(self, n=5):
        n = int(n)
        squares = map(self.square, range(n))
        return self.sum(*squares)


@workflow('DelayActivityExample', 77, 'example_list')
class Delay(Workflow):
    """ Call tasks with different delays. """
    identity = ActivityProxy('Identity', 77)
    delayed_identity = ActivityProxy('Identity', 77, delay=5)

    def run(self):
        self.identity('no delay')
        self.delayed_identity('5 delay')
        with self.options(delay=10):
            self.identity('10 dealy')


@workflow('UnhandledErrorExample', 77, 'example_list')
class UnhandledError(Workflow):
    """ When a task has an error the workflow will immediately fail. """
    error = ActivityProxy('Error', 77)

    def run(self):
        self.error('I errd!')


@workflow('HandledErrorExample', 77, 'example_list')
class HandledError(Workflow):
    """ A failed task can be intercepted and handled correctly. """
    error = ActivityProxy('Error', 77)
    handled_error = ActivityProxy('Error', 77, error_handling=True)

    def run(self):
        # NOTE(review): reconstructed from whitespace-mangled source; whether
        # the 'b' assignment and the try blocks sat inside the with-block is
        # not recoverable -- verify against upstream.
        with self.options(error_handling=True):
            a = self.error('catch me')
            b = self.handled_error('catch me too')
        try:
            a.result()
        except TaskError:
            pass
        try:
            b.result()
        except TaskError:
            pass


@workflow('ErrorChainingExample', 77, 'example_list')
class ErrorChaining(Workflow):
    """
    Passing the result of a failed task into another task with error
    handling enabled will generate a new fail result.
    """
    error = ActivityProxy('Error', 77)
    identity = ActivityProxy('Identity', 77)

    def run(self):
        # NOTE(review): reconstructed indentation -- verify which statements
        # were inside the with-block against upstream.
        with self.options(error_handling=True):
            a = self.error('err!')
            b = self.identity(a)
            c = self.identity(b)
        try:
            c.result()
        except TaskError:
            pass


@workflow('ErrorResultPassedExample', 77, 'example_list')
class ErrorResultPassed(Workflow):
    """
    Passing the result of a failed task into another task with error
    handling disabled will immediately fail the entire workflow.
    """
    error = ActivityProxy('Error', 77)
    identity = ActivityProxy('Identity', 77)

    def run(self):
        # NOTE(review): the return is placed outside the with-block so that
        # self.identity runs without error handling, matching the docstring;
        # verify against upstream.
        with self.options(error_handling=True):
            a = self.error('err!')
        return self.identity(a).result()


@workflow('ErrorInWorkflowExample', 77, 'example_list')
class ErrorInWorkflow(Workflow):
    """ An unhandled exception in the run method will stop the workflow. """
    def run(self):
        raise ValueError('stop')


@workflow('TimeoutExample', 77, 'example_list')
class Timeout(Workflow):
    """ A task that times out will stop the workflow if it's unhandled. """
    timeout = ActivityProxy('Timeout', 77)

    def run(self):
        self.timeout()


@workflow('SubworkflowExample', 77, 'example_list')
class SubworkflowExample(Workflow):
    """ Start a subworkflow. """
    subwf = WorkflowProxy('SimpleReturnExample', 77)

    def run(self):
        return self.subwf()
{ "repo_name": "severb/flowy-website", "path": "flowy_module/flowy/swf/tests/workflows.py", "copies": "1", "size": "4719", "license": "mit", "hash": 893376598869004800, "line_mean": 26.2774566474, "line_max": 77, "alpha_frac": 0.6361517271, "autogenerated": false, "ratio": 4.012755102040816, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5148906829140816, "avg_score": null, "num_lines": null }
from flTileMpi import mpi
from threading import Lock
import traceback
import time


class GenericSyncer:
    """Broadcasts a data dict from the MPI master node to all other nodes.

    Data sources are polled each frame on the master; the merged dict is
    broadcast, and an optional callback is invoked on every node with the
    received dict.  (Python 2 source.)
    """
    # broadcasts data from the master node to all other nodes.
    def __init__(self, data=None, updateCallback=None, dataSources=None, reportFramerate = True):
        # data sources should have the function storeDataInDict(dict)
        # list of objects to sync to list of positions: [ [a1,a2,a3], [b1,b2] ]
        #
        self.data = data
        self.resultData = None
        self.dataSources = dataSources
        if self.dataSources == None:
            self.dataSources = []
        self.updateCallback=updateCallback
        #if mpi.rank == 0:
        #    self.dataLock = Lock()
        self.reportFramerate = reportFramerate
        self.lastTime = time.time()
        self.reportInterval = 2.0
        self.frameCount = 0

    def update(self, secs, app):
        # Called once per frame; only the master node counts frames for the
        # FPS report.
        self.syncAndApply()
        if mpi.rank == 0 and True == self.reportFramerate:
            self.frameCount += 1
            self.updateFramerateReport()

    def updateFramerateReport(self):
        # Prints the average FPS over the last reportInterval seconds.
        if (time.time() - self.lastTime) > self.reportInterval:
            print "FPS:", self.frameCount / (time.time() - self.lastTime)
            self.lastTime = time.time()
            self.frameCount = 0

    def syncAndApply(self):
        # Get updated data from data sources
        for obj in self.dataSources:
            obj.storeDataInDict(self.data)
        # could lock the data and make a copy before sending, but this is
        # good enough for now
        if mpi.rank == 0:
            self.resultData = mpi.bcast(self.data)
        else:
            self.resultData = mpi.bcast()
        #if len(self.data) > 0:
        #if mpi.rank == 1:
        #    print "Received data:", self.resultData
        if self.updateCallback != None:
            self.updateCallback(self.resultData)

    def getSyncExampleDict(self):
        # Returns a snapshot copy of the dict after polling all sources.
        for obj in self.dataSources:
            obj.storeDataInDict(self.data)
        return dict(self.data)


class Syncer:
    # An older version of the more general syncer above.
    # The "cursors" that are synced are any graphical object whose
    # position needs to be synced across nodes.
    def __init__(self, cursorObjectsListList=None, dataObjList=None):
        # list of objects to sync to list of positions: [ [a1,a2,a3], [b1,b2] ]
        #
        # access: cursor[i][localcopies]
        # only the first object for each cursors is read, all are written
        if cursorObjectsListList == None:
            self.cursorObjects = []
        else:
            self.cursorObjects = cursorObjectsListList
        if dataObjList == None:
            self.dataObjList = []
        else:
            self.dataObjList = dataObjList
        self.data = []
        for obj in self.dataObjList:
            self.data.append([])
        self.cursors = []
        for obj in self.cursorObjects:
            self.cursors.append( (0,0) )
        self.resultCursors = []
        if mpi.rank == 0:
            self.dataLock = Lock()

    def addDataObj(self, obj):
        # should be done on all nodes at once
        self.dataObjList.append(obj)
        self.data.append([])

    def update(self, secs, app):
        mpi.barrier()
        self.syncAndApply()

    def syncAndApply(self):
        # Master gathers cursor positions/scales and data under the lock,
        # broadcasts them, then every node applies the received state to all
        # local copies.
        if mpi.rank == 0:
            self.dataLock.acquire()  # this lock probably isn't necessary yet
            try:
                for i in range(len(self.cursorObjects)):
                    #self.cursors[i] = self.cursorObjects[i][0].getPos()
                    self.cursors[i] = (self.cursorObjects[i][0].getPos(), self.cursorObjects[i][0].getScale() or (1.0,1.0) )
                for i in range(len(self.dataObjList)):
                    self.data[i] = self.dataObjList[i].getData()
                # print "Sending data:", self.data
                #print "syncing %s cursors and %s data." % (len(self.cursorObjects), len(self.dataObjList)), self.dataObjList[0]
            except:
                traceback.print_exc()
            finally:
                self.dataLock.release()
        if mpi.rank == 0:
            self.resultCursors = mpi.bcast(self.cursors)
            if len(self.data) > 0:
                self.resultData = mpi.bcast(self.data)
        else:
            self.resultCursors = mpi.bcast()
            if len(self.data) > 0:
                self.resultData = mpi.bcast()
        #if mpi.rank == 1:
        #    print "Received data:", self.resultData
        if mpi.rank != -1: #ok, done for all
            for i in range(len(self.cursorObjects)):
                if len(self.resultCursors) > i:
                    # access: cursorObjects[i][localcopies]
                    for j in range(len(self.cursorObjects[i])):
                        # set for each local copy
                        # print "Obj data:", self.resultCursors[i]
                        self.cursorObjects[i][j].setPos( self.resultCursors[i][0][0], self.resultCursors[i][0][1])
                        self.cursorObjects[i][j].setScale( (self.resultCursors[i][1][0], self.resultCursors[i][1][1]) )
        if mpi.rank != 0:
            for i in range(len(self.dataObjList)):
                if len(self.resultData) > i:
                    self.dataObjList[i].setData(self.resultData[i])
        #if len(resultQueue) > 0:
        #    print "Bcasting recv:", resultQueue, mpi.rank
{ "repo_name": "rpwagner/tiled-display", "path": "flTile/syncer.py", "copies": "1", "size": "5443", "license": "apache-2.0", "hash": -4157321660668747300, "line_mean": 35.0463576159, "line_max": 128, "alpha_frac": 0.5625574132, "autogenerated": false, "ratio": 3.822331460674157, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4884888873874157, "avg_score": null, "num_lines": null }
from fltk import * import copy import sys if '..' not in sys.path: sys.path.append('..') import Motion.ysMotion as ym import GUI.ysBaseUI as ybu #import GUI.ysViewer3 as yv3 import GUI.seViewer3_ys as yv3 import GUI.tree as tree import Mesh.ysMesh as yms from GUI.ysViewer3 import * Fl.scheme('default') # EVENTS EV_addRenderer = 0 EV_setRendererVisible = 1 EV_addObject = 2 EV_selectObjectElement = 3 EV_selectObject = 4 CAMERA_DISTANCE = 2.8 class MultiSetting(ybu.BaseSettings): def __init__(self): ybu.BaseSettings.__init__(self) self.camera = yv3.Camera().__dict__ self.ortho = False self.viewMode = yv3.VIEW_PERSPECTIVE self.prevRotX = 0 self.prevRotY = 0 def setToApp(self, window): ybu.BaseSettings.setToApp(self, window) window.glWindow1.camera.__dict__ = copy.deepcopy(self.camera) window.glWindow1.projectionOrtho = self.ortho window.glWindow1.viewMode = self.viewMode window.glWindow1.prevRotX = self.prevRotX window.glWindow1.prevRotY = self.prevRotY window.glWindow2.camera.__dict__ = copy.deepcopy(self.camera) window.glWindow2.projectionOrtho = self.ortho window.glWindow2.viewMode = self.viewMode window.glWindow2.prevRotX = self.prevRotX window.glWindow2.prevRotY = self.prevRotY def getFromApp(self, window): ybu.BaseSettings.getFromApp(self, window) self.camera = window.glWindow1.camera.__dict__ self.ortho = window.glWindow1.projectionOrtho self.viewMode = window.glWindow1.viewMode self.prevRotX = window.glWindow1.prevRotX self.prevRotY = window.glWindow1.prevRotY class GlWindow2(yv3.GlWindow): # def viewFromFront(self): # yv3.GlWindow.viewFromFront(self) # self.syncFriend() # def viewFromRight(self): # yv3.GlWindow.viewFromRight(self) # self.syncFriend() # def viewFromTop(self): # yv3.GlWindow.viewFromTop(self) # self.syncFriend() # def viewPerspective(self): # yv3.GlWindow.viewPerspective(self) # self.syncFriend() def __init__(self, x, y, w, h, parent = None): yv3.GlWindow.__init__(self, x, y, w, h, parent) self.syncPos = True def handle(self, e): ret = 
yv3.GlWindow.handle(self, e) if e == FL_DRAG or e == FL_MOUSEWHEEL:# or e == FL_KEYUP: self.syncFriend() return ret def syncFriend(self): if self.syncPos: self.friend.camera.__dict__ = copy.deepcopy(self.camera.__dict__) else: self.friend.camera.rotateY = self.camera.rotateY self.friend.camera.rotateX = self.camera.rotateX self.friend.camera.distance = self.camera.distance self.friend.camera.center[1] = self.camera.center[1] # centerX = self.friend.camera.center[0] # centerZ = self.friend.camera.center[2] # self.friend.camera.__dict__ = self.camera.__dict__ # self.friend.camera.center[0] = 0 # self.friend.camera.center[2] = 0 self.friend.projectionOrtho = self.projectionOrtho self.friend.viewMode = self.viewMode self.friend.prevRotX = self.prevRotX self.friend.prevRotY = self.prevRotY self.friend.redraw() #class MultiViewer(Fl_Window): class MultiViewer(ybu.BaseWnd): # def __init__(self, x, y, w, h): # title = 'MotionViewer' # Fl_Window.__init__(self, x, y, w, h, title) def __init__(self, w=None, h=None, singleView=False, wheelWork=False, reflection=False, shadow=True, title='MultiViewer'): ybu.BaseWnd.__init__(self, None, title, MultiSetting()) if len(self.settingsFile)>0: self.settings.load(self.settingsFile) self.position(self.settings.x, self.settings.y) if w!=None and h!=None: self.size(w, h) else: self.size(self.settings.w, self.settings.h) gap = 4 self.begin() self.glContainer = Fl_Window(0, 0, self.w(), self.h()-55) self.glContainer.begin() if not singleView: self.glWindow1 = GlWindow2(0, 0, self.w()/2-gap/2, self.h()-55, self.glContainer) self.glWindow2 = GlWindow2(self.w()/2+gap/2, 0, self.w()/2, self.h()-55, self.glContainer) else: self.glWindow1 = GlWindow2(0, 0, 0, 0, self.glContainer) self.glWindow1.hide() self.glWindow2 = GlWindow2(0, 0, self.w(), self.h()-55, self.glContainer) self.glContainer.end() self.glContainer.color(FL_BLACK) # self.glWindow1 = GlWindow2(0, 0, self.w()/2-gap/2, self.h()-55, self) # self.glWindow2 = GlWindow2(self.w()/2+gap/2, 
0, self.w()/2, self.h()-55, self) self.panel = ControlPanel(0, self.h()-55, self.w(), 55, self) self.end() if len(self.settingsFile)>0: self.settings.setToApp(self) if len(self.settingsFile)>0: self.settings.load(self.settingsFile) self.position(self.settings.x, self.settings.y) if w!=None and h!=None: self.size(w, h) else: self.size(self.settings.w, self.settings.h) self.resizable(self.glWindow2) self.glWindow1.friend = self.glWindow2 self.glWindow2.friend = self.glWindow1 self.glWindow1.theme = self.glWindow2.theme = yv3.THEME_GREY if not wheelWork: self.glWindow1.camera.distance = CAMERA_DISTANCE self.glWindow2.camera.distance = CAMERA_DISTANCE self.glWindow1.wheelWork = wheelWork self.glWindow1.reflection = reflection self.glWindow1.shadow = shadow self.glWindow2.wheelWork = wheelWork self.glWindow2.reflection = reflection self.glWindow2.shadow = shadow self.initialize() def initialize(self): self.playing = False self.recording = True self.frame = -1 self.maxFrame = 0 self.maxRecordedFrame = 0 self.loaded = False self.motionSystem = None self.glWindow1.renderers = [] self.glWindow2.renderers = [] self.sceneStates1 = [] self.sceneStates2 = [] self.initSceneState1 = None self.initSceneState2 = None self.panel.updateAll() self.initialUpdate = True def setSimulateCallback(self, callback): self.simulateCallback = callback def setPostFrameCallback_Always(self, callback): self.postFrameCallback_Always = callback def setCameraTarget1(self, targetPos): self.glWindow1.camera.center[0] = targetPos[0] self.glWindow1.camera.center[2] = targetPos[2] self.glWindow1.syncPos = False def setCameraTarget2(self, targetPos): self.glWindow2.camera.center[0] = targetPos[0] self.glWindow2.camera.center[2] = targetPos[2] self.glWindow2.syncPos = False def setRenderers1(self, renderers): self.glWindow1.renderers = renderers def setRenderers2(self, renderers): self.glWindow2.renderers = renderers def setMotionSystem(self, motionSystem): self.motionSystem = motionSystem self.loaded = True 
self.setMaxFrame(motionSystem.getMaxFrame()) self.panel.updateControl(self.loaded) def getMaxFrame(self): return self.maxFrame def setMaxFrame(self, maxFrame): self.maxFrame = maxFrame self.panel.updateMaxFrame(maxFrame) # self.recordedData = [None]*(self.maxFrame+1) self.sceneStates1 = [None]*(self.maxFrame+1) self.sceneStates2 = [None]*(self.maxFrame+1) def setCurrentFrame(self, frame): self.frame = frame self.panel.updateFrame(frame) def getCurrentFrame(self): return self.frame def onTimer(self): # if self.initialUpdate: # self.saveInitStates() # self.loadInitStates() # self.initialUpdate = False if self.playing: self.frame += 1 if self.frame > self.maxFrame: # self.frame = 0 self.frame = self.maxFrame self.playing = False self.onFrame(self.frame) if self.timeInterval: Fl.repeat_timeout(self.timeInterval, self.onTimer) def preFrameCallback_Always(self, frame): pass def preFrameCallback(self, frame): pass def simulateCallback(self, frame): pass def postFrameCallback(self, frame): pass def postFrameCallback_Always(self, frame): pass # onFrame -1 def loadInitStates(self): self.glWindow1.setState(self.initSceneState1) self.glWindow2.setState(self.initSceneState2) self.panel.updateFrame(self.frame) self.glWindow1.redraw() self.glWindow2.redraw() def saveInitStates(self): self.initSceneState1 = self.glWindow1.getState() self.initSceneState2 = self.glWindow2.getState() def onFrame(self, frame): if self.motionSystem: self.motionSystem.updateFrame(frame) self.preFrameCallback_Always(frame) # print '[FRAMELOG]onFrame', frame if self.recording: if self.sceneStates1[frame] is None: if frame == 0 or self.sceneStates1[self.frame-1]!=None: self.preFrameCallback(frame) self.simulateCallback(frame) self.postFrameCallback(frame) self.saveFrameStates(frame) self.glWindow1.setState(self.sceneStates1[frame]) self.glWindow2.setState(self.sceneStates2[frame]) self.maxRecordedFrame = frame self.panel.updateRecordedFrame(self.maxRecordedFrame) else: self.glWindow1.setState(None) 
self.glWindow2.setState(None) else: self.loadFrameStates(frame) else: self.preFrameCallback(frame) self.simulateCallback(frame) self.postFrameCallback(frame) self.glWindow1.setState(None) self.glWindow2.setState(None) self.postFrameCallback_Always(frame) self.panel.updateFrame(self.frame) self.glWindow1.redraw() self.glWindow2.redraw() def saveFrameStates(self, frame): self.sceneStates1[frame]= self.glWindow1.getState() self.sceneStates2[frame]= self.glWindow2.getState() def loadFrameStates(self, frame): self.glWindow1.setState(self.sceneStates1[frame]) self.glWindow2.setState(self.sceneStates2[frame]) def deleteFrameStates(self, frame): # print '[FRAMELOG]deletelist', frame self.glWindow1.deleteState(self.sceneStates1[frame]) self.glWindow2.deleteState(self.sceneStates2[frame]) self.sceneStates1[frame] = None self.sceneStates2[frame] = None def startTimer(self, timeInterval): Fl.add_timeout(0.0, self.onTimer) self.timeInterval = timeInterval def endTimer(self): self.timeInterval = None Fl.remove_timeout(self.onTimer) def setTimeInterval(self, timeInterval): self.timeInterval = timeInterval def show(self): Fl_Window.show(self) # ybu.BaseWnd.show(self) self.glWindow1.show() self.glWindow2.show() self.panel.show() def isPlaying(self): return self.playing def play(self): self.playing = True def pause(self): self.playing = False def record(self, recordingOn): self.recording = recordingOn if recordingOn==False: self.resetRecFrom(0) self.panel.updateControl(self.loaded) def resetRecFrom(self, startFrame): for frame in range(startFrame+1, len(self.sceneStates1)): if self.sceneStates1[frame]: self.deleteFrameStates(frame) self.maxRecordedFrame = startFrame self.panel.updateRecordedFrame(self.maxRecordedFrame) def goToFrame(self, frame): self.frame = frame if frame==-1: self.loadInitStates() else: self.onFrame(frame) #class MultiViewer(ybu.BaseWnd): # def __init__(self, rect=None, title='MultiViewer'): # ybu.BaseWnd.__init__(self, rect, title, MultiSetting()) # self.doc = 
SimpleDoc() # self.begin() # self.motionViewWnd1 = MotionViewWnd(0, 0, self.w()/2, self.h(), self.doc) # self.motionViewWnd2 = MotionViewWnd(self.w(), 0, self.w()/2, self.h(), self.doc) # self.end() ## self.resizable(self.motionViewWnd1) # self.size_range(600, 400) # def startTimer(self, timeInterval): # self.motionViewWnd1.startTimer(timeInterval) # def endTimer(self): # self.motionViewWnd1.endTimer() # def setTimeInterval(self, timeInterval): # self.motionViewWnd1.setTimeInterval(timeInterval) # def show(self): # ybu.BaseWnd.show(self) # self.motionViewWnd1.show() # def setPreFrameCallback(self, callback): # self.motionViewWnd1.preFrameCallback = callback # def setPreFrameCallback_Always(self, callback): # self.motionViewWnd1.preFrameCallback_Always = callback # def setSimulateCallback(self, callback): # self.motionViewWnd1.simulateCallback = callback # def setPostFrameCallback(self, callback): # self.motionViewWnd1.postFrameCallback = callback # def setPostFrameCallback_Always(self, callback): # self.motionViewWnd1.postFrameCallback_Always = callback # def setExtraDrawCallback(self, callback): # self.motionViewWnd1.glWindow.extraDrawCallback = callback ## def setRecSimulObjs(self, objs): ## self.motionViewWnd1.setRecSimulObjs(objs) # def getMaxFrame(self): # return self.motionViewWnd1.getMaxFrame() # def setMaxFrame(self, maxFrame): # self.motionViewWnd1.setMaxFrame(maxFrame) # def record(self, bRec): # self.motionViewWnd1.record(bRec) # def play(self): # self.motionViewWnd1.play() # def setCurrentFrame(self, frame): # self.motionViewWnd1.setCurrentFrame(frame) # def getCurrentFrame(self): # return self.motionViewWnd1.getCurrentFrame() # def setCameraTarget(self, targetPos): # self.motionViewWnd1.glWindow.camera.center[0] = targetPos[0] # self.motionViewWnd1.glWindow.camera.center[2] = targetPos[2] # def initialize(self): # self.doc.initialize() # self.motionViewWnd1.initialize() # #class SimpleDoc(ybu.Subject): # def __init__(self): # ybu.Subject.__init__(self) 
# # self.rendererNames = [] # self.rendererMap = {} # self.renderersVisible = {} # # self.motionNames = [] # self.motionMap = {} # self.motionSystem = ym.MotionSystem() # # self.objectNames = [] # self.objectMap = {} # self.selectedObject = None # def initialize(self): # self.removeAllRenderers() # self.removeAllObjects() # def removeAllRenderers(self): # del self.rendererNames[:] # self.rendererMap.clear() # self.renderersVisible.clear() # self.notify(EV_addRenderer) # def removeAllObjects(self): # del self.objectNames[:] # self.objectMap.clear() # self.motionSystem.removeAllMotions() # def addRenderer(self, name, ys_renderer, visible=True): # self.rendererNames.append(name) # self.rendererMap[name] = ys_renderer # self.renderersVisible[name] = visible # self.notify(EV_addRenderer) # def setRendererVisible(self, name, visible): # self.renderersVisible[name] = visible # self.notify(EV_setRendererVisible) # def getVisibleRenderers(self): # ls = [] # for name in self.rendererNames: # if self.renderersVisible[name]: # ls.append(self.rendererMap[name]) # return ls # def addObject(self, name, object): # self.objectNames.append(name) # self.objectMap[name] = object # if isinstance(object, ym.Motion): # self.motionSystem.addMotion(object) # self.notify(EV_addObject) # def selectObjectElement(self, element): # for ys_renderer in self.rendererMap.values(): # ys_renderer.selectedElement = element # self.notify(EV_selectObjectElement) # def selectObject(self, objectName): # self.selectedObject = self.objectMap[objectName] # self.notify(EV_selectObject) # #class MotionViewWnd(yv3.MotionViewer, ybu.Observer): # def __init__(self, x, y, w, h, doc): # yv3.MotionViewer.__init__(self, x, y, w, h) # self.doc = doc # self.doc.attach(self) # def update(self, ev, doc): # if ev==EV_addRenderer or ev==EV_setRendererVisible: # self.setRenderers(doc.getVisibleRenderers()) # elif ev==EV_addObject: # self.setMotionSystem(doc.motionSystem) # self.setStateObjects(doc.objectMap.values()) # 
self.glWindow.redraw() if __name__=='__main__': import psyco; psyco.full() import time import Resource.ysMotionLoader as yf import Renderer.ysRenderer as yr import Resource.ysOgreDataLoader as yol import Mesh.ysMeshUtil as ysu # def test_MultiViewer(): # pointMotion = yf.readTrcFile('../samples/Day7_Session2_Take01_-_walk.trc', .01) # jointMotion = yf.readBvhFile('../samples/wd2_WalkSameSame00.bvh', .01) # # print 'pointSkeleton' # print pointMotion[0].skeleton # print 'jointSkeleton' # print jointMotion[0].skeleton # # viewer = () # viewer.record(False) # viewer.doc.addRenderer('pointMotion', yr.PointMotionRenderer(pointMotion, (0,255,0))) # viewer.doc.addObject('pointMotion', pointMotion) # viewer.doc.addRenderer('jointMotion', yr.JointMotionRenderer(jointMotion, (0,255,0))) # viewer.doc.addObject('jointMotion', jointMotion) # # viewer.startTimer(1/pointMotion.fps) # viewer.show() # # Fl.run() def test_MultiViewer(): import Motion.ysMotion as ym import Resource.ysMotionLoader as yf import Renderer.ysRenderer as yr mmFilePath = '../samples/physics2_WalkSameSame01.mm' pointMotion = yf.readMMFile(mmFilePath) frameTime = 1./30. motionSystem = ym.MotionSystem() motionSystem.addMotion(pointMotion) renderers = [] renderers.append(yr.PointMotionRenderer(pointMotion)) # viewer = MultiViewer() # viewer = MultiViewer(800, 655) # viewer = MultiViewer(800, 655, True) viewer = MultiViewer(1600, 1255) # viewer = Viewer(100, 100, 800, 650, motionSystem, renderers) # viewer.startTimer(frameTime) viewer.show() Fl.run() pass test_MultiViewer()
{ "repo_name": "queid7/hma", "path": "modules/ys_gui/ysMultiViewer.py", "copies": "1", "size": "19456", "license": "mit", "hash": 1407431874492637200, "line_mean": 35.7113207547, "line_max": 126, "alpha_frac": 0.6123046875, "autogenerated": false, "ratio": 3.4867383512544805, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.459904303875448, "avg_score": null, "num_lines": null }