Dataset schema (each record below lists these fields in this order):

  code        string        lengths 3 to 1.05M
  repo_name   string        lengths 5 to 104
  path        string        lengths 4 to 251
  language    categorical   1 value
  license     categorical   15 values
  size        int64         3 to 1.05M
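Assuming this dump comes from a Hugging Face dataset with the schema above, here is a hypothetical loading sketch; the dataset ID is a placeholder, since this excerpt does not name the dataset.

from datasets import load_dataset  # pip install datasets

# "org/python-code-dump" is a placeholder ID, not the actual dataset name.
ds = load_dataset("org/python-code-dump", split="train", streaming=True)
row = next(iter(ds))
print(row["repo_name"], row["path"], row["license"], row["size"])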
import numpy as np
from hamilton_path import HamiltonPath
import matplotlib.pyplot as plt
import time


def generate_matrix_special(N):
    path = np.random.choice(N, N, replace=False)
    X = np.zeros((N, 3))
    for i in range(N):
        X[path[i]][0] = 10 * i
        X[path[i]][1] = np.random.normal(scale=5)
        X[path[i]][2] = np.random.normal(scale=5)
    dist = np.zeros((N, N))
    for i in range(N):
        for j in range(N):
            dist[i][j] = np.linalg.norm(X[i] - X[j])
    best = 0
    for i in range(N - 1):
        best += dist[path[i]][path[i + 1]]
    return dist, best


def generate_matrix(N):
    dist = np.zeros((N, N))
    for i in range(N):
        for j in range(i + 1, N):
            dist[i][j] = dist[j][i] = np.random.uniform(0, 1)
    return dist


def solve(dist, mode):
    start_time = time.time()
    hp = HamiltonPath(dist)
    print(mode + "...")
    if mode == "LKH":
        hp.solve_lkh()
    elif mode == "annealing":
        hp.solve_annealing(steps=10000000)
    return hp.path_weight(), time.time() - start_time


N_range = range(5, 200)
q_lkh = []
t_lkh = []
t_ann = []

for N in N_range:
    print(N)
    dist = generate_matrix(N)
    weight_lkh, time_lkh = solve(dist, "LKH")
    weight_ann, time_ann = solve(dist, "annealing")
    print(weight_ann / weight_lkh)
    q_lkh.append(weight_ann / weight_lkh)
    t_lkh.append(time_lkh)
    t_ann.append(time_ann)

plt.plot(N_range, q_lkh)
plt.xlabel("N", fontsize=15)
plt.ylabel("NDS(Annealing)/NDS(LKH)", fontsize=15)
plt.savefig("lkh_vs_annealing_quality.eps", bbox_inches='tight')
plt.clf()

plt.plot(N_range, t_lkh, label="LKH")
plt.plot(N_range, t_ann, label="Annealing")
plt.xlabel("N", fontsize=15)
plt.ylabel("Time, s", fontsize=15)
plt.legend(loc='best', fontsize=15)
# plt.show()
plt.savefig("lkh_vs_annealing_time.eps", bbox_inches='tight')
repo_name: bigartm/visartm
path: algo/arranging/lkh_test.py
language: Python
license: bsd-3-clause
size: 1,860
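Note that generate_matrix_special above is defined but never called by the benchmark; a sketch of using it to sanity-check a solver against the constructed path's weight (the HamiltonPath API here is taken from the script itself, not verified against the library):

# The constructed instance has a path of known weight `best`; a good solver
# should find a tour of comparable (or lower) total weight.
dist, best = generate_matrix_special(50)
hp = HamiltonPath(dist)
hp.solve_lkh()
print(hp.path_weight() / best)  # expected to be close to (or below) 1.0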
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt

from __future__ import unicode_literals
import frappe
from frappe.model.document import Document


class IndustryType(Document):
    pass
repo_name: indictranstech/focal-erpnext
path: selling/doctype/industry_type/industry_type.py
language: Python
license: agpl-3.0
size: 265
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the copyright holder nor the names of its contributors
#       may be used to endorse or promote products derived from this software without
#       specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUEgressAuditACLEntryTemplatesFetcher
from .fetchers import NUGlobalMetadatasFetcher

from bambou import NURESTObject


class NUEgressAuditACLTemplate(NURESTObject):
    """ Represents a EgressAuditACLTemplate in the VSD

        Notes:
            An egress audit policy is a set of rules defining how network traffic is
            monitored and mirrored from a domain for Audit purposes
    """

    __rest_name__ = "egressauditacltemplate"
    __resource_name__ = "egressauditacltemplates"

    ## Constants
    CONST_POLICY_STATE_DRAFT = "DRAFT"
    CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
    CONST_PRIORITY_TYPE_TOP_AUDIT = "TOP_AUDIT"
    CONST_POLICY_STATE_LIVE = "LIVE"
    CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"

    def __init__(self, **kwargs):
        """ Initializes a EgressAuditACLTemplate instance

            Notes:
                You can specify all parameters while calling this method.
                A special argument named `data` will enable you to load the
                object from a Python dictionary.

            Examples:
                >>> egressauditacltemplate = NUEgressAuditACLTemplate(id=u'xxxx-xxx-xxx-xxx', name=u'EgressAuditACLTemplate')
                >>> egressauditacltemplate = NUEgressAuditACLTemplate(data=my_dict)
        """

        super(NUEgressAuditACLTemplate, self).__init__()

        # Read/Write Attributes
        self._name = None
        self._last_updated_by = None
        self._last_updated_date = None
        self._active = None
        self._default_allow_ip = None
        self._default_allow_non_ip = None
        self._default_install_acl_implicit_rules = None
        self._description = None
        self._embedded_metadata = None
        self._entity_scope = None
        self._policy_state = None
        self._creation_date = None
        self._priority = None
        self._priority_type = None
        self._associated_live_entity_id = None
        self._associated_virtual_firewall_policy_id = None
        self._auto_generate_priority = None
        self._owner = None
        self._external_id = None

        self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="active", remote_name="active", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="default_allow_ip", remote_name="defaultAllowIP", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="default_allow_non_ip", remote_name="defaultAllowNonIP", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="default_install_acl_implicit_rules", remote_name="defaultInstallACLImplicitRules", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="policy_state", remote_name="policyState", attribute_type=str, is_required=False, is_unique=False, choices=[u'DRAFT', u'LIVE'])
        self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="priority", remote_name="priority", attribute_type=int, is_required=False, is_unique=True)
        self.expose_attribute(local_name="priority_type", remote_name="priorityType", attribute_type=str, is_required=False, is_unique=True, choices=[u'TOP_AUDIT'])
        self.expose_attribute(local_name="associated_live_entity_id", remote_name="associatedLiveEntityID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="associated_virtual_firewall_policy_id", remote_name="associatedVirtualFirewallPolicyID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="auto_generate_priority", remote_name="autoGeneratePriority", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)

        # Fetchers
        self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.egress_audit_acl_entry_templates = NUEgressAuditACLEntryTemplatesFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")

        self._compute_args(**kwargs)

    # Properties

    @property
    def name(self):
        """ Get name value. Notes: The name of the entity. """
        return self._name

    @name.setter
    def name(self, value):
        """ Set name value. Notes: The name of the entity. """
        self._name = value

    @property
    def last_updated_by(self):
        """ Get last_updated_by value. Notes: ID of the user who last updated the object. This attribute is named `lastUpdatedBy` in VSD API. """
        return self._last_updated_by

    @last_updated_by.setter
    def last_updated_by(self, value):
        """ Set last_updated_by value. Notes: ID of the user who last updated the object. This attribute is named `lastUpdatedBy` in VSD API. """
        self._last_updated_by = value

    @property
    def last_updated_date(self):
        """ Get last_updated_date value. Notes: Time stamp when this object was last updated. This attribute is named `lastUpdatedDate` in VSD API. """
        return self._last_updated_date

    @last_updated_date.setter
    def last_updated_date(self, value):
        """ Set last_updated_date value. Notes: Time stamp when this object was last updated. This attribute is named `lastUpdatedDate` in VSD API. """
        self._last_updated_date = value

    @property
    def active(self):
        """ Get active value. Notes: If enabled, it means that this ACL or QOS entry is active. """
        return self._active

    @active.setter
    def active(self, value):
        """ Set active value. Notes: If enabled, it means that this ACL or QOS entry is active. """
        self._active = value

    @property
    def default_allow_ip(self):
        """ Get default_allow_ip value. Notes: If enabled a default ACL of Allow All is added as the last entry in the list of ACL entries. This attribute is named `defaultAllowIP` in VSD API. """
        return self._default_allow_ip

    @default_allow_ip.setter
    def default_allow_ip(self, value):
        """ Set default_allow_ip value. Notes: If enabled a default ACL of Allow All is added as the last entry in the list of ACL entries. This attribute is named `defaultAllowIP` in VSD API. """
        self._default_allow_ip = value

    @property
    def default_allow_non_ip(self):
        """ Get default_allow_non_ip value. Notes: If enabled, non ip traffic will be dropped. This attribute is named `defaultAllowNonIP` in VSD API. """
        return self._default_allow_non_ip

    @default_allow_non_ip.setter
    def default_allow_non_ip(self, value):
        """ Set default_allow_non_ip value. Notes: If enabled, non ip traffic will be dropped. This attribute is named `defaultAllowNonIP` in VSD API. """
        self._default_allow_non_ip = value

    @property
    def default_install_acl_implicit_rules(self):
        """ Get default_install_acl_implicit_rules value. Notes: If enabled, implicit rule will allow intra domain traffic by default. This attribute is named `defaultInstallACLImplicitRules` in VSD API. """
        return self._default_install_acl_implicit_rules

    @default_install_acl_implicit_rules.setter
    def default_install_acl_implicit_rules(self, value):
        """ Set default_install_acl_implicit_rules value. Notes: If enabled, implicit rule will allow intra domain traffic by default. This attribute is named `defaultInstallACLImplicitRules` in VSD API. """
        self._default_install_acl_implicit_rules = value

    @property
    def description(self):
        """ Get description value. Notes: A description of the entity. """
        return self._description

    @description.setter
    def description(self, value):
        """ Set description value. Notes: A description of the entity. """
        self._description = value

    @property
    def embedded_metadata(self):
        """ Get embedded_metadata value.

            Notes:
                Metadata objects associated with this entity. This will contain a list of
                Metadata objects if the API request is made using the special flag to enable
                the embedded Metadata feature. Only a maximum of Metadata objects is returned
                based on the value set in the system configuration. This attribute is named
                `embeddedMetadata` in VSD API.
        """
        return self._embedded_metadata

    @embedded_metadata.setter
    def embedded_metadata(self, value):
        """ Set embedded_metadata value. Notes: (see the getter docstring above). """
        self._embedded_metadata = value

    @property
    def entity_scope(self):
        """ Get entity_scope value. Notes: Specify if scope of entity is Data center or Enterprise level. This attribute is named `entityScope` in VSD API. """
        return self._entity_scope

    @entity_scope.setter
    def entity_scope(self, value):
        """ Set entity_scope value. Notes: Specify if scope of entity is Data center or Enterprise level. This attribute is named `entityScope` in VSD API. """
        self._entity_scope = value

    @property
    def policy_state(self):
        """ Get policy_state value. Notes: None. This attribute is named `policyState` in VSD API. """
        return self._policy_state

    @policy_state.setter
    def policy_state(self, value):
        """ Set policy_state value. Notes: None. This attribute is named `policyState` in VSD API. """
        self._policy_state = value

    @property
    def creation_date(self):
        """ Get creation_date value. Notes: Time stamp when this object was created. This attribute is named `creationDate` in VSD API. """
        return self._creation_date

    @creation_date.setter
    def creation_date(self, value):
        """ Set creation_date value. Notes: Time stamp when this object was created. This attribute is named `creationDate` in VSD API. """
        self._creation_date = value

    @property
    def priority(self):
        """ Get priority value. Notes: The priority of the ACL entry that determines the order of entries. """
        return self._priority

    @priority.setter
    def priority(self, value):
        """ Set priority value. Notes: The priority of the ACL entry that determines the order of entries. """
        self._priority = value

    @property
    def priority_type(self):
        """ Get priority_type value. Notes: Possible values: TOP_AUDIT. This will be the top most of the egress ACL stack. This attribute is named `priorityType` in VSD API. """
        return self._priority_type

    @priority_type.setter
    def priority_type(self, value):
        """ Set priority_type value. Notes: Possible values: TOP_AUDIT. This will be the top most of the egress ACL stack. This attribute is named `priorityType` in VSD API. """
        self._priority_type = value

    @property
    def associated_live_entity_id(self):
        """ Get associated_live_entity_id value. Notes: In the draft mode, the ACL entry refers to this LiveEntity. In non-drafted mode, this is null. This attribute is named `associatedLiveEntityID` in VSD API. """
        return self._associated_live_entity_id

    @associated_live_entity_id.setter
    def associated_live_entity_id(self, value):
        """ Set associated_live_entity_id value. Notes: In the draft mode, the ACL entry refers to this LiveEntity. In non-drafted mode, this is null. This attribute is named `associatedLiveEntityID` in VSD API. """
        self._associated_live_entity_id = value

    @property
    def associated_virtual_firewall_policy_id(self):
        """ Get associated_virtual_firewall_policy_id value. Notes: The ID of the Virtual Firewall Policy, if this was created as part of the Virtual Firewall Policy creation. This attribute is named `associatedVirtualFirewallPolicyID` in VSD API. """
        return self._associated_virtual_firewall_policy_id

    @associated_virtual_firewall_policy_id.setter
    def associated_virtual_firewall_policy_id(self, value):
        """ Set associated_virtual_firewall_policy_id value. Notes: The ID of the Virtual Firewall Policy, if this was created as part of the Virtual Firewall Policy creation. This attribute is named `associatedVirtualFirewallPolicyID` in VSD API. """
        self._associated_virtual_firewall_policy_id = value

    @property
    def auto_generate_priority(self):
        """ Get auto_generate_priority value.

            Notes:
                This option only affects how the children ACL entry priorities of this
                template/policy are generated when the priority is not specified. If 'false',
                the priority is generated by incrementing the current highest ACL Entry
                priority by 100. If 'true', a random priority will be generated, which is
                advised when creating many entries concurrently without specifying the
                priority. This will cause the new child ACL entry to get a random,
                non-predictable, priority. Therefore it is advised to only enable this when
                allow rules are being created. If any type of ACL entry order is required,
                keep this value to 'false' and use your own defined priorities, this will
                make sure there is a clear set of priorities and how traffic is validated
                against the ACL entries. This attribute is named `autoGeneratePriority` in
                VSD API.
        """
        return self._auto_generate_priority

    @auto_generate_priority.setter
    def auto_generate_priority(self, value):
        """ Set auto_generate_priority value. Notes: (see the getter docstring above). """
        self._auto_generate_priority = value

    @property
    def owner(self):
        """ Get owner value. Notes: Identifies the user that has created this object. """
        return self._owner

    @owner.setter
    def owner(self, value):
        """ Set owner value. Notes: Identifies the user that has created this object. """
        self._owner = value

    @property
    def external_id(self):
        """ Get external_id value. Notes: External object ID. Used for integration with third party systems. This attribute is named `externalID` in VSD API. """
        return self._external_id

    @external_id.setter
    def external_id(self, value):
        """ Set external_id value. Notes: External object ID. Used for integration with third party systems. This attribute is named `externalID` in VSD API. """
        self._external_id = value
repo_name: nuagenetworks/vspk-python
path: vspk/v6/nuegressauditacltemplate.py
language: Python
license: bsd-3-clause
size: 21,660
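A brief usage sketch exercising only names defined in the generated class above; per the class docstring's own examples, bambou maps constructor keyword arguments onto exposed attributes (no VSD session is involved here):

# Build a template locally and set its state via the class constants.
template = NUEgressAuditACLTemplate(name=u'audit-template')
template.policy_state = NUEgressAuditACLTemplate.CONST_POLICY_STATE_DRAFT
template.priority_type = NUEgressAuditACLTemplate.CONST_PRIORITY_TYPE_TOP_AUDIT
assert template.policy_state == "DRAFT"  # plain property access, set above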
"""Keep this as simple as possible to minimize the possability of error when used within a django settings.py file""" import sys import os def get(var_name, default=False, verbosity=0): """ Get the environment variable or assume a default, but let the user know about the error.""" try: value = os.environ[var_name] if str(value).strip().lower() in ['false', 'no', 'off' '0', 'none', 'null']: return None return value except: if verbosity >= 0: msg = "Unable to find the %s environment variable.\nUsing the value %s (the default) instead.\n" % (var_name, default) if verbosity > 0: from traceback import format_exc sys.stderr.write(format_exc()) sys.stderr.write(msg) return default
repo_name: hobson/pug-nlp
path: pug/nlp/env.py
language: Python
license: mit
size: 816
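A quick usage sketch of the `get` helper above ('MY_FLAG' and 'MISSING_VAR' are placeholder variable names): strings that look falsy map to None, and a missing variable falls back to the default while a note is written to stderr.

import os

os.environ['MY_FLAG'] = 'False'
assert get('MY_FLAG') is None  # 'false' is in the falsy list
assert get('MISSING_VAR', default='fallback') == 'fallback'  # message goes to stderr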
#!/usr/bin/python
# -*- coding: utf-8 -*-

from datetime import datetime
from decimal import ROUND_HALF_EVEN

import pytest

from Registry.RegistryParse import parse_timestamp, parse_windows_timestamp


def test_parse_windows_timestamp():
    tests = {
        # Rounding error in old floating point calculation, which gave 2016-7-14 10:40:00.041864)
        131132256000418650: datetime(2016, 7, 17, 10, 40, 0, 41865),
        # This actually rounds up to microseconds=041866 using 64-bit floating point arithmetic
        131132256000418654: datetime(2016, 7, 17, 10, 40, 0, 41865),
        # Unix epoch
        116444736000000000: datetime(1970, 1, 1, 0, 0, 0, 0),
        # Rounding up to next second
        116444736009999996: datetime(1970, 1, 1, 0, 0, 1, 0),
        # Rounding the last digit which doesn't fit into datetime.microseconds
        116444736000000006: datetime(1970, 1, 1, 0, 0, 0, 1),
        # round up to even
        116444736000000015: datetime(1970, 1, 1, 0, 0, 0, 2),
        # round down to even
        116444736000000005: datetime(1970, 1, 1, 0, 0, 0, 0),
    }

    for timestamp, expected in tests.items():
        actual = parse_windows_timestamp(timestamp)
        assert expected == actual


# HFS timestamps are seconds + 65535ths of seconds since 1 Jan 1904
HFS_EPOCH = datetime(1904, 1, 1)
HFS_RESOLUTION = 65535

# Mac absolute timestamps are seconds since 1 Jan 2001
MAC_EPOCH = datetime(2001, 1, 1)
MAC_RESOLUTION = 1

# NTFS timestamps are hundreds of nanoseconds since 1 Jan 1601
NTFS_EPOCH = datetime(1601, 1, 1)
NTFS_RESOLUTION = int(1e7)

# UNIX timestamps are seconds since 1 Jan 1970
UNIX_EPOCH = datetime(1970, 1, 1)
UNIX_RESOLUTION = 1

HFS_TESTS = {
    # least HFS timestamp
    0: datetime(1904, 1, 1, 0, 0, 0, 0),
    # least nonzero HFS timestamp
    1: datetime(1904, 1, 1, 0, 0, 0, 15),
    65535: datetime(1904, 1, 1, 0, 0, 1),
    136496402790465: datetime(1969, 12, 31, 11, 59, 59),
    136496402856000: datetime(1969, 12, 31, 12, 0, 0),
    136499233968000: datetime(1970, 1, 1, 0, 0, 0),
    233401598681175: datetime(2016, 11, 8, 20, 1, 45),
    233401598707098: datetime(2016, 11, 8, 20, 1, 45, 395560),
    # greatest "low" timestamp
    281470681743360: datetime(2040, 2, 6, 6, 28, 16),
    514872280424535: datetime(2152, 12, 16, 2, 30, 1),
    # greatest HFS timestamp representable as a datetime
    16743219016895999: datetime(9999, 12, 31, 23, 59, 59, 999985)
}

MAC_TESTS = {
    # least Mac absolute timestamp
    0: datetime(2001, 1, 1, 0, 0, 0),
    # least nonzero Mac absolute timestamp
    1: datetime(2001, 1, 1, 0, 0, 1),
    307828812: datetime(2010, 10, 3, 20, 0, 12),
}

NTFS_TESTS = {
    # least NTFS timestamp
    0: datetime(1601, 1, 1, 0, 0, 0, 0),
    # least nonzero NTFS timestamp
    1: datetime(1601, 1, 1, 0, 0, 0, 0),
    # least nonzero NTFS timestamp which doesn't round to the epoch
    10: datetime(1601, 1, 1, 0, 0, 0, 1),
    131467743999999999: datetime(2017, 8, 9, 17, 46, 40),
    # greatest NTFS timestamp representable as a datetime
    2650467743999999994: datetime(9999, 12, 31, 23, 59, 59, 999999)
}

UNIX_TESTS = {
    # least signed 32-bit UNIX timestamp
    -2147483648: datetime(1901, 12, 13, 20, 45, 52),
    # least nonnegative UNIX timestamp
    0: datetime(1970, 1, 1, 0, 0, 0),
    # least nonzero UNIX timestamp
    1: datetime(1970, 1, 1, 0, 0, 1),
    1516799714: datetime(2018, 1, 24, 13, 15, 14),
    # greatest signed 32-bit UNIX timestamp
    2147483647: datetime(2038, 1, 19, 3, 14, 7)
}

TEST_SETS = [
    (HFS_TESTS, HFS_RESOLUTION, HFS_EPOCH, ROUND_HALF_EVEN),
    (MAC_TESTS, MAC_RESOLUTION, MAC_EPOCH, ROUND_HALF_EVEN),
    (NTFS_TESTS, NTFS_RESOLUTION, NTFS_EPOCH, ROUND_HALF_EVEN),
    (UNIX_TESTS, UNIX_RESOLUTION, UNIX_EPOCH, ROUND_HALF_EVEN)
]

TEST_CONFIGS = []
for tests, resolution, epoch, mode in TEST_SETS:
    for tics, expected in tests.items():
        TEST_CONFIGS.append((expected, tics, resolution, epoch, mode))


@pytest.mark.parametrize('expected,tics,resolution,epoch,mode', TEST_CONFIGS)
def test_parse_timestamp(expected, tics, resolution, epoch, mode):
    actual = parse_timestamp(tics, resolution, epoch, mode=mode)
    assert expected == actual
repo_name: williballenthin/python-registry
path: tests/test_parse_timestamp.py
language: Python
license: apache-2.0
size: 4,203
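The four TEST_SETS above all share the same epoch-plus-scaled-ticks model. A minimal sketch of that conversion, rounding half-even to whole microseconds; this is an illustration consistent with the test vectors, not the actual Registry.RegistryParse.parse_timestamp implementation:

from datetime import datetime, timedelta
from decimal import Decimal, ROUND_HALF_EVEN

def parse_timestamp_sketch(tics, resolution, epoch, mode=ROUND_HALF_EVEN):
    # `tics` units at `resolution` units/second, offset from `epoch`; the
    # sub-microsecond remainder is rounded with the requested mode.
    micros = (Decimal(tics) * 1_000_000 / resolution).quantize(Decimal(1), rounding=mode)
    return epoch + timedelta(microseconds=int(micros))

# e.g. parse_timestamp_sketch(1, 65535, datetime(1904, 1, 1)) gives
# datetime(1904, 1, 1, 0, 0, 0, 15), matching HFS_TESTS above.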
# import the basic python packages we need
import os
import sys
import tempfile
import pprint
import traceback

# disable python from generating a .pyc file
sys.dont_write_bytecode = True

# change me to the path of pytan if this script is not running from EXAMPLES/PYTAN_API
pytan_loc = "~/gh/pytan"
pytan_static_path = os.path.join(os.path.expanduser(pytan_loc), 'lib')

# Determine our script name, script dir
my_file = os.path.abspath(sys.argv[0])
my_dir = os.path.dirname(my_file)

# try to automatically determine the pytan lib directory by assuming it is in '../../lib/'
parent_dir = os.path.dirname(my_dir)
pytan_root_dir = os.path.dirname(parent_dir)
lib_dir = os.path.join(pytan_root_dir, 'lib')

# add pytan_loc and lib_dir to the PYTHONPATH variable
path_adds = [lib_dir, pytan_static_path]
[sys.path.append(aa) for aa in path_adds if aa not in sys.path]

# import pytan
import pytan

# create a dictionary of arguments for the pytan handler
handler_args = {}

# establish our connection info for the Tanium Server
handler_args['username'] = "Administrator"
handler_args['password'] = "Tanium2015!"
handler_args['host'] = "10.0.1.240"
handler_args['port'] = "443"  # optional

# optional, level 0 is no output except warnings/errors
# level 1 through 12 are more and more verbose
handler_args['loglevel'] = 1

# optional, use a debug format for the logging output (uses two lines per log entry)
handler_args['debugformat'] = False

# optional, this saves all response objects to handler.session.ALL_REQUESTS_RESPONSES
# very useful for capturing the full exchange of XML requests and responses
handler_args['record_all_requests'] = True

# instantiate a handler using all of the arguments in the handler_args dictionary
print "...CALLING: pytan.handler() with args: {}".format(handler_args)
handler = pytan.Handler(**handler_args)

# print out the handler string
print "...OUTPUT: handler string: {}".format(handler)

# setup the arguments for the handler() class
kwargs = {}
kwargs["export_format"] = u'json'
kwargs["explode_json_string_values"] = u'bad'

# setup the arguments for handler.get()
get_kwargs = {
    'name': [
        "Computer Name",
        "IP Route Details",
        "IP Address",
        'Folder Contents',
    ],
    'objtype': 'sensor',
}

# get the objects that will provide the basetype that we want to use
print "...CALLING: handler.get() with args: {}".format(get_kwargs)
response = handler.get(**get_kwargs)

# store the basetype object as the obj we want to export
kwargs['obj'] = response

# export the object to a string
print "...CALLING: handler.export_obj() with args {}".format(kwargs)
try:
    handler.export_obj(**kwargs)
except Exception as e:
    print "...EXCEPTION: {}".format(e)

# this should throw an exception of type: pytan.exceptions.HandlerError
# uncomment to see full exception
# traceback.print_exc(file=sys.stdout)
repo_name: tanium/pytan
path: BUILD/doc/source/examples/invalid_export_basetype_json_bad_explode_type_code.py
language: Python
license: mit
size: 2,873
"""Functions for discovering and executing various cookiecutter hooks.""" import errno import logging import os import subprocess # nosec import sys import tempfile from cookiecutter import utils from cookiecutter.environment import StrictEnvironment from cookiecutter.exceptions import FailedHookException logger = logging.getLogger(__name__) _HOOKS = [ 'pre_gen_project', 'post_gen_project', ] EXIT_SUCCESS = 0 def valid_hook(hook_file, hook_name): """Determine if a hook file is valid. :param hook_file: The hook file to consider for validity :param hook_name: The hook to find :return: The hook file validity """ filename = os.path.basename(hook_file) basename = os.path.splitext(filename)[0] matching_hook = basename == hook_name supported_hook = basename in _HOOKS backup_file = filename.endswith('~') return matching_hook and supported_hook and not backup_file def find_hook(hook_name, hooks_dir='hooks'): """Return a dict of all hook scripts provided. Must be called with the project template as the current working directory. Dict's key will be the hook/script's name, without extension, while values will be the absolute path to the script. Missing scripts will not be included in the returned dict. :param hook_name: The hook to find :param hooks_dir: The hook directory in the template :return: The absolute path to the hook script or None """ logger.debug('hooks_dir is %s', os.path.abspath(hooks_dir)) if not os.path.isdir(hooks_dir): logger.debug('No hooks/dir in template_dir') return None scripts = [] for hook_file in os.listdir(hooks_dir): if valid_hook(hook_file, hook_name): scripts.append(os.path.abspath(os.path.join(hooks_dir, hook_file))) if len(scripts) == 0: return None return scripts def run_script(script_path, cwd='.'): """Execute a script from a working directory. :param script_path: Absolute path to the script to run. :param cwd: The directory to run the script from. """ run_thru_shell = sys.platform.startswith('win') if script_path.endswith('.py'): script_command = [sys.executable, script_path] else: script_command = [script_path] utils.make_executable(script_path) try: proc = subprocess.Popen(script_command, shell=run_thru_shell, cwd=cwd) # nosec exit_status = proc.wait() if exit_status != EXIT_SUCCESS: raise FailedHookException( 'Hook script failed (exit status: {})'.format(exit_status) ) except OSError as os_error: if os_error.errno == errno.ENOEXEC: raise FailedHookException( 'Hook script failed, might be an empty file or missing a shebang' ) raise FailedHookException('Hook script failed (error: {})'.format(os_error)) def run_script_with_context(script_path, cwd, context): """Execute a script after rendering it with Jinja. :param script_path: Absolute path to the script to run. :param cwd: The directory to run the script from. :param context: Cookiecutter project template context. """ _, extension = os.path.splitext(script_path) with open(script_path, 'r', encoding='utf-8') as file: contents = file.read() with tempfile.NamedTemporaryFile(delete=False, mode='wb', suffix=extension) as temp: env = StrictEnvironment(context=context, keep_trailing_newline=True) template = env.from_string(contents) output = template.render(**context) temp.write(output.encode('utf-8')) run_script(temp.name, cwd) def run_hook(hook_name, project_dir, context): """ Try to find and execute a hook from the specified project directory. :param hook_name: The hook to execute. :param project_dir: The directory to execute the script from. :param context: Cookiecutter project context. 
""" scripts = find_hook(hook_name) if not scripts: logger.debug('No %s hook found', hook_name) return logger.debug('Running hook %s', hook_name) for script in scripts: run_script_with_context(script, project_dir, context)
repo_name: pjbull/cookiecutter
path: cookiecutter/hooks.py
language: Python
license: bsd-3-clause
size: 4,248
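A minimal invocation sketch for the module above; the template and output paths are placeholders, and note that find_hook requires the template root to be the current working directory:

import os

os.chdir('/path/to/template')  # placeholder: the project template root
run_hook(
    'pre_gen_project',
    project_dir='/path/to/output',  # placeholder output directory
    context={'cookiecutter': {'project_slug': 'demo'}},
)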
import logging
from datetime import datetime
from socket import error as SocketError
from hiredis import ProtocolError
import redistrib.command

import config
from models.base import db, commit_session
from models.node import get_by_host_port as get_node_by_host_port
from models.cluster import remove_empty_cluster


# A task execution should return True to indicate it's done
# or False if it needs a second run
# Particularly, slots migrating task may need several runs

def _launch(command, host_port_list):
    redistrib.command.create({(a['host'], a['port']) for a in host_port_list}, max_slots=256)
    return True


def _fix_migrating(_, host, port):
    redistrib.command.fix_migrating(host, port)
    return True


def _join(_, cluster_id, cluster_host, cluster_port, newin_host, newin_port):
    redistrib.command.add_node(cluster_host, cluster_port, newin_host, newin_port)
    n = get_node_by_host_port(newin_host, newin_port)
    if n is None:
        return True
    n.assignee_id = cluster_id
    db.session.add(n)
    commit_session()
    return True


def _replicate(_, cluster_id, master_host, master_port, slave_host, slave_port):
    redistrib.command.replicate(master_host, master_port, slave_host, slave_port)
    n = get_node_by_host_port(slave_host, slave_port)
    if n is None:
        return True
    n.assignee_id = cluster_id
    db.session.add(n)
    commit_session()
    return True


NOT_IN_CLUSTER_MESSAGE = 'not in a cluster'


def _quit(_, cluster_id, host, port):
    try:
        me = redistrib.command.list_nodes(host, port, host)[1]
        if len(me.assigned_slots) != 0:
            raise ValueError('node still holding slots')
        redistrib.command.quit_cluster(host, port)
    except SocketError, e:
        logging.exception(e)
        logging.info('Remove instance from cluster on exception')
    except ProtocolError, e:
        if NOT_IN_CLUSTER_MESSAGE not in e.message:
            raise
    remove_empty_cluster(cluster_id)
    n = get_node_by_host_port(host, port)
    if n is not None:
        n.assignee_id = None
        db.session.add(n)
        commit_session()
    return True


def _migrate_slots(command, src_host, src_port, dst_host, dst_port, slots, start=0):
    while start < len(slots):
        begin = datetime.now()
        redistrib.command.migrate_slots(src_host, src_port, dst_host, dst_port, [slots[start]])
        start += 1
        if (datetime.now() - begin).seconds >= config.POLL_INTERVAL:
            command.args['start'] = start
            command.save()
            commit_session()
            return start == len(slots)
    return True


TASK_MAP = {
    'launch': _launch,
    'fix_migrate': _fix_migrating,
    'migrate': _migrate_slots,
    'join': _join,
    'replicate': _replicate,
    'quit': _quit,
}
repo_name: HunanTV/redis-ctl
path: daemonutils/bgtask.py
language: Python
license: mit
size: 2,964
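TASK_MAP is presumably consumed by a polling executor elsewhere in the daemon. A minimal dispatch sketch, where `task_type` is a hypothetical attribute name (only `command.args` and `command.save()` actually appear in the module above):

def execute_task(command):
    # Look up the handler registered for this task and run it with the
    # command's stored keyword arguments; False means "run me again later".
    handler = TASK_MAP[command.task_type]
    return handler(command, **command.args)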
'''
Created on Dec 23, 2012

@author: Peter

This module contains a few functions for extracting the parameters out of a man page.
'''
import subprocess
import logging

from arguments.valuedarguments import ValuedArguments

TIMEOUT = 3

logger = logging.getLogger('man-fuzzer')


def mineflags(executable):
    '''Returns a set of progargs that can be used to generate arguments in a test case.'''
    # Mine the flags
    valuedarguments = ValuedArguments()
    valuedarguments.parse(_mine_h_flags(executable, TIMEOUT))
    valuedarguments.parse(_mine_H_flags(executable, TIMEOUT))
    valuedarguments.parse(_mine_Help_flags(executable, TIMEOUT))
    valuedarguments.parse(_mine_Man_flags(executable, TIMEOUT))
    return valuedarguments


def _extract_arguments(command, timeout):
    try:
        child = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        child_output = child.communicate(timeout=timeout)
        return repr(child_output)
    except Exception as e:
        logger.exception(e)
        return ""


def _mine_h_flags(executable, timeout):
    return _extract_arguments(str(executable) + " -h", timeout)


def _mine_H_flags(executable, timeout):
    return _extract_arguments(str(executable) + " -H", timeout)


def _mine_Help_flags(executable, timeout):
    return _extract_arguments(str(executable) + " --help", timeout)


def _mine_Man_flags(executable, timeout):
    return _extract_arguments("man " + str(executable), timeout)
repo_name: GroundPound/ManFuzzer
path: manparser/__init__.py
language: Python
license: apache-2.0
size: 1,492
####################################################################################################
# stimulus/core.py
# The stimulus module of the standard cortical observer; core definitions and checks.
# By Noah C. Benson

import numpy as np
import pyrsistent as pyr
import pimms, os, sys, warnings

from ..util import (lookup_labels, units)

from scipy import ndimage as ndi
from scipy.interpolate import (RectBivariateSpline, interp1d)

from skimage import data
from skimage.util import img_as_float

warnings.filterwarnings('ignore', category=UserWarning, message='.*From scipy 0.13.0.*')

@pimms.calc('gamma_correction_function')
def calc_gamma_correction(gamma=None):
    '''
    calc_gamma_correction is a calculator that accepts an optional argument gamma and provides a
    value gamma_correction_function that corrects the contrast of a stimulus presentation.

    Optional afferent values:
      @ gamma May be given, in which case it must be one of:
        - an (n x 2) or (2 x n) matrix such that is equivalent to (potentially after
          transposition) a matrix of (x,y) values where x is the input gamma and y is the
          corrected gamma
        - a vector of corrected gamma values; if the vector u is of length n, then this is
          equivalent to passing a matrix in which the y-values are the elements of u and the
          x-values are evenly spaced values that cover the interval [0,1]; accordingly there
          must be at least 2 elements
        - a function that accepts a number between 0 and 1 and returns the corrected gamma
        By default this is None, and no gamma correction is applied.
    '''
    # First, setup the stimulus_gamma correction:
    if gamma is None:
        return lambda x: x
    elif hasattr(gamma, '__call__'):
        return gamma
    elif hasattr(gamma, '__iter__'):
        vals = np.array(gamma)
        if len(vals.shape) > 2:
            raise ValueError('stimulus_gamma must be 1D or 2D array')
        if len(vals.shape) == 1:
            n = float(vals.shape[0] - 1)
            vals = np.asarray([[float(i)/n for i in range(vals.shape[0])], vals])
        # Okay, assume here that vals is nx2 or 2xn
        if vals.shape[1] != 2:
            vals = vals.T
        # and interpolate these
        return interp1d(vals[:,0], vals[:,1], kind='cubic')
    else:
        raise ValueError('Given stimulus_gamma argument has neither iter nor call attribute')

def import_stimulus(stim, gcf):
    '''
    import_stimulus(stim, gcf) yields the imported image for the given stimulus argument stim;
    stim may be either a filename or an image array; the argument gcf must be the gamma
    correction function.
    '''
    if isinstance(stim, basestring):
        im = np.asarray(data.load(stim), dtype=np.float)
    else:
        im = np.asarray(stim, dtype=np.float)
    if len(im.shape) == 3:
        # average the color channels
        im = np.mean(im, axis=2)
    if len(im.shape) != 2:
        raise ValueError('images must be 2D or 3D matrices')
    # We need to make sure this image is between 0 and 1; if not, we assume it's between 0 and
    # 255; for now it seems safe to automatically detect this
    mx = np.max(im)
    if not np.isclose(mx, 1) and mx > 1:
        im = im/255.0
    # if we were given a color image,
    if gcf is not None:
        im = gcf(im)
    return im

@pimms.calc('stimulus_map', 'stimulus_ordering', cache=True)
def import_stimuli(stimulus, gamma_correction_function):
    '''
    import_stimuli is a calculation that ensures that the stimulus images to be used in the sco
    calculation are properly imported.

    Required afferent values:
      @ stimulus May either be a dict or list of images matrices or a list of image filenames.

    Optional afferent values:
      @ gamma_correction_function May specifies how gamma should be corrected; this should
        usually be provided via the gamma argument (see calc_gamma_correction and gamma).

    Efferent output values:
      @ stimulus_map Will be a persistent dict whose keys are the image identifiers and whose
        values are the image matrices of the imported stimuli prior to normalization or any
        processing.
      @ stimulus_ordering Will be a persistent vector of the keys of stimulus_map in the order
        provided.
    '''
    # Make this into a map so that we have ids and images/filenames
    if not pimms.is_map(stimulus):
        # first task: turn this into a map
        if isinstance(stimulus, basestring):
            stimulus = {stimulus: stimulus}
            order = [stimulus]
        elif hasattr(stimulus, '__iter__'):
            pat = '%%0%dd' % (int(np.log10(len(stimulus))) + 1)
            order = [(pat % i) for i in range(len(stimulus))]
            stimulus = {(pat % i):s for (i,s) in enumerate(stimulus)}
        else:
            raise ValueError('stimulus is not iterable nor a filename')
    else:
        order = stimulus.keys()
    # we can use the stimulus_importer function no matter what the stimulus arguments are
    stim_map = {k:import_stimulus(v, gamma_correction_function)
                for (k,v) in stimulus.iteritems()}
    for u in stim_map.itervalues():
        u.setflags(write=False)
    return {'stimulus_map': pyr.pmap(stim_map), 'stimulus_ordering': pyr.pvector(order)}

def image_apply_aperture(im, radius, center=None, fill_value=0.5, edge_width=10, crop=True):
    '''
    image_apply_aperture(im, rad) yields an image that has been given a circular aperture
    centered at the middle of the image im with the given radius rad in pixels.

    The following options may be given:
      * fill_value (default 0.5) gives the value filled in outside of the aperture
      * crop (default True) indicates whether the image should be cropped after the aperture is
        applied; possible values are a tuple (r,c) indicating the desired size of the resulting
        image; an integer r, equivalent to (r,r); or True, in which case the image is cropped
        such that it just holds the entire aperture (including any smoothed edge).
      * edge_width (default 10) gives the number of pixels over which the aperture should be
        smoothly extended; 0 gives a hard edge, otherwise a half-cosine smoothing is used.
      * center (default None) gives the center of the aperture as a (row, column) value; None
        uses the center of the image.
    '''
    im = np.asarray(im)
    # First, figure out the final image size
    crop = 2*radius if crop is True else crop
    final_sz = crop if isinstance(crop, (tuple, list)) else (crop, crop)
    final_sz = [int(round(x)) for x in final_sz]
    final_im = np.full(final_sz, fill_value)
    # figure out the centers
    center = (0.5*im.shape[0], 0.5*im.shape[1]) if center is None else center
    final_center = (0.5*final_im.shape[0], 0.5*final_im.shape[1])
    # we may have to interpolate pixels, so setup the interpolation; (0,0) in the lower-left:
    interp = RectBivariateSpline(range(im.shape[0]), range(im.shape[1]), im)
    # prepare to interpolate: find the row/col values for the pixels into which we interpolate
    rad2 = radius**2
    final_xy = [(x,y)
                for x in range(final_im.shape[0]) for xx in [(x - final_center[0])**2]
                for y in range(final_im.shape[1]) for yy in [(y - final_center[1])**2]
                if xx + yy <= rad2]
    f2i = float(2 * radius) / float(final_sz[0])
    image_xy = [(x,y)
                for xy in final_xy
                for (dx,dy) in [(xy[0] - final_center[0], xy[1] - final_center[1])]
                for (x,y) in [(dx*f2i + center[0], dy*f2i + center[1])]]
    final_xy = np.transpose(final_xy)
    image_xy = np.transpose(image_xy)
    # pull the interpolated values out of the interp structure:
    z = interp(image_xy[0], image_xy[1], grid=False)
    # and put these values into the final image
    for ((x,y),z) in zip(final_xy.T, z):
        final_im[x,y] = z
    # now, take care of the edge
    if edge_width == 0:
        return final_im
    erad2 = (radius - edge_width)**2
    for r in range(final_im.shape[0]):
        for c in range(final_im.shape[1]):
            r0 = float(r) - final_center[0]
            c0 = float(c) - final_center[1]
            d0 = r0*r0 + c0*c0
            if d0 > erad2 and d0 <= rad2:
                d0 = np.sqrt(d0) - radius + edge_width
                w = 0.5*(1.0 + np.cos(d0 * np.pi / edge_width))
                final_im[r,c] = w*final_im[r,c] + (1.0 - w)*fill_value
    # That's it!
    return final_im

@pimms.calc('image_array', 'image_names', 'pixel_centers', cache=True)
def calc_images(pixels_per_degree, stimulus_map, stimulus_ordering,
                background=0.5, aperture_radius=None, aperture_edge_width=None,
                normalized_pixels_per_degree=None):
    '''
    calc_images() is the calculation that converts the imported_stimuli value into the
    normalized images value.

    Required afferent parameters:
      @ pixels_per_degree Must specify the number of pixels per degree in the input images;
        note that all stimulus images must have the same pixels_per_degree value.
      @ stimulus_map Must be a map whose values are 2D image matrices (see import_stimuli).
      @ stimulus_ordering Must be a list of the stimulus filenames or IDs (used by calc_images
        to ensure the ordering of the resulting image_array datum is correct; see also
        import_stimuli).

    Optional afferent parameters:
      @ background Specifies the background color of the stimulus; by default this is 0.5
        (gray); this is only used if an aperture is applied.
      @ aperture_radius Specifies the radius of the aperture in degrees; by default this is
        None, indicating that no aperture should be used; otherwise the aperture is applied
        after normalizing the images.
      @ aperture_edge_width Specifies the width of the aperture edge in degrees; by default
        this is None; if 0 or None, then no aperture edge is used.
      @ normalized_pixels_per_degree Specifies the resolution of the images used in the
        calculation; by default this is the same as pixels_per_degree.

    Output efferent values:
      @ image_array Will be the 3D numpy array image stack; image_array[i,j,k] is the pixel in
        image i, row j, column k
      @ image_names Will be the list of image names in the same order as the images in
        image_array; the names are derived from the keys of the stimulus_map.
      @ pixel_centers Will be an r x c x 2 numpy matrix with units of degrees specifying the
        center of each pixel (r is the number of rows and c is the number of columns).
    '''
    # first, let's interpret our arguments
    deg2px = float(pimms.mag(pixels_per_degree, 'px/deg'))
    if normalized_pixels_per_degree is None:
        normdeg2px = deg2px
    else:
        normdeg2px = float(pimms.mag(normalized_pixels_per_degree, 'px/deg'))
    # we can get the zoom ratio from these
    zoom_ratio = normdeg2px / deg2px
    # Zoom each image so that the pixels per degree is right:
    if np.isclose(zoom_ratio, 1):
        imgs = stimulus_map
    else:
        imgs = {k:ndi.zoom(im, zoom_ratio, cval=background)
                for (k,im) in stimulus_map.iteritems()}
    maxdims = [np.max([im.shape[i] for im in imgs.itervalues()]) for i in [0,1]]
    # Then apply the aperture
    if aperture_radius is None:
        aperture_radius = (0.5 * np.sqrt(np.dot(maxdims, maxdims))) / normdeg2px
    if aperture_edge_width is None:
        aperture_edge_width = 0
    rad_px = 0
    try:
        rad_px = pimms.mag(aperture_radius, 'deg') * normdeg2px
    except:
        try:
            rad_px = pimms.mag(aperture_radius, 'px')
        except:
            raise ValueError('aperture_radius given in unrecognized units')
    aew_px = 0
    try:
        aew_px = pimms.mag(aperture_edge_width, 'deg') * normdeg2px
    except:
        try:
            aew_px = pimms.mag(aperture_edge_width, 'px')
        except:
            raise ValueError('aperture_edge_width given in unrecognized units')
    bg = background
    imgs = {k:image_apply_aperture(im, rad_px, fill_value=bg, edge_width=aew_px)
            for (k,im) in imgs.iteritems()}
    # Separate out the images and names and
    imar = np.asarray([imgs[k] for k in stimulus_ordering], dtype=np.float)
    imar.setflags(write=False)
    imnm = pyr.pvector(stimulus_ordering)
    # Finally, note the pixel centers
    (rs,cs) = (imar.shape[1], imar.shape[2])
    x0 = (0.5*rs, 0.5*cs)
    (r0s, c0s) = [(np.asarray(range(u)) - 0.5*u + 0.5) / deg2px for u in [rs,cs]]
    pxcs = np.asarray([[(c,-r) for c in c0s] for r in r0s], dtype=np.float)
    pxcs.setflags(write=False)
    return {'image_array': imar, 'image_names': imnm, 'pixel_centers': pxcs}
repo_name: WinawerLab/sco
path: sco/stimulus/core.py
language: Python
license: gpl-3.0
size: 12,847
import os
import requests  # pip install requests

# The authentication key (API Key).
# Get your own by registering at https://app.pdf.co
API_KEY = "******************************************"

# Base URL for PDF.co Web API requests
BASE_URL = "https://api.pdf.co/v1"

# Source PDF file
SourceFile = ".\\sample.pdf"
# Comma-separated list of page indices (or ranges) to process. Leave empty for all pages. Example: '0,2-5,7-'.
Pages = ""
# PDF document password. Leave empty for unprotected documents.
Password = ""
# Destination CSV file name
DestinationFile = ".\\result.csv"

# Some of advanced options available through profiles:
# (JSON can be single/double-quoted and contain comments.)
# {
#     "profiles": [
#         {
#             "profile1": {
#                 "CSVSeparatorSymbol": ",", // Separator symbol.
#                 "CSVQuotaionSymbol": "\"", // Quotation symbol.
#                 "ExtractInvisibleText": true, // Invisible text extraction. Values: true / false
#                 "ExtractShadowLikeText": true, // Shadow-like text extraction. Values: true / false
#                 "LineGroupingMode": "None", // Values: "None", "GroupByRows", "GroupByColumns", "JoinOrphanedRows"
#                 "ColumnDetectionMode": "ContentGroupsAndBorders", // Values: "ContentGroupsAndBorders", "ContentGroups", "Borders", "BorderedTables"
#                 "Unwrap": false, // Unwrap grouped text in table cells. Values: true / false
#                 "ShrinkMultipleSpaces": false, // Shrink multiple spaces in table cells that affect column detection. Values: true / false
#                 "DetectNewColumnBySpacesRatio": 1, // Spacing ratio that affects column detection.
#                 "CustomExtractionColumns": [ 0, 50, 150, 200, 250, 300 ], // Explicitly specify columns coordinates for table extraction.
#                 "CheckPermissions": true, // Ignore document permissions. Values: true / false
#             }
#         }
#     ]
# }

# Advanced Conversation Options
Profiles = "{ 'profiles': [ { 'profile1': { 'TextFromImagesAndVectorsAndFonts': '|' } } ] }"


def main(args=None):
    uploadedFileUrl = uploadFile(SourceFile)
    if (uploadedFileUrl != None):
        convertPdfToCSV(uploadedFileUrl, DestinationFile)


def convertPdfToCSV(uploadedFileUrl, destinationFile):
    """Converts PDF To CSV using PDF.co Web API"""

    # Prepare requests params as JSON
    # See documentation: https://apidocs.pdf.co
    parameters = {}
    parameters["name"] = os.path.basename(destinationFile)
    parameters["password"] = Password
    parameters["pages"] = Pages
    parameters["url"] = uploadedFileUrl
    parameters["profiles"] = Profiles

    # Prepare URL for 'PDF To CSV' API request
    url = "{}/pdf/convert/to/csv".format(BASE_URL)

    # Execute request and get response as JSON
    response = requests.post(url, data=parameters, headers={"x-api-key": API_KEY})
    if (response.status_code == 200):
        json = response.json()
        if json["error"] == False:
            # Get URL of result file
            resultFileUrl = json["url"]
            # Download result file
            r = requests.get(resultFileUrl, stream=True)
            if (r.status_code == 200):
                with open(destinationFile, 'wb') as file:
                    for chunk in r:
                        file.write(chunk)
                print(f"Result file saved as \"{destinationFile}\" file.")
            else:
                print(f"Request error: {response.status_code} {response.reason}")
        else:
            # Show service reported error
            print(json["message"])
    else:
        print(f"Request error: {response.status_code} {response.reason}")


def uploadFile(fileName):
    """Uploads file to the cloud"""

    # 1. RETRIEVE PRESIGNED URL TO UPLOAD FILE.
    # Prepare URL for 'Get Presigned URL' API request
    url = "{}/file/upload/get-presigned-url?contenttype=application/octet-stream&name={}".format(
        BASE_URL, os.path.basename(fileName))

    # Execute request and get response as JSON
    response = requests.get(url, headers={"x-api-key": API_KEY})
    if (response.status_code == 200):
        json = response.json()
        if json["error"] == False:
            # URL to use for file upload
            uploadUrl = json["presignedUrl"]
            # URL for future reference
            uploadedFileUrl = json["url"]

            # 2. UPLOAD FILE TO CLOUD.
            with open(fileName, 'rb') as file:
                requests.put(uploadUrl, data=file, headers={
                    "x-api-key": API_KEY,
                    "content-type": "application/octet-stream"
                })

            return uploadedFileUrl
        else:
            # Show service reported error
            print(json["message"])
    else:
        print(f"Request error: {response.status_code} {response.reason}")

    return None


if __name__ == '__main__':
    main()
repo_name: bytescout/ByteScout-SDK-SourceCode
path: PDF.co Web API/PDF To CSV API/Python/Advanced Conversation Options/ConvertPdfToCSVFromUploadedFile.py
language: Python
license: apache-2.0
size: 4,928
# -*- coding: utf-8 -*
from setuptools.command.install import install
from setuptools import find_packages
from setuptools import setup
from sys import version_info, stderr, exit
import codecs
import sys
import os


def read(*parts):
    # intentionally *not* adding an encoding option to open
    # see here: https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690
    with codecs.open(os.path.join(os.path.abspath(os.path.dirname(__file__)), *parts)) as f:
        return f.read()


setup(name="strictyaml",
      version=read('VERSION').replace('\n', ''),
      description="Strict, typed YAML parser",
      long_description=read('README.md'),
      long_description_content_type="text/markdown",
      classifiers=[
          'Development Status :: 4 - Beta',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Topic :: Text Processing :: Markup',
          'Topic :: Software Development :: Libraries',
          'Natural Language :: English',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.1',
          'Programming Language :: Python :: 3.2',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5',
      ],
      keywords='yaml',
      author='Colm O\'Connor',
      author_email='colm.oconnor.github@gmail.com',
      url='http://hitchdev.com/strictyaml',
      license='MIT',
      install_requires=["python-dateutil>=2.6.0", ],
      packages=find_packages(exclude=["tests", "docs", ]),
      package_data={},
      zip_safe=False,
      include_package_data=True,
      )
repo_name: crdoconnor/strictyaml
path: setup.py
language: Python
license: mit
size: 1,845
# coding=utf-8
"""Tests for medusa/post_processor.py."""
import os

from medusa import app
from medusa.post_processor import PostProcessor as Sut

import pytest


@pytest.mark.parametrize('p', [
    {  # p0: Subtitle with language. No subtitles dir set.
        'new_path': 'media/shows/great show/season 1/',
        'new_basename': 'Great Show - S01E04 - Best Episode',
        'filepath': 'downloads/tv/great.show.s01e04.720p.hdtv.x264-group.en.srt',
        'expected': 'media/shows/great show/season 1/Great Show - S01E04 - Best Episode.en.srt'
    },
    {  # p1: Subtitle without language. No subtitles dir set.
        'new_path': 'media/shows/great show/season 1/',
        'new_basename': 'Great Show - S01E04 - Best Episode',
        'filepath': 'downloads/tv/great.show.s01e04.720p.hdtv.x264-group.srt',
        'expected': 'media/shows/great show/season 1/Great Show - S01E04 - Best Episode.srt'
    },
    {  # p2: Subtitle with language. Absolute subtitles dir set.
        'new_path': 'media/shows/great show/season 1/',
        'new_basename': 'Great Show - S01E04 - Best Episode',
        'filepath': 'downloads/tv/great.show.s01e04.720p.hdtv.x264-group.it.srt',
        'subtitles': {'dir': 'downloads/subtitles/', 'absolute': True},
        'expected': 'Great Show - S01E04 - Best Episode.it.srt'
    },
    {  # p3: Subtitle without language. Absolute subtitles dir set.
        'new_path': 'media/shows/great show/season 1/',
        'new_basename': 'Great Show - S01E04 - Best Episode',
        'filepath': 'downloads/tv/great.show.s01e04.720p.hdtv.x264-group.srt',
        'subtitles': {'dir': 'downloads/subtitles/', 'absolute': True},
        'expected': 'Great Show - S01E04 - Best Episode.srt'
    },
    {  # p4: Subtitle with language. Relative subtitles dir set.
        'new_path': 'media/shows/great show/season 1/',
        'new_basename': 'Great Show - S01E04 - Best Episode',
        'filepath': 'downloads/tv/great.show.s01e04.720p.hdtv.x264-group.it.srt',
        'subtitles': {'dir': 'subs', 'absolute': False},
        'expected': 'media/shows/great show/season 1/subs/Great Show - S01E04 - Best Episode.it.srt'
    },
    {  # p5: Subtitle without language. Relative subtitles dir set.
        'new_path': 'media/shows/great show/season 1/',
        'new_basename': 'Great Show - S01E04 - Best Episode',
        'filepath': 'downloads/tv/great.show.s01e04.720p.hdtv.x264-group.srt',
        'subtitles': {'dir': 'subs', 'absolute': False},
        'expected': 'media/shows/great show/season 1/subs/Great Show - S01E04 - Best Episode.srt'
    },
    {  # p6: Subtitle with language. No subtitles dir set.
        'new_path': 'media/shows/riko or marty/season 3/',
        'new_basename': 'riko.or.marty.s03e05.1080p.web-dl',
        'filepath': 'downloads/tv/riko.or.marty.s03e05.1080p.web-dl.eng.srt',
        'expected': 'media/shows/riko or marty/season 3/riko.or.marty.s03e05.1080p.web-dl.eng.srt'
    },
    {  # p7: Subtitle with language. No subtitles dir set. New basename empty.
        'new_path': 'media/shows/riko or marty/season 3/',
        'filepath': 'downloads/tv/riko.or.marty.s03e05.1080p.web-dl.eng.srt',
        'expected': 'media/shows/riko or marty/season 3/riko.or.marty.s03e05.1080p.web-dl.eng.srt'
    },
    {  # p8: Subtitle with language. No subtitles dir set.
        'new_path': 'media/shows/riko or marty/season 3/',
        'filepath': 'downloads/tv/riko.or.marty.s03e05.1080p.web-dl.PT-BR.srt',
        'expected': 'media/shows/riko or marty/season 3/riko.or.marty.s03e05.1080p.web-dl.pt-BR.srt'
    },
    {  # p9: NFO with renaming. New basename empty.
        'new_path': 'media/shows/riko or marty/season 3/',
        'filepath': 'downloads/tv/riko.or.marty.s03e05.1080p.web-dl.nfo',
        'expected': 'media/shows/riko or marty/season 3/riko.or.marty.s03e05.1080p.web-dl.nfo-orig'
    },
    {  # p10: NFO without renaming
        'new_path': 'media/shows/riko or marty/season 3/',
        'filepath': 'downloads/tv/riko.or.marty.s03e05.1080p.web-dl.nfo',
        'nfo_rename': 0,
        'expected': 'media/shows/riko or marty/season 3/riko.or.marty.s03e05.1080p.web-dl.nfo'
    },
    {  # p11: MKV without new basename
        'new_path': 'media/shows/riko or marty/season 3/',
        'filepath': 'downloads/tv/riko.or.marty.s03e05.1080p.web-dl.mkv',
        'expected': 'media/shows/riko or marty/season 3/riko.or.marty.s03e05.1080p.web-dl.mkv'
    },
    {  # p12: MKV with new basename
        'new_path': 'media/shows/riko or marty/season 3/',
        'new_basename': 'Riko or Marty S03E05 Episode Name',
        'filepath': 'downloads/tv/riko.or.marty.s03e05.1080p.web-dl.mkv',
        'expected': 'media/shows/riko or marty/season 3/Riko or Marty S03E05 Episode Name.mkv'
    },
    {  # p13: Space before subtitle extension
        'new_path': 'media/shows/gomorra/season 3/',
        'new_basename': 'Gomorra S03E15 Episode Name',
        'filepath': 'downloads/tv/Gomorra S03 E11 - x264 .srt',
        'expected': 'media/shows/gomorra/season 3/Gomorra S03E15 Episode Name.srt'
    },
    {  # p14: Subtitle with language tag
        'new_path': 'media/shows/riko or marty/season 3/',
        'filepath': 'downloads/tv/riko.or.marty.s03e05.1080p.web-dl.en-au.srt',
        'expected': 'media/shows/riko or marty/season 3/riko.or.marty.s03e05.1080p.web-dl.en-AU.srt'
    },
])
def test_rename_associated_file(p, create_dir, monkeypatch):
    """Test rename_associated_file."""
    # Given
    new_path = p['new_path']
    new_basename = p.get('new_basename')
    filepath = p['filepath']
    monkeypatch.setattr(app, 'NFO_RENAME', p.get('nfo_rename', 1))
    if p.get('subtitles'):
        # Workaround for absolute subtitles directory
        if p['subtitles']['absolute']:
            subs_dir = create_dir(p['subtitles']['dir'])
            monkeypatch.setattr(app, 'SUBTITLES_DIR', subs_dir)
            p['expected'] = os.path.join(subs_dir, p['expected'])
        else:
            monkeypatch.setattr(app, 'SUBTITLES_DIR', p['subtitles']['dir'])

    # When
    result = Sut.rename_associated_file(new_path, new_basename, filepath)

    # Then
    assert os.path.normcase(result) == os.path.normcase(p['expected'])
fernandog/Medusa
tests/test_rename_associated_file.py
Python
gpl-3.0
6,216
#!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.

This script expects two text files in the directory that is passed as an
argument:

    nodes_main.txt
    nodes_test.txt

These files must consist of lines in the format

    <ip>
    <ip>:<port>
    [<ipv6>]
    [<ipv6>]:<port>
    <onion>.onion
    0xDDBBCCAA (IPv4 little-endian old pnSeeds format)

The output will be two data structures with the peers in binary format:

   static SeedSpec6 pnSeed6_main[]={
   ...
   }
   static SeedSpec6 pnSeed6_test[]={
   ...
   }

These should be pasted into `src/chainparamsseeds.h`.
'''

from base64 import b32decode
from binascii import a2b_hex
import sys
import os
import re

# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD, 0x87, 0xD8, 0x7E, 0xEB, 0x43])

def name_to_ipv6(addr):
    if len(addr) > 6 and addr.endswith('.onion'):
        vchAddr = b32decode(addr[0:-6], True)
        if len(vchAddr) != 16 - len(pchOnionCat):
            raise ValueError('Invalid onion %s' % vchAddr)
        return pchOnionCat + vchAddr
    elif '.' in addr:  # IPv4
        return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
    elif ':' in addr:  # IPv6
        sub = [[], []]  # prefix, suffix
        x = 0
        addr = addr.split(':')
        for i, comp in enumerate(addr):
            if comp == '':
                if i == 0 or i == (len(addr) - 1):  # skip empty component at beginning or end
                    continue
                x += 1  # :: skips to suffix
                assert(x < 2)
            else:  # two bytes per component
                val = int(comp, 16)
                sub[x].append(val >> 8)
                sub[x].append(val & 0xff)
        nullbytes = 16 - len(sub[0]) - len(sub[1])
        assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
        return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
    elif addr.startswith('0x'):  # IPv4-in-little-endian
        return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
    else:
        raise ValueError('Could not parse address %s' % addr)

def parse_spec(s, defaultport):
    # Raw string avoids the invalid-escape warning for '\[' on modern Python.
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
    if match:  # ipv6
        host = match.group(1)
        port = match.group(2)
    elif s.count(':') > 1:  # ipv6, no port
        host = s
        port = ''
    else:
        (host, _, port) = s.partition(':')

    if not port:
        port = defaultport
    else:
        port = int(port)

    host = name_to_ipv6(host)

    return (host, port)

def process_nodes(g, f, structname, defaultport):
    g.write('static SeedSpec6 %s[] = {\n' % structname)
    first = True
    for line in f:
        comment = line.find('#')
        if comment != -1:
            line = line[0:comment]
        line = line.strip()
        if not line:
            continue
        if not first:
            g.write(',\n')
        first = False

        (host, port) = parse_spec(line, defaultport)
        hoststr = ','.join(('0x%02x' % b) for b in host)
        g.write('    {{%s}, %i}' % (hoststr, port))
    g.write('\n};\n')

def main():
    if len(sys.argv) < 2:
        print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
        sys.exit(1)
    g = sys.stdout
    indir = sys.argv[1]
    g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
    g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
    g.write('/**\n')
    g.write(' * List of fixed seed nodes for the bitcoin network\n')
    g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
    g.write(' *\n')
    g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
    g.write(' * IPv4 as well as onion addresses are wrapped inside an IPv6 address accordingly.\n')
    g.write(' */\n')
    with open(os.path.join(indir, 'nodes_main.txt'), 'r', encoding="utf8") as f:
        process_nodes(g, f, 'pnSeed6_main', 9887)
    g.write('\n')
    with open(os.path.join(indir, 'nodes_test.txt'), 'r', encoding="utf8") as f:
        process_nodes(g, f, 'pnSeed6_test', 19887)
    g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')

if __name__ == '__main__':
    main()
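# Usage sketch (illustrative; the exact directory layout is an assumption
# based on the docstring above, not stated in this file):
#
#   python3 generate-seeds.py contrib/seeds > src/chainparamsseeds.h
#
# where contrib/seeds contains nodes_main.txt and nodes_test.txt.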
Bushstar/UFO-Project
contrib/seeds/generate-seeds.py
Python
mit
4,381
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Interface for a USB-connected Monsoon power meter.

http://msoon.com/LabEquipment/PowerMonitor/
Currently Unix-only. Relies on fcntl, /dev, and /tmp.
"""

import collections
import logging
import os
import select
import struct
import time

from telemetry.core import util

util.AddDirToPythonPath(util.GetTelemetryDir(), 'third_party', 'pyserial')
import serial  # pylint: disable=F0401
import serial.tools.list_ports  # pylint: disable=F0401,E0611


Power = collections.namedtuple('Power', ['amps', 'volts'])


class Monsoon:
  """Provides a simple class to use the power meter.

  mon = monsoon.Monsoon()
  mon.SetVoltage(3.7)
  mon.StartDataCollection()
  mydata = []
  while len(mydata) < 1000:
    mydata.extend(mon.CollectData())
  mon.StopDataCollection()
  """

  def __init__(self, device=None, serialno=None, wait=True):
    """Establish a connection to a Monsoon.

    By default, opens the first available port, waiting if none are ready.
    A particular port can be specified with 'device', or a particular Monsoon
    can be specified with 'serialno' (using the number printed on its back).
    With wait=False, IOError is thrown if a device is not immediately
    available.
    """
    assert float(serial.VERSION) >= 2.7, \
        'Monsoon requires pyserial v2.7 or later. You have %s' % serial.VERSION

    self._coarse_ref = self._fine_ref = self._coarse_zero = self._fine_zero = 0
    self._coarse_scale = self._fine_scale = 0
    self._last_seq = 0
    self._voltage_multiplier = None

    if device:
      self.ser = serial.Serial(device, timeout=1)
      return

    while 1:
      for (port, desc, _) in serial.tools.list_ports.comports():
        if not desc.lower().startswith('mobile device power monitor'):
          continue
        tmpname = '/tmp/monsoon.%s.%s' % (os.uname()[0], os.path.basename(port))
        self._tempfile = open(tmpname, 'w')
        try:
          # Use a lockfile to ensure exclusive access.
          # Put the import in here to avoid doing it on unsupported platforms.
          import fcntl
          fcntl.lockf(self._tempfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
          logging.error('device %s is in use', port)
          continue

        try:  # Try to open the device.
          self.ser = serial.Serial(port, timeout=1)
          self.StopDataCollection()  # Just in case.
          self._FlushInput()  # Discard stale input.
          status = self.GetStatus()
        except IOError, e:
          logging.error('error opening device %s: %s', port, e)
          continue

        if not status:
          logging.error('no response from device %s', port)
        elif serialno and status['serialNumber'] != serialno:
          logging.error('device %s is #%d', port, status['serialNumber'])
        else:
          if status['hardwareRevision'] == 1:
            self._voltage_multiplier = 62.5 / 10**6
          else:
            self._voltage_multiplier = 125.0 / 10**6
          return

      self._tempfile = None
      if not wait:
        raise IOError('No device found')
      logging.info('waiting for device...')
      time.sleep(1)

  def GetStatus(self):
    """Requests and waits for status. Returns status dictionary."""

    # status packet format
    STATUS_FORMAT = '>BBBhhhHhhhHBBBxBbHBHHHHBbbHHBBBbbbbbbbbbBH'
    STATUS_FIELDS = [
        'packetType', 'firmwareVersion', 'protocolVersion',
        'mainFineCurrent', 'usbFineCurrent', 'auxFineCurrent', 'voltage1',
        'mainCoarseCurrent', 'usbCoarseCurrent', 'auxCoarseCurrent',
        'voltage2', 'outputVoltageSetting', 'temperature', 'status', 'leds',
        'mainFineResistor', 'serialNumber', 'sampleRate', 'dacCalLow',
        'dacCalHigh', 'powerUpCurrentLimit', 'runTimeCurrentLimit',
        'powerUpTime', 'usbFineResistor', 'auxFineResistor',
        'initialUsbVoltage', 'initialAuxVoltage', 'hardwareRevision',
        'temperatureLimit', 'usbPassthroughMode', 'mainCoarseResistor',
        'usbCoarseResistor', 'auxCoarseResistor', 'defMainFineResistor',
        'defUsbFineResistor', 'defAuxFineResistor', 'defMainCoarseResistor',
        'defUsbCoarseResistor', 'defAuxCoarseResistor', 'eventCode',
        'eventData',
    ]

    self._SendStruct('BBB', 0x01, 0x00, 0x00)
    while 1:  # Keep reading, discarding non-status packets.
      data = self._ReadPacket()
      if not data:
        return None
      if len(data) != struct.calcsize(STATUS_FORMAT) or data[0] != '\x10':
        logging.debug('wanted status, dropped type=0x%02x, len=%d',
                      ord(data[0]), len(data))
        continue

      status = dict(zip(STATUS_FIELDS, struct.unpack(STATUS_FORMAT, data)))
      assert status['packetType'] == 0x10
      for k in status.keys():
        if k.endswith('VoltageSetting'):
          status[k] = 2.0 + status[k] * 0.01
        elif k.endswith('FineCurrent'):
          pass  # Needs calibration data.
        elif k.endswith('CoarseCurrent'):
          pass  # Needs calibration data.
        elif k.startswith('voltage') or k.endswith('Voltage'):
          status[k] = status[k] * 0.000125
        elif k.endswith('Resistor'):
          status[k] = 0.05 + status[k] * 0.0001
          if k.startswith('aux') or k.startswith('defAux'):
            status[k] += 0.05
        elif k.endswith('CurrentLimit'):
          status[k] = 8 * (1023 - status[k]) / 1023.0
      return status

  def SetVoltage(self, v):
    """Set the output voltage, 0 to disable."""
    if v == 0:
      self._SendStruct('BBB', 0x01, 0x01, 0x00)
    else:
      self._SendStruct('BBB', 0x01, 0x01, int((v - 2.0) * 100))

  def SetMaxCurrent(self, i):
    """Set the max output current."""
    assert i >= 0 and i <= 8
    # Use float division; the original (i/8) truncates to zero under
    # Python 2 integer division for any current below 8 A.
    val = 1023 - int((i / 8.0) * 1023)
    self._SendStruct('BBB', 0x01, 0x0a, val & 0xff)
    self._SendStruct('BBB', 0x01, 0x0b, val >> 8)

  def SetUsbPassthrough(self, val):
    """Set the USB passthrough mode: 0 = off, 1 = on, 2 = auto."""
    self._SendStruct('BBB', 0x01, 0x10, val)

  def StartDataCollection(self):
    """Tell the device to start collecting and sending measurement data."""
    self._SendStruct('BBB', 0x01, 0x1b, 0x01)  # Mystery command.
    self._SendStruct('BBBBBBB', 0x02, 0xff, 0xff, 0xff, 0xff, 0x03, 0xe8)

  def StopDataCollection(self):
    """Tell the device to stop collecting measurement data."""
    self._SendStruct('BB', 0x03, 0x00)  # Stop.

  def CollectData(self):
    """Return some current samples. Call StartDataCollection() first."""
    while 1:  # Loop until we get data or a timeout.
      data = self._ReadPacket()
      if not data:
        return None
      if len(data) < 4 + 8 + 1 or data[0] < '\x20' or data[0] > '\x2F':
        logging.debug('wanted data, dropped type=0x%02x, len=%d',
                      ord(data[0]), len(data))
        continue

      seq, packet_type, x, _ = struct.unpack('BBBB', data[:4])
      data = [struct.unpack(">hhhh", data[x:x + 8])
              for x in range(4, len(data) - 8, 8)]

      if self._last_seq and seq & 0xF != (self._last_seq + 1) & 0xF:
        logging.info('data sequence skipped, lost packet?')
      self._last_seq = seq

      if packet_type == 0:
        if not self._coarse_scale or not self._fine_scale:
          logging.info('waiting for calibration, dropped data packet')
          continue

        out = []
        for main, usb, _, voltage in data:
          main_voltage_v = self._voltage_multiplier * (voltage & ~3)
          sample = 0.0
          if main & 1:
            sample += ((main & ~1) - self._coarse_zero) * self._coarse_scale
          else:
            sample += (main - self._fine_zero) * self._fine_scale
          if usb & 1:
            sample += ((usb & ~1) - self._coarse_zero) * self._coarse_scale
          else:
            sample += (usb - self._fine_zero) * self._fine_scale
          out.append(Power(sample, main_voltage_v))
        return out

      elif packet_type == 1:
        self._fine_zero = data[0][0]
        self._coarse_zero = data[1][0]

      elif packet_type == 2:
        self._fine_ref = data[0][0]
        self._coarse_ref = data[1][0]

      else:
        logging.debug('discarding data packet type=0x%02x', packet_type)
        continue

      if self._coarse_ref != self._coarse_zero:
        self._coarse_scale = 2.88 / (self._coarse_ref - self._coarse_zero)
      if self._fine_ref != self._fine_zero:
        self._fine_scale = 0.0332 / (self._fine_ref - self._fine_zero)

  def _SendStruct(self, fmt, *args):
    """Pack a struct (without length or checksum) and send it."""
    data = struct.pack(fmt, *args)
    data_len = len(data) + 1
    checksum = (data_len + sum(struct.unpack('B' * len(data), data))) % 256
    out = struct.pack('B', data_len) + data + struct.pack('B', checksum)
    self.ser.write(out)

  def _ReadPacket(self):
    """Read a single data record as a string (without length or checksum)."""
    len_char = self.ser.read(1)
    if not len_char:
      logging.error('timeout reading from serial port')
      return None
    data_len = ord(len_char)
    if not data_len:
      return ''
    result = self.ser.read(data_len)
    if len(result) != data_len:
      return None
    body = result[:-1]
    checksum = (data_len + sum(struct.unpack('B' * len(body), body))) % 256
    if result[-1] != struct.pack('B', checksum):
      logging.error('invalid checksum from serial port')
      return None
    return result[:-1]

  def _FlushInput(self):
    """Flush all read data until no more available."""
    self.ser.flush()
    flushed = 0
    while True:
      ready_r, _, ready_x = select.select([self.ser], [], [self.ser], 0)
      if len(ready_x) > 0:
        logging.error('exception from serial port')
        return None
      elif len(ready_r) > 0:
        flushed += 1
        self.ser.read(1)  # This may cause underlying buffering.
        self.ser.flush()  # Flush the underlying buffer too.
      else:
        break
    if flushed > 0:
      logging.debug('dropped >%d bytes', flushed)
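# Framing note (added): as _SendStruct/_ReadPacket above show, each packet on
# the wire is <length byte><payload><checksum byte>, where the checksum is
# (length + sum of payload bytes) % 256; _ReadPacket returns only the payload.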
7kbird/chrome
tools/telemetry/telemetry/core/platform/profiler/monsoon.py
Python
bsd-3-clause
10,196
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-29 19:41
from __future__ import unicode_literals

from django.db import migrations


def move_homepage_features_from_opere_to_scritti(apps, schema_editor):
    Scritto = apps.get_model('scritti', 'Scritto')
    Profile = apps.get_model('facade', 'Profile')

    profile = Profile.objects.first()
    for opera in profile.homepage_features.all():
        scritto = Scritto.objects.get(name=opera.name)
        profile.new_homepage_features.add(scritto)
    profile.save()


class Migration(migrations.Migration):

    dependencies = [
        ('facade', '0004_auto_20160229_1938'),
    ]

    operations = [
        migrations.RunPython(
            move_homepage_features_from_opere_to_scritti,
            reverse_code=migrations.RunPython.noop
        ),
    ]
mhotwagner/backstage
facade/migrations/0005_update_homepage_features_to_scritti.py
Python
mit
794
# Natural Language Toolkit: GUI Demo for Glue Semantics with Discourse
#                           Representation Theory (DRT) as meaning language
#
# Author: Dan Garrette <dhgarrette@gmail.com>
#
# Copyright (C) 2001-2017 NLTK Project
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT

from nltk import compat

# Guard the tkinter imports: tkinter might not be available on all platforms.
try:
    from tkinter.font import Font
    from tkinter import (Button, Frame, IntVar, Label, Listbox, Menu,
                         Scrollbar, Tk)
    from nltk.draw.util import CanvasFrame, ShowText
except ImportError:
    """Ignore ImportError because tkinter might not be available."""

from nltk.util import in_idle
from nltk.tag import RegexpTagger
from nltk.parse import MaltParser
from nltk.sem.logic import Variable
from nltk.sem.drt import DrsDrawer, DrtVariableExpression
from nltk.sem.glue import DrtGlue


class DrtGlueDemo(object):
    def __init__(self, examples):
        # Set up the main window.
        self._top = Tk()
        self._top.title('DRT Glue Demo')

        # Set up key bindings.
        self._init_bindings()

        # Initialize the fonts.
        self._error = None
        self._init_fonts(self._top)

        self._examples = examples
        self._readingCache = [None for example in examples]

        # The user can hide the grammar.
        self._show_grammar = IntVar(self._top)
        self._show_grammar.set(1)

        # Set the data to None
        self._curExample = -1
        self._readings = []
        self._drs = None
        self._drsWidget = None
        self._error = None

        self._init_glue()

        # Create the basic frames.
        self._init_menubar(self._top)
        self._init_buttons(self._top)
        self._init_exampleListbox(self._top)
        self._init_readingListbox(self._top)
        self._init_canvas(self._top)

        # Resize callback
        self._canvas.bind('<Configure>', self._configure)

    #########################################
    ##  Initialization Helpers
    #########################################

    def _init_glue(self):
        tagger = RegexpTagger(
            [('^(David|Mary|John)$', 'NNP'),
             ('^(walks|sees|eats|chases|believes|gives|sleeps|chases|persuades|tries|seems|leaves)$', 'VB'),
             ('^(go|order|vanish|find|approach)$', 'VB'),
             ('^(a)$', 'ex_quant'),
             ('^(every)$', 'univ_quant'),
             ('^(sandwich|man|dog|pizza|unicorn|cat|senator)$', 'NN'),
             ('^(big|gray|former)$', 'JJ'),
             ('^(him|himself)$', 'PRP')])

        depparser = MaltParser(tagger=tagger)
        self._glue = DrtGlue(depparser=depparser, remove_duplicates=False)

    def _init_fonts(self, root):
        # See: <http://www.astro.washington.edu/owen/ROTKFolklore.html>
        self._sysfont = Font(font=Button()["font"])
        root.option_add("*Font", self._sysfont)

        # What's our font size (default=same as sysfont)
        self._size = IntVar(root)
        self._size.set(self._sysfont.cget('size'))

        self._boldfont = Font(family='helvetica', weight='bold',
                              size=self._size.get())
        self._font = Font(family='helvetica',
                          size=self._size.get())
        if self._size.get() < 0:
            big = self._size.get() - 2
        else:
            big = self._size.get() + 2
        self._bigfont = Font(family='helvetica', weight='bold', size=big)

    def _init_exampleListbox(self, parent):
        self._exampleFrame = listframe = Frame(parent)
        self._exampleFrame.pack(fill='both', side='left', padx=2)
        self._exampleList_label = Label(self._exampleFrame,
                                        font=self._boldfont,
                                        text='Examples')
        self._exampleList_label.pack()
        self._exampleList = Listbox(self._exampleFrame, selectmode='single',
                                    relief='groove', background='white',
                                    foreground='#909090', font=self._font,
                                    selectforeground='#004040',
                                    selectbackground='#c0f0c0')
        self._exampleList.pack(side='right', fill='both', expand=1)

        for example in self._examples:
            self._exampleList.insert('end', ('  %s' % example))
        self._exampleList.config(height=min(len(self._examples), 25), width=40)

        # Add a scrollbar if there are more than 25 examples.
        if len(self._examples) > 25:
            listscroll = Scrollbar(self._exampleFrame, orient='vertical')
            self._exampleList.config(yscrollcommand=listscroll.set)
            listscroll.config(command=self._exampleList.yview)
            listscroll.pack(side='left', fill='y')

        # If they select a example, apply it.
        self._exampleList.bind('<<ListboxSelect>>', self._exampleList_select)

    def _init_readingListbox(self, parent):
        self._readingFrame = listframe = Frame(parent)
        self._readingFrame.pack(fill='both', side='left', padx=2)
        self._readingList_label = Label(self._readingFrame,
                                        font=self._boldfont,
                                        text='Readings')
        self._readingList_label.pack()
        self._readingList = Listbox(self._readingFrame, selectmode='single',
                                    relief='groove', background='white',
                                    foreground='#909090', font=self._font,
                                    selectforeground='#004040',
                                    selectbackground='#c0f0c0')
        self._readingList.pack(side='right', fill='both', expand=1)

        # Add a scrollbar if there are more than 25 examples.
        listscroll = Scrollbar(self._readingFrame, orient='vertical')
        self._readingList.config(yscrollcommand=listscroll.set)
        listscroll.config(command=self._readingList.yview)
        listscroll.pack(side='right', fill='y')

        self._populate_readingListbox()

    def _populate_readingListbox(self):
        # Populate the listbox with integers
        self._readingList.delete(0, 'end')
        for i in range(len(self._readings)):
            self._readingList.insert('end', ('  %s' % (i + 1)))
        self._readingList.config(height=min(len(self._readings), 25), width=5)

        # If they select a example, apply it.
        self._readingList.bind('<<ListboxSelect>>', self._readingList_select)

    def _init_bindings(self):
        # Key bindings are a good thing.
        self._top.bind('<Control-q>', self.destroy)
        self._top.bind('<Control-x>', self.destroy)
        self._top.bind('<Escape>', self.destroy)

        self._top.bind('n', self.next)
        self._top.bind('<space>', self.next)
        self._top.bind('p', self.prev)
        self._top.bind('<BackSpace>', self.prev)

    def _init_buttons(self, parent):
        # Set up the frames.
        self._buttonframe = buttonframe = Frame(parent)
        buttonframe.pack(fill='none', side='bottom', padx=3, pady=2)
        Button(buttonframe, text='Prev',
               background='#90c0d0', foreground='black',
               command=self.prev,).pack(side='left')
        Button(buttonframe, text='Next',
               background='#90c0d0', foreground='black',
               command=self.next,).pack(side='left')

    def _configure(self, event):
        self._autostep = 0
        (x1, y1, x2, y2) = self._cframe.scrollregion()
        y2 = event.height - 6
        self._canvas['scrollregion'] = '%d %d %d %d' % (x1, y1, x2, y2)
        self._redraw()

    def _init_canvas(self, parent):
        self._cframe = CanvasFrame(parent, background='white',
                                   # width=525, height=250,
                                   closeenough=10,
                                   border=2, relief='sunken')
        self._cframe.pack(expand=1, fill='both', side='top', pady=2)
        canvas = self._canvas = self._cframe.canvas()

        # Initially, there's no tree or text
        self._tree = None
        self._textwidgets = []
        self._textline = None

    def _init_menubar(self, parent):
        menubar = Menu(parent)

        filemenu = Menu(menubar, tearoff=0)
        filemenu.add_command(label='Exit', underline=1,
                             command=self.destroy, accelerator='q')
        menubar.add_cascade(label='File', underline=0, menu=filemenu)

        actionmenu = Menu(menubar, tearoff=0)
        actionmenu.add_command(label='Next', underline=0,
                               command=self.next, accelerator='n, Space')
        actionmenu.add_command(label='Previous', underline=0,
                               command=self.prev, accelerator='p, Backspace')
        menubar.add_cascade(label='Action', underline=0, menu=actionmenu)

        optionmenu = Menu(menubar, tearoff=0)
        optionmenu.add_checkbutton(label='Remove Duplicates', underline=0,
                                   variable=self._glue.remove_duplicates,
                                   command=self._toggle_remove_duplicates,
                                   accelerator='r')
        menubar.add_cascade(label='Options', underline=0, menu=optionmenu)

        viewmenu = Menu(menubar, tearoff=0)
        viewmenu.add_radiobutton(label='Tiny', variable=self._size,
                                 underline=0, value=10, command=self.resize)
        viewmenu.add_radiobutton(label='Small', variable=self._size,
                                 underline=0, value=12, command=self.resize)
        viewmenu.add_radiobutton(label='Medium', variable=self._size,
                                 underline=0, value=14, command=self.resize)
        viewmenu.add_radiobutton(label='Large', variable=self._size,
                                 underline=0, value=18, command=self.resize)
        viewmenu.add_radiobutton(label='Huge', variable=self._size,
                                 underline=0, value=24, command=self.resize)
        menubar.add_cascade(label='View', underline=0, menu=viewmenu)

        helpmenu = Menu(menubar, tearoff=0)
        helpmenu.add_command(label='About', underline=0, command=self.about)
        menubar.add_cascade(label='Help', underline=0, menu=helpmenu)

        parent.config(menu=menubar)

    #########################################
    ##  Main draw procedure
    #########################################

    def _redraw(self):
        canvas = self._canvas

        # Delete the old DRS, widgets, etc.
        if self._drsWidget is not None:
            self._drsWidget.clear()

        if self._drs:
            self._drsWidget = DrsWidget(self._canvas, self._drs)
            self._drsWidget.draw()

        if self._error:
            self._drsWidget = DrsWidget(self._canvas, self._error)
            self._drsWidget.draw()

    #########################################
    ##  Button Callbacks
    #########################################

    def destroy(self, *e):
        self._autostep = 0
        if self._top is None:
            return
        self._top.destroy()
        self._top = None

    def prev(self, *e):
        selection = self._readingList.curselection()
        readingListSize = self._readingList.size()

        # there are readings
        if readingListSize > 0:
            # if one reading is currently selected
            if len(selection) == 1:
                index = int(selection[0])

                # if it's on (or before) the first item
                if index <= 0:
                    self._select_previous_example()
                else:
                    self._readingList_store_selection(index - 1)

            else:
                # select its first reading
                self._readingList_store_selection(readingListSize - 1)

        else:
            self._select_previous_example()

    def _select_previous_example(self):
        # if the current example is not the first example
        if self._curExample > 0:
            self._exampleList_store_selection(self._curExample - 1)
        else:
            # go to the last example
            self._exampleList_store_selection(len(self._examples) - 1)

    def next(self, *e):
        selection = self._readingList.curselection()
        readingListSize = self._readingList.size()

        # if there are readings
        if readingListSize > 0:
            # if one reading is currently selected
            if len(selection) == 1:
                index = int(selection[0])

                # if it's on (or past) the last item
                if index >= (readingListSize - 1):
                    self._select_next_example()
                else:
                    self._readingList_store_selection(index + 1)

            else:
                # select its first reading
                self._readingList_store_selection(0)

        else:
            self._select_next_example()

    def _select_next_example(self):
        # if the current example is not the last example
        if self._curExample < len(self._examples) - 1:
            self._exampleList_store_selection(self._curExample + 1)
        else:
            # go to the first example
            self._exampleList_store_selection(0)

    def about(self, *e):
        ABOUT = ("NLTK Discourse Representation Theory (DRT) "
                 "Glue Semantics Demo\n" +
                 "Written by Daniel H. Garrette")
        TITLE = 'About: NLTK DRT Glue Demo'
        try:
            from tkMessageBox import Message
            Message(message=ABOUT, title=TITLE).show()
        except:
            ShowText(self._top, TITLE, ABOUT)

    def postscript(self, *e):
        self._autostep = 0
        self._cframe.print_to_file()

    def mainloop(self, *args, **kwargs):
        """
        Enter the Tkinter mainloop.  This function must be called if
        this demo is created from a non-interactive program (e.g.
        from a script); otherwise, the demo will close as soon as
        the script completes.
        """
        if in_idle():
            return
        self._top.mainloop(*args, **kwargs)

    def resize(self, size=None):
        if size is not None:
            self._size.set(size)
        size = self._size.get()

        self._font.configure(size=-(abs(size)))
        self._boldfont.configure(size=-(abs(size)))
        self._sysfont.configure(size=-(abs(size)))
        self._bigfont.configure(size=-(abs(size + 2)))
        self._redraw()

    def _toggle_remove_duplicates(self):
        self._glue.remove_duplicates = not self._glue.remove_duplicates

        self._exampleList.selection_clear(0, 'end')
        self._readings = []
        self._populate_readingListbox()
        self._readingCache = [None for ex in self._examples]
        self._curExample = -1
        self._error = None

        self._drs = None
        self._redraw()

    def _exampleList_select(self, event):
        selection = self._exampleList.curselection()
        if len(selection) != 1:
            return
        self._exampleList_store_selection(int(selection[0]))

    def _exampleList_store_selection(self, index):
        self._curExample = index
        example = self._examples[index]

        self._exampleList.selection_clear(0, 'end')
        if example:
            cache = self._readingCache[index]
            if cache:
                if isinstance(cache, list):
                    self._readings = cache
                    self._error = None
                else:
                    self._readings = []
                    self._error = cache
            else:
                try:
                    self._readings = self._glue.parse_to_meaning(example)
                    self._error = None
                    self._readingCache[index] = self._readings
                except Exception as e:
                    self._readings = []
                    self._error = DrtVariableExpression(
                        Variable('Error: ' + str(e)))
                    self._readingCache[index] = self._error

                    # add a star to the end of the example
                    self._exampleList.delete(index)
                    self._exampleList.insert(index, ('  %s *' % example))
                    self._exampleList.config(
                        height=min(len(self._examples), 25), width=40)

            self._populate_readingListbox()

            self._exampleList.selection_set(index)

            self._drs = None
            self._redraw()

    def _readingList_select(self, event):
        selection = self._readingList.curselection()
        if len(selection) != 1:
            return
        self._readingList_store_selection(int(selection[0]))

    def _readingList_store_selection(self, index):
        reading = self._readings[index]

        self._readingList.selection_clear(0, 'end')
        if reading:
            self._readingList.selection_set(index)

            self._drs = reading.simplify().normalize().resolve_anaphora()

            self._redraw()


class DrsWidget(object):
    def __init__(self, canvas, drs, **attribs):
        self._drs = drs
        self._canvas = canvas
        canvas.font = Font(
            font=canvas.itemcget(canvas.create_text(0, 0, text=''), 'font'))
        canvas._BUFFER = 3
        self.bbox = (0, 0, 0, 0)

    def draw(self):
        (right, bottom) = DrsDrawer(self._drs, canvas=self._canvas).draw()
        self.bbox = (0, 0, right + 1, bottom + 1)

    def clear(self):
        self._canvas.create_rectangle(self.bbox, fill="white", width="0")


def demo():
    examples = ['John walks',
                'David sees Mary',
                'David eats a sandwich',
                'every man chases a dog',
                # 'every man believes a dog yawns',
                # 'John gives David a sandwich',
                'John chases himself',
                # 'John persuades David to order a pizza',
                # 'John tries to go',
                # 'John tries to find a unicorn',
                # 'John seems to vanish',
                # 'a unicorn seems to approach',
                # 'every big cat leaves',
                # 'every gray cat leaves',
                # 'every big gray cat leaves',
                # 'a former senator leaves',
                # 'John likes a cat',
                # 'John likes every cat',
                # 'he walks',
                # 'John walks and he leaves'
                ]
    DrtGlueDemo(examples).mainloop()


if __name__ == '__main__':
    demo()
sdoran35/hate-to-hugs
venv/lib/python3.6/site-packages/nltk/sem/drt_glue_demo.py
Python
mit
18,416
from visual import *
from math import *

# Time step and gravitational parameter (G*M, equal masses assumed).
dt = 0.01
GM = 5000

p1 = sphere(pos=(-100, 0, 0), radius=5, color=color.red)
p2 = sphere(pos=(100, 0, 0), radius=5, color=color.green)
p3 = sphere(pos=(0, 0, 100*sqrt(3)), radius=5, color=color.blue)
p4 = sphere(pos=(0, 0, 0), radius=3, color=color.white)

p1.velocity = vector(2.5, 0, -2.5*sqrt(3))
p2.velocity = vector(2.5, 0, 2.5*sqrt(3))
p3.velocity = vector(-5, 0, 0)
p4.velocity = vector(0, 0, 0)

p1.trail = curve(color=p1.color)

R12 = sqrt((p1.pos.x-p2.pos.x)**2+(p1.pos.y-p2.pos.y)**2+(p1.pos.z-p2.pos.z)**2)
R13 = sqrt((p1.pos.x-p3.pos.x)**2+(p1.pos.y-p3.pos.y)**2+(p1.pos.z-p3.pos.z)**2)
R23 = sqrt((p3.pos.x-p2.pos.x)**2+(p3.pos.y-p2.pos.y)**2+(p3.pos.z-p2.pos.z)**2)

# Initial potential, kinetic and total energy (per unit mass); the stray
# '++' in the original kinetic-energy expression has been reduced to '+'.
Epi = -GM*R12**(-1)-GM*R13**(-1)-GM*R23**(-1)
Eki = 0.5*(p1.velocity.x**2+p1.velocity.y**2+p1.velocity.z**2 +
           p2.velocity.x**2+p2.velocity.y**2+p2.velocity.z**2 +
           p3.velocity.x**2+p3.velocity.y**2+p3.velocity.z**2)
Ei = Epi+Eki
print R12, Ei

for i in range(100000):
    rate(5000)

    # Pairwise distances at the start of the step.
    R12 = sqrt((p1.pos.x-p2.pos.x)**2+(p1.pos.y-p2.pos.y)**2+(p1.pos.z-p2.pos.z)**2)
    R13 = sqrt((p1.pos.x-p3.pos.x)**2+(p1.pos.y-p3.pos.y)**2+(p1.pos.z-p3.pos.z)**2)
    R23 = sqrt((p3.pos.x-p2.pos.x)**2+(p3.pos.y-p2.pos.y)**2+(p3.pos.z-p2.pos.z)**2)
    R14 = sqrt((p1.pos.x-p4.pos.x)**2+(p1.pos.y-p4.pos.y)**2+(p1.pos.z-p4.pos.z)**2)
    R24 = sqrt((p2.pos.x-p4.pos.x)**2+(p2.pos.y-p4.pos.y)**2+(p2.pos.z-p4.pos.z)**2)
    R34 = sqrt((p3.pos.x-p4.pos.x)**2+(p3.pos.y-p4.pos.y)**2+(p3.pos.z-p4.pos.z)**2)

    # Midpoint positions and provisional velocities. 0.5*dt replaces the
    # original 1/2*dt, which is integer division (== 0) under Python 2 and
    # silently skipped the half-step.
    p1posxm = p1.pos.x+0.5*dt*p1.velocity.x
    p1velocityxm = p1.velocity.x+dt*(p2.pos.x-p1.pos.x)*GM*R12**(-3)+dt*(p3.pos.x-p1.pos.x)*GM*R13**(-3)
    p1posym = p1.pos.y+0.5*dt*p1.velocity.y
    p1velocityym = p1.velocity.y+dt*(p2.pos.y-p1.pos.y)*GM*R12**(-3)+dt*(p3.pos.y-p1.pos.y)*GM*R13**(-3)
    p1poszm = p1.pos.z+0.5*dt*p1.velocity.z
    p1velocityzm = p1.velocity.z+dt*(p2.pos.z-p1.pos.z)*GM*R12**(-3)+dt*(p3.pos.z-p1.pos.z)*GM*R13**(-3)

    p2posxm = p2.pos.x+0.5*dt*p2.velocity.x
    p2velocityxm = p2.velocity.x+dt*(p1.pos.x-p2.pos.x)*GM*R12**(-3)+dt*(p3.pos.x-p2.pos.x)*GM*R23**(-3)
    p2posym = p2.pos.y+0.5*dt*p2.velocity.y
    p2velocityym = p2.velocity.y+dt*(p1.pos.y-p2.pos.y)*GM*R12**(-3)+dt*(p3.pos.y-p2.pos.y)*GM*R23**(-3)
    p2poszm = p2.pos.z+0.5*dt*p2.velocity.z
    p2velocityzm = p2.velocity.z+dt*(p1.pos.z-p2.pos.z)*GM*R12**(-3)+dt*(p3.pos.z-p2.pos.z)*GM*R23**(-3)

    p3posxm = p3.pos.x+0.5*dt*p3.velocity.x
    p3velocityxm = p3.velocity.x+dt*(p1.pos.x-p3.pos.x)*GM*R13**(-3)+dt*(p2.pos.x-p3.pos.x)*GM*R23**(-3)
    p3posym = p3.pos.y+0.5*dt*p3.velocity.y
    p3velocityym = p3.velocity.y+dt*(p1.pos.y-p3.pos.y)*GM*R13**(-3)+dt*(p2.pos.y-p3.pos.y)*GM*R23**(-3)
    p3poszm = p3.pos.z+0.5*dt*p3.velocity.z
    p3velocityzm = p3.velocity.z+dt*(p1.pos.z-p3.pos.z)*GM*R13**(-3)+dt*(p2.pos.z-p3.pos.z)*GM*R23**(-3)

    p4posxm = p4.pos.x+0.5*dt*p4.velocity.x
    p4velocityxm = p4.velocity.x+dt*(p1.pos.x-p4.pos.x)*GM*R14**(-3)+dt*(p2.pos.x-p4.pos.x)*GM*R24**(-3)+dt*(p3.pos.x-p4.pos.x)*GM*R34**(-3)
    p4posym = p4.pos.y+0.5*dt*p4.velocity.y
    p4velocityym = p4.velocity.y+dt*(p1.pos.y-p4.pos.y)*GM*R14**(-3)+dt*(p2.pos.y-p4.pos.y)*GM*R24**(-3)+dt*(p3.pos.y-p4.pos.y)*GM*R34**(-3)
    p4poszm = p4.pos.z+0.5*dt*p4.velocity.z
    p4velocityzm = p4.velocity.z+dt*(p1.pos.z-p4.pos.z)*GM*R14**(-3)+dt*(p2.pos.z-p4.pos.z)*GM*R24**(-3)+dt*(p3.pos.z-p4.pos.z)*GM*R34**(-3)

    # Full-step updates using the midpoint values.
    p1.pos.x = p1.pos.x+p1velocityxm*dt
    p1.pos.y = p1.pos.y+p1velocityym*dt
    p1.pos.z = p1.pos.z+p1velocityzm*dt
    p1.velocity.x = p1.velocity.x+dt*(p2posxm-p1posxm)*GM*R12**(-3)+dt*(p3posxm-p1posxm)*GM*R13**(-3)
    p1.velocity.y = p1.velocity.y+dt*(p2posym-p1posym)*GM*R12**(-3)+dt*(p3posym-p1posym)*GM*R13**(-3)
    p1.velocity.z = p1.velocity.z+dt*(p2poszm-p1poszm)*GM*R12**(-3)+dt*(p3poszm-p1poszm)*GM*R13**(-3)

    p2.pos.x = p2.pos.x+p2velocityxm*dt
    p2.pos.y = p2.pos.y+p2velocityym*dt
    p2.pos.z = p2.pos.z+p2velocityzm*dt
    p2.velocity.x = p2.velocity.x+dt*(p1posxm-p2posxm)*GM*R12**(-3)+dt*(p3posxm-p2posxm)*GM*R23**(-3)
    p2.velocity.y = p2.velocity.y+dt*(p1posym-p2posym)*GM*R12**(-3)+dt*(p3posym-p2posym)*GM*R23**(-3)
    p2.velocity.z = p2.velocity.z+dt*(p1poszm-p2poszm)*GM*R12**(-3)+dt*(p3poszm-p2poszm)*GM*R23**(-3)

    p3.pos.x = p3.pos.x+p3velocityxm*dt
    p3.pos.y = p3.pos.y+p3velocityym*dt
    p3.pos.z = p3.pos.z+p3velocityzm*dt
    p3.velocity.x = p3.velocity.x+dt*(p1posxm-p3posxm)*GM*R13**(-3)+dt*(p2posxm-p3posxm)*GM*R23**(-3)
    p3.velocity.y = p3.velocity.y+dt*(p1posym-p3posym)*GM*R13**(-3)+dt*(p2posym-p3posym)*GM*R23**(-3)
    p3.velocity.z = p3.velocity.z+dt*(p1poszm-p3poszm)*GM*R13**(-3)+dt*(p2poszm-p3poszm)*GM*R23**(-3)

    p4.pos.x = p4.pos.x+p4velocityxm*dt
    p4.pos.y = p4.pos.y+p4velocityym*dt
    p4.pos.z = p4.pos.z+p4velocityzm*dt
    p4.velocity.x = p4.velocity.x+dt*(p1posxm-p4posxm)*GM*R14**(-3)+dt*(p2posxm-p4posxm)*GM*R24**(-3)+dt*(p3posxm-p4posxm)*GM*R34**(-3)
    p4.velocity.y = p4.velocity.y+dt*(p1posym-p4posym)*GM*R14**(-3)+dt*(p2posym-p4posym)*GM*R24**(-3)+dt*(p3posym-p4posym)*GM*R34**(-3)
    p4.velocity.z = p4.velocity.z+dt*(p1poszm-p4poszm)*GM*R14**(-3)+dt*(p2poszm-p4poszm)*GM*R24**(-3)+dt*(p3poszm-p4poszm)*GM*R34**(-3)

    p1.trail.append(pos=p1.pos)

# Final energy, for comparison with the initial value printed above.
R12 = sqrt((p1.pos.x-p2.pos.x)**2+(p1.pos.y-p2.pos.y)**2+(p1.pos.z-p2.pos.z)**2)
R13 = sqrt((p1.pos.x-p3.pos.x)**2+(p1.pos.y-p3.pos.y)**2+(p1.pos.z-p3.pos.z)**2)
R23 = sqrt((p3.pos.x-p2.pos.x)**2+(p3.pos.y-p2.pos.y)**2+(p3.pos.z-p2.pos.z)**2)
Epf = -GM*R12**(-1)-GM*R13**(-1)-GM*R23**(-1)
Ekf = 0.5*(p1.velocity.x**2+p1.velocity.y**2+p1.velocity.z**2 +
           p2.velocity.x**2+p2.velocity.y**2+p2.velocity.z**2 +
           p3.velocity.x**2+p3.velocity.y**2+p3.velocity.z**2)
Ef = Epf+Ekf
print R12, Ef
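# Integration note (added): each loop iteration is a midpoint-style (RK2-like)
# step -- positions are advanced half a step, provisional velocities are
# computed, and the full-step updates then reuse those midpoint values; the
# energy printout before and after the loop gives a rough conservation check.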
zhangzhihan/computationalphysics_n2014301020035
Chapter3/chapter3-1/three-body_disorder.py
Python
mit
5,578
from __future__ import absolute_import

from datetime import datetime
from django.utils import timezone
from django.core.urlresolvers import reverse

from sentry.models import (ProcessingIssue, EventError, RawEvent,
                           EventProcessingIssue)
from sentry.testutils import APITestCase


class ProjectProjectProcessingIssuesTest(APITestCase):
    def test_simple(self):
        self.login_as(user=self.user)
        team = self.create_team()
        project1 = self.create_project(teams=[team], name='foo')
        raw_event = RawEvent.objects.create(project_id=project1.id,
                                            event_id='abc')
        issue, _ = ProcessingIssue.objects.get_or_create(
            project_id=project1.id,
            checksum='abc',
            type=EventError.NATIVE_MISSING_DSYM,
        )
        EventProcessingIssue.objects.get_or_create(
            raw_event=raw_event,
            processing_issue=issue,
        )
        url = reverse(
            'sentry-api-0-project-processing-issues',
            kwargs={
                'organization_slug': project1.organization.slug,
                'project_slug': project1.slug,
            }
        )
        response = self.client.get(url, format='json')
        assert response.status_code == 200, response.content
        assert response.data['hasIssues'] is True
        assert response.data['hasMoreResolveableIssues'] is False
        assert response.data['numIssues'] == 1
        assert response.data['issuesProcessing'] == 0
        assert response.data['resolveableIssues'] == 0

    def test_issues(self):
        self.login_as(user=self.user)
        team = self.create_team()
        project1 = self.create_project(teams=[team], name='foo')
        raw_event = RawEvent.objects.create(project_id=project1.id,
                                            event_id='abc')
        issue, _ = ProcessingIssue.objects.get_or_create(
            project_id=project1.id,
            checksum='abc',
            type=EventError.NATIVE_MISSING_DSYM,
            datetime=datetime(2013, 8, 13, 3, 8, 25, tzinfo=timezone.utc),
        )
        issue2, _ = ProcessingIssue.objects.get_or_create(
            project_id=project1.id,
            checksum='abcd',
            type=EventError.NATIVE_MISSING_DSYM,
            datetime=datetime(2014, 8, 13, 3, 8, 25, tzinfo=timezone.utc),
        )
        EventProcessingIssue.objects.get_or_create(
            raw_event=raw_event,
            processing_issue=issue,
        )
        url = reverse(
            'sentry-api-0-project-processing-issues',
            kwargs={
                'organization_slug': project1.organization.slug,
                'project_slug': project1.slug,
            }
        )
        response = self.client.get(url + '?detailed=1', format='json')
        assert response.status_code == 200, response.content
        assert len(response.data['issues']) == 2
        assert response.data['numIssues'] == 2
        assert response.data['lastSeen'] == issue2.datetime
        assert response.data['hasIssues'] is True
        assert response.data['hasMoreResolveableIssues'] is False
        assert response.data['issuesProcessing'] == 0
        assert response.data['resolveableIssues'] == 0
        assert response.data['issues'][0]['checksum'] == issue.checksum
        assert response.data['issues'][0]['numEvents'] == 1
        assert response.data['issues'][0]['type'] == EventError.NATIVE_MISSING_DSYM
        assert response.data['issues'][1]['checksum'] == issue2.checksum

    def test_resolvable_issues(self):
        self.login_as(user=self.user)
        team = self.create_team()
        project1 = self.create_project(teams=[team], name='foo')
        RawEvent.objects.create(project_id=project1.id, event_id='abc')

        url = reverse(
            'sentry-api-0-project-processing-issues',
            kwargs={
                'organization_slug': project1.organization.slug,
                'project_slug': project1.slug,
            }
        )
        response = self.client.get(url + '?detailed=1', format='json')
        assert response.status_code == 200, response.content
        assert response.data['numIssues'] == 0
        assert response.data['resolveableIssues'] == 1
        assert response.data['lastSeen'] is None
        assert response.data['hasIssues'] is False
        assert response.data['hasMoreResolveableIssues'] is False
        assert response.data['numIssues'] == 0
        assert response.data['issuesProcessing'] == 0
ifduyue/sentry
tests/sentry/api/endpoints/test_project_processingissues.py
Python
bsd-3-clause
4,420
""" Serilizers for the accounts application API """ # Django from django.contrib.auth.models import User # Third Party from rest_framework import serializers # MuckRock from muckrock.accounts.models import Profile, Statistics from muckrock.jurisdiction.models import Jurisdiction class ProfileSerializer(serializers.ModelSerializer): """Serializer for Profile model""" location = serializers.PrimaryKeyRelatedField( queryset=Jurisdiction.objects.all(), style={"base_template": "input.html"}, required=False, ) class Meta: model = Profile exclude = ("user",) class UserSerializer(serializers.ModelSerializer): """Serializer for User model""" profile = ProfileSerializer() class Meta: model = User fields = ( "username", "email", "is_staff", "is_superuser", "last_login", "date_joined", "groups", "profile", ) class StatisticsSerializer(serializers.ModelSerializer): """Serializer for Statistics model""" def __init__(self, *args, **kwargs): super(StatisticsSerializer, self).__init__(*args, **kwargs) if "request" not in self.context or not self.context["request"].user.is_staff: staff_only = ( "pro_users", "pro_user_names", "total_page_views", "daily_requests_pro", "daily_requests_basic", "daily_requests_beta", "daily_requests_proxy", "daily_requests_admin", "daily_requests_org", "daily_articles", "total_tasks", "total_unresolved_tasks", "total_generic_tasks", "total_unresolved_generic_tasks", "total_orphan_tasks", "total_unresolved_orphan_tasks", "total_snailmail_tasks", "total_unresolved_snailmail_tasks", "total_rejected_tasks", "total_unresolved_rejected_tasks", "total_staleagency_tasks", "total_unresolved_staleagency_tasks", "total_flagged_tasks", "total_unresolved_flagged_tasks", "total_newagency_tasks", "total_unresolved_newagency_tasks", "total_response_tasks", "total_unresolved_response_tasks", "total_faxfail_tasks", "total_unresolved_faxfail_tasks", "total_payment_tasks", "total_unresolved_payment_tasks", "total_crowdfundpayment_tasks", "total_unresolved_crowdfundpayment_tasks", "total_reviewagency_tasks", "total_unresolved_reviewagency_tasks", "daily_robot_response_tasks", "admin_notes", "total_active_org_members", "total_active_orgs", "sent_communications_email", "sent_communications_fax", "sent_communications_mail", "total_users_filed", "flag_processing_days", "unresolved_snailmail_appeals", "total_crowdfunds", "total_crowdfunds_pro", "total_crowdfunds_basic", "total_crowdfunds_beta", "total_crowdfunds_proxy", "total_crowdfunds_admin", "open_crowdfunds", "open_crowdfunds_pro", "open_crowdfunds_basic", "open_crowdfunds_beta", "open_crowdfunds_proxy", "open_crowdfunds_admin", "closed_crowdfunds_0", "closed_crowdfunds_0_25", "closed_crowdfunds_25_50", "closed_crowdfunds_50_75", "closed_crowdfunds_75_100", "closed_crowdfunds_100_125", "closed_crowdfunds_125_150", "closed_crowdfunds_150_175", "closed_crowdfunds_175_200", "closed_crowdfunds_200", "total_crowdfund_payments", "total_crowdfund_payments_loggedin", "total_crowdfund_payments_loggedout", "public_projects", "private_projects", "unapproved_projects", "crowdfund_projects", "project_users", "project_users_pro", "project_users_basic", "project_users_beta", "project_users_proxy", "project_users_admin", "total_exemptions", "total_invoked_exemptions", "total_example_appeals", "requests_processing_days", "total_crowdsources", "total_draft_crowdsources", "total_open_crowdsources", "total_close_crowdsources", "num_crowdsource_responded_users", "total_crowdsource_responses", "crowdsource_responses_pro", "crowdsource_responses_basic", 
"crowdsource_responses_beta", "crowdsource_responses_proxy", "crowdsource_responses_admin", ) for field in staff_only: self.fields.pop(field) class Meta: model = Statistics fields = ( "date", "total_requests", "total_requests_success", "total_requests_denied", "total_requests_draft", "total_requests_submitted", "total_requests_awaiting_ack", "total_requests_awaiting_response", "total_requests_awaiting_appeal", "total_requests_fix_required", "total_requests_payment_required", "total_requests_no_docs", "total_requests_partial", "total_requests_abandoned", "total_requests_lawsuit", "requests_processing_days", "total_pages", "total_users", "total_agencies", "total_fees", "pro_users", "pro_user_names", "total_page_views", "daily_requests_pro", "daily_requests_basic", "daily_requests_beta", "daily_requests_proxy", "daily_requests_admin", "daily_requests_org", "daily_articles", "total_tasks", "total_unresolved_tasks", "total_generic_tasks", "total_unresolved_generic_tasks", "total_orphan_tasks", "total_unresolved_orphan_tasks", "total_snailmail_tasks", "total_unresolved_snailmail_tasks", "total_rejected_tasks", "total_unresolved_rejected_tasks", "total_staleagency_tasks", "total_unresolved_staleagency_tasks", "total_flagged_tasks", "total_unresolved_flagged_tasks", "total_newagency_tasks", "total_unresolved_newagency_tasks", "total_response_tasks", "total_unresolved_response_tasks", "total_faxfail_tasks", "total_unresolved_faxfail_tasks", "total_payment_tasks", "total_unresolved_payment_tasks", "total_crowdfundpayment_tasks", "total_unresolved_crowdfundpayment_tasks", "total_reviewagency_tasks", "total_unresolved_reviewagency_tasks", "daily_robot_response_tasks", "public_notes", "admin_notes", "total_active_org_members", "total_active_orgs", "sent_communications_email", "sent_communications_fax", "sent_communications_mail", "total_users_filed", "flag_processing_days", "unresolved_snailmail_appeals", "total_crowdfunds", "total_crowdfunds_pro", "total_crowdfunds_basic", "total_crowdfunds_beta", "total_crowdfunds_proxy", "total_crowdfunds_admin", "open_crowdfunds", "open_crowdfunds_pro", "open_crowdfunds_basic", "open_crowdfunds_beta", "open_crowdfunds_proxy", "open_crowdfunds_admin", "closed_crowdfunds_0", "closed_crowdfunds_0_25", "closed_crowdfunds_25_50", "closed_crowdfunds_50_75", "closed_crowdfunds_75_100", "closed_crowdfunds_100_125", "closed_crowdfunds_125_150", "closed_crowdfunds_150_175", "closed_crowdfunds_175_200", "closed_crowdfunds_200", "total_crowdfund_payments", "total_crowdfund_payments_loggedin", "total_crowdfund_payments_loggedout", "public_projects", "private_projects", "unapproved_projects", "crowdfund_projects", "project_users", "project_users_pro", "project_users_basic", "project_users_beta", "project_users_proxy", "project_users_admin", "total_exemptions", "total_invoked_exemptions", "total_example_appeals", "total_crowdsources", "total_draft_crowdsources", "total_open_crowdsources", "total_close_crowdsources", "num_crowdsource_responded_users", "total_crowdsource_responses", "crowdsource_responses_pro", "crowdsource_responses_basic", "crowdsource_responses_beta", "crowdsource_responses_proxy", "crowdsource_responses_admin", "machine_requests", "machine_requests_success", "machine_requests_denied", "machine_requests_draft", "machine_requests_submitted", "machine_requests_awaiting_ack", "machine_requests_awaiting_response", "machine_requests_awaiting_appeal", "machine_requests_fix_required", "machine_requests_payment_required", "machine_requests_no_docs", 
"machine_requests_partial", "machine_requests_abandoned", "machine_requests_lawsuit", )
MuckRock/muckrock
muckrock/accounts/serializers.py
Python
agpl-3.0
10,561
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteConversationDataset
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.

# To install the latest published package dependency, execute the following:
#   python3 -m pip install google-cloud-dialogflow


# [START dialogflow_v2_generated_ConversationDatasets_DeleteConversationDataset_sync]
from google.cloud import dialogflow_v2


def sample_delete_conversation_dataset():
    # Create a client
    client = dialogflow_v2.ConversationDatasetsClient()

    # Initialize request argument(s)
    request = dialogflow_v2.DeleteConversationDatasetRequest(
        name="name_value",
    )

    # Make the request
    operation = client.delete_conversation_dataset(request=request)

    print("Waiting for operation to complete...")

    response = operation.result()

    # Handle the response
    print(response)

# [END dialogflow_v2_generated_ConversationDatasets_DeleteConversationDataset_sync]
googleapis/python-dialogflow
samples/generated_samples/dialogflow_v2_generated_conversation_datasets_delete_conversation_dataset_sync.py
Python
apache-2.0
1,640
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from openstack import cloud as openstack

openstack.enable_logging(debug=True)

cloud = openstack.connect(cloud='ovh', region_name='SBG1')

image = cloud.get_image('Ubuntu 16.10')
print(image.name)
print(image['name'])
dtroyer/python-openstacksdk
examples/cloud/munch-dict-object.py
Python
apache-2.0
763
import matplotlib.transforms
import numpy


def has_legend(axes):
    return axes.get_legend() is not None


def get_legend_text(obj):
    """Return the legend label for the given artist, or None if it is not
    in the legend."""
    leg = obj.axes.get_legend()
    if leg is None:
        return None

    keys = [h.get_label() for h in leg.legendHandles if h is not None]
    values = [t.get_text() for t in leg.texts]

    label = obj.get_label()
    d = dict(zip(keys, values))
    if label in d:
        return d[label]

    return None


def transform_to_data_coordinates(obj, xdata, ydata):
    """The coordinates might not be in data coordinates, but could be
    sometimes in axes coordinates. For example, the matplotlib command

        axes.axvline(2)

    will have the y coordinates set to 0 and 1, not to the limits. Therefore,
    a two-stage transform has to be applied:

    1. first transforming to display coordinates, then
    2. from display to data.
    """
    if obj.axes is not None and obj.get_transform() != obj.axes.transData:
        points = numpy.array([xdata, ydata]).T
        transform = matplotlib.transforms.composite_transform_factory(
            obj.get_transform(), obj.axes.transData.inverted()
        )
        return transform.transform(points).T
    return xdata, ydata
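# Illustrative sketch (the `ax`/`line` names are assumptions, not part of
# this module): for a line created with ax.axvline(2), get_data() returns
# y values (0, 1) in axes coordinates, and
#
#   xdata, ydata = transform_to_data_coordinates(line, *line.get_data())
#
# maps them back into data coordinates as described in the docstring above.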
m-rossi/matplotlib2tikz
tikzplotlib/_util.py
Python
mit
1,258
###################################################
#################[ Module: Utils ]#################
###################################################

"""
Miscellaneous utilities for caspanda.
"""


def paste(x, sep=", "):
    """
    Joins an iterable of strings into one string separated by `sep`
    (a str.join-style formatting helper).
    """
    out = ""
    for i in x:
        out += i + sep
    return out.strip(sep)


def print_ls(ls, ident='', braces=1):
    """Recursively renders nested lists as a string, one item per line,
    indenting each nesting level with a tab."""
    out = ""
    for value in ls:
        if isinstance(value, list):
            out = out + print_ls(value, ident + '\t', braces + 1)
        else:
            # out = out + ident+'%s' %(value if isinstance(value, basestring) else value.name) + '\n'
            out = out + ident + '%s' % (value) + '\n'
    return out


def is_instance_multiple(x, obj_class):
    """
    Checks isinstance of multiple objects to save time. Skips an item if it
    is None.

    :param x: list of objects
    :param obj_class: list of classes, matched pairwise with x
    :return: True if every non-None item is an instance of its class
    """
    assert isinstance(x, list)
    assert isinstance(obj_class, list)
    # Completion sketch: the original body ended after the asserts; the
    # pairwise check below is an assumption based on the docstring.
    return all(item is None or isinstance(item, cls)
               for item, cls in zip(x, obj_class))
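# Usage examples (illustrative):
#   >>> paste(["a", "b", "c"])
#   'a, b, c'
#   >>> print_ls(["x", ["y", "z"]])
#   'x\n\ty\n\tz\n'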
julianrcook/caspanda
caspanda/utils.py
Python
mit
1,065
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# Lint as: python3
"""NitroML benchmark pipeline result overview."""

import datetime
import json
import re
from typing import Dict, Any, List, NamedTuple, Optional

from nitroml.benchmark import result as br
import pandas as pd

from ml_metadata import metadata_store
from ml_metadata.proto import metadata_store_pb2

# Column name constants
RUN_ID_KEY = 'run_id'
STARTED_AT = 'started_at'
BENCHMARK_FULL_KEY = 'benchmark_fullname'
ARTIFACT_ID_KEY = 'artifact_id'

# Component constants
_STATS = 'ExampleStatistics'

# Name constants
_NAME = 'name'
_PRODUCER_COMPONENT = 'producer_component'
_STATE = 'state'
_PIPELINE_NAME = 'pipeline_name'
_PIPELINE_ROOT = 'pipeline_root'
_RUN_ID = 'run_id'
_COMPONENT_ID = 'component_id'

# IR-Based TFXDagRunner constants
_IS_IR_KEY = 'is_ir'

# Default columns
_DEFAULT_COLUMNS = (STARTED_AT, RUN_ID_KEY,
                    br.BenchmarkResult.BENCHMARK_NAME_KEY,
                    br.BenchmarkResult.BENCHMARK_RUN_KEY,
                    br.BenchmarkResult.RUNS_PER_BENCHMARK_KEY)
_DATAFRAME_CONTEXTUAL_COLUMNS = (STARTED_AT, RUN_ID_KEY, BENCHMARK_FULL_KEY,
                                 br.BenchmarkResult.BENCHMARK_NAME_KEY,
                                 br.BenchmarkResult.BENCHMARK_RUN_KEY,
                                 br.BenchmarkResult.RUNS_PER_BENCHMARK_KEY)
_DEFAULT_CUSTOM_PROPERTIES = {
    _NAME, _PRODUCER_COMPONENT, _STATE, _PIPELINE_NAME
}


class _Result(NamedTuple):
  """Wrapper for properties and property names."""
  properties: Dict[str, Dict[str, Any]]
  property_names: List[str]


class _RunInfo(NamedTuple):
  """Wrapper for run id and component name."""
  run_id: str = ''
  component_name: str = ''
  started_at: int = 0


def _merge_results(results: List[_Result]) -> _Result:
  """Merges _Result objects into one."""
  properties = {}
  property_names = []
  for result in results:
    for key, props in result.properties.items():
      if key in properties:
        properties[key].update(props)
      else:
        properties[key] = {**props}
    property_names += result.property_names
  return _Result(properties=properties, property_names=property_names)


def _to_pytype(val: str) -> Any:
  """Converts val to a Python type."""
  try:
    return json.loads(val.lower())
  except ValueError:
    return val


def _parse_value(value: metadata_store_pb2.Value) -> Any:
  """Parse value from `metadata_store_pb2.Value` proto."""
  if value.HasField('int_value'):
    return value.int_value
  elif value.HasField('double_value'):
    return value.double_value
  else:
    return _to_pytype(value.string_value)


def _get_artifact_run_info_map(store: metadata_store.MetadataStore,
                               artifact_ids: List[int]) -> Dict[int, _RunInfo]:
  """Returns a dictionary mapping artifact_id to its MyOrchestrator run_id.

  Args:
    store: MetaDataStore object to connect to MLMD instance.
    artifact_ids: A list of artifact ids to load.

  Returns:
    A dictionary containing artifact_id as a key and MyOrchestrator run_id as
    value.
  """
  # Get events of artifacts.
  events = store.get_events_by_artifact_ids(artifact_ids)
  exec_to_artifact = {}
  for event in events:
    exec_to_artifact[event.execution_id] = event.artifact_id

  # Get execution of artifacts.
  executions = store.get_executions_by_id(list(exec_to_artifact.keys()))
  artifact_to_run_info = {}
  for execution in executions:
    run_id = execution.properties[RUN_ID_KEY].string_value
    component = execution.properties[_COMPONENT_ID].string_value
    artifact_id = exec_to_artifact[execution.id]
    artifact_to_run_info[artifact_id] = _RunInfo(
        run_id=run_id,
        component_name=component,
        started_at=execution.create_time_since_epoch)

  return artifact_to_run_info


def _get_benchmark_results(store: metadata_store.MetadataStore) -> _Result:
  """Returns the benchmark results of the BenchmarkResultPublisher component.

  Args:
    store: MetaDataStore object to connect to MLMD instance.

  Returns:
    A _Result objects with properties containing benchmark results.
  """
  metrics = {}
  property_names = set()
  publisher_artifacts = store.get_artifacts_by_type(
      br.BenchmarkResult.TYPE_NAME)
  for artifact in publisher_artifacts:
    evals = {}
    for key, val in artifact.custom_properties.items():
      evals[key] = _parse_value(val)
      # Change for the IR world.
      if key == 'name':
        new_id = _parse_value(val).split(':')
        if len(new_id) > 2:
          evals[RUN_ID_KEY] = new_id[1]
    property_names = property_names.union(evals.keys())
    metrics[artifact.id] = evals

  artifact_to_run_info = _get_artifact_run_info_map(store,
                                                    list(metrics.keys()))

  properties = {}
  for artifact_id, evals in metrics.items():
    run_info = artifact_to_run_info[artifact_id]
    started_at = run_info.started_at // 1000
    evals[STARTED_AT] = datetime.datetime.fromtimestamp(started_at)
    if RUN_ID_KEY not in metrics[artifact_id]:
      # Non-IR based runner.
      continue
    run_id = metrics[artifact_id][RUN_ID_KEY]
    result_key = run_id + '.' + evals[br.BenchmarkResult.BENCHMARK_NAME_KEY]
    if result_key in properties:
      properties[result_key].update(evals)
    else:
      properties[result_key] = {**evals}

  property_names = property_names.difference(
      {_NAME, _PRODUCER_COMPONENT, _STATE, *_DEFAULT_COLUMNS, _IS_IR_KEY})
  return _Result(properties=properties, property_names=sorted(property_names))


def get_statisticsgen_dir_list(
    store: metadata_store.MetadataStore) -> List[str]:
  """Obtains a list of statisticsgen_dir from the store."""
  stats_artifacts = store.get_artifacts_by_type(_STATS)
  stat_dirs_list = [artifact.uri for artifact in stats_artifacts]
  return stat_dirs_list


def _make_dataframe(metrics_list: List[Dict[str, Any]],
                    columns: List[str]) -> pd.DataFrame:
  """Makes pandas.DataFrame from metrics_list."""
  df = pd.DataFrame(metrics_list)
  if not df.empty:
    # Reorder columns.
    # Strip benchmark run repetition for aggregation.
    df[BENCHMARK_FULL_KEY] = df[br.BenchmarkResult.BENCHMARK_NAME_KEY]
    df[br.BenchmarkResult.BENCHMARK_NAME_KEY] = df[
        br.BenchmarkResult.BENCHMARK_NAME_KEY].apply(
            lambda x: re.sub(r'\.run_\d_of_\d$', '', x))

    key_columns = list(_DATAFRAME_CONTEXTUAL_COLUMNS)
    if br.BenchmarkResult.BENCHMARK_RUN_KEY not in df:
      key_columns.remove(br.BenchmarkResult.BENCHMARK_RUN_KEY)
    if br.BenchmarkResult.RUNS_PER_BENCHMARK_KEY not in df:
      key_columns.remove(br.BenchmarkResult.RUNS_PER_BENCHMARK_KEY)
    df = df[key_columns + columns]
    df = df.set_index([STARTED_AT])

  return df


def _aggregate_results(df: pd.DataFrame,
                       metric_aggregators: Optional[List[Any]],
                       groupby_columns: List[str]):
  """Aggregates metrics in an overview pd.DataFrame."""
  df = df.copy()
  groupby_columns = groupby_columns.copy()
  if br.BenchmarkResult.BENCHMARK_RUN_KEY in df:
    df = df.drop([br.BenchmarkResult.BENCHMARK_RUN_KEY], axis=1)
    groupby_columns.remove(br.BenchmarkResult.BENCHMARK_RUN_KEY)
  groupby_columns.remove(BENCHMARK_FULL_KEY)
  if br.BenchmarkResult.RUNS_PER_BENCHMARK_KEY not in df:
    groupby_columns.remove(br.BenchmarkResult.RUNS_PER_BENCHMARK_KEY)

  # Group by contextual columns and aggregate metrics.
  df = df.groupby(groupby_columns)
  df = df.agg(metric_aggregators)

  # Flatten MultiIndex into a DataFrame.
  df.columns = [' '.join(col).strip() for col in df.columns.values]
  return df.reset_index().set_index('started_at')


def overview(
    store: metadata_store.MetadataStore,
    metric_aggregators: Optional[List[Any]] = None,
) -> pd.DataFrame:
  """Returns a pandas.DataFrame containing hparams and evaluation results.

  This method assumes that `tf.enable_v2_behavior()` was called beforehand.
  It loads results for all evaluations, so the method can be slow.

  TODO(b/151085210): Allow filtering incomplete benchmark runs.

  Assumptions:
    For the given pipeline, MyOrchestrator run_id and component_id of trainer
    is unique and (my_orchestrator_run_id + trainer.component_id-postfix) is
    equal to (my_orchestrator_run_id + artifact.producer_component-postfix).

  Args:
    store: MetaDataStore object for connecting to an MLMD instance.
    metric_aggregators: Iterable of functions and/or function names, e.g.
      [np.sum, 'mean']. Groups individual runs by their contextual features
      (run id, hparams), and aggregates metrics by the given functions. If a
      function, must either work when passed a DataFrame or when passed to
      DataFrame.apply.

  Returns:
    A pandas DataFrame with the loaded hparams and evaluations or an empty
    one if no evaluations and hparams could be found.
  """
  result = _get_benchmark_results(store)

  # Filter metrics that have empty hparams and evaluation results.
  results_list = [
      result for result in result.properties.values()
      if len(result) > len(_DEFAULT_COLUMNS)
  ]

  df = _make_dataframe(results_list, result.property_names)
  if metric_aggregators:
    return _aggregate_results(
        df,
        metric_aggregators=metric_aggregators,
        groupby_columns=list(_DATAFRAME_CONTEXTUAL_COLUMNS))
  return df
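# Usage sketch (illustrative; `connection_config` is an assumed MLMD
# connection config object, not defined in this module):
#
#   store = metadata_store.MetadataStore(connection_config)
#   df = overview(store, metric_aggregators=['mean', 'std'])
#
# yields one row per benchmark with metrics aggregated across repetitions.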
google/nitroml
nitroml/benchmark/results.py
Python
apache-2.0
9,930
ACTIONS = [
    {
        'name': 'install',
        'description': 'Install new casks',
        'autocomplete': 'install ',
        'arg': '',
        'valid': False
    },
    {
        'name': 'uninstall',
        'description': 'Uninstall casks',
        'autocomplete': 'uninstall ',
        'arg': '',
        'valid': False
    },
    {
        'name': 'search',
        'description': 'Search casks',
        'autocomplete': 'search ',
        'arg': '',
        'valid': False
    },
    {
        'name': 'list',
        'description': 'List installed casks',
        'autocomplete': 'list ',
        'arg': '',
        'valid': False
    },
    {
        'name': 'upgrade',
        'description': 'Upgrade casks',
        'autocomplete': '',
        'arg': 'brew upgrade --cask',
        'valid': True
    },
    {
        'name': 'doctor',
        'description': 'Run brew doctor',
        'autocomplete': '',
        'arg': 'brew doctor',
        'valid': True
    },
    {
        'name': 'home',
        'description': 'Open the homepage of a cask',
        'autocomplete': 'home',
        'arg': '',
        'valid': False
    },
    {
        'name': 'config',
        'description': 'Open `settings.json` in your default editor.',
        'autocomplete': 'config',
        'arg': '',
        'valid': False
    },
    {
        'name': 'Clear workflow cache',
        'description': '',
        'autocomplete': 'workflow:delcache',
        'arg': '',
        'valid': False
    }
]
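# A minimal usage sketch (hypothetical, not from the original workflow): an
# Alfred script filter could match a typed query against these actions.
# `query` is an assumed input string.
query = 'do'
suggestions = [action for action in ACTIONS if action['name'].startswith(query)]
# -> only the 'doctor' entry matches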
fniephaus/alfred-homebrew
src/cask_actions.py
Python
mit
1,500
from __future__ import division, print_function import numpy as np import nose.tools as nt import regreg.api as rr from ..group_lasso import (group_lasso, selected_targets, full_targets, debiased_targets) from ...tests.instance import gaussian_instance from ...tests.flags import SET_SEED from ...tests.decorators import set_sampling_params_iftrue, set_seed_iftrue from ...algorithms.sqrt_lasso import choose_lambda, solve_sqrt_lasso from ..randomization import randomization from ...tests.decorators import rpy_test_safe @set_seed_iftrue(SET_SEED) def test_group_lasso(n=400, p=100, signal_fac=3, s=5, sigma=3, target='full', rho=0.4, randomizer_scale=.75, ndraw=100000): """ Test group lasso """ inst, const = gaussian_instance, group_lasso.gaussian signal = np.sqrt(signal_fac * np.log(p)) X, Y, beta = inst(n=n, p=p, signal=signal, s=s, equicorrelated=False, rho=rho, sigma=sigma, random_signs=True)[:3] orthogonal = True if orthogonal: X = np.linalg.svd(X, full_matrices=False)[0] Y = X.dot(beta) + sigma * np.random.standard_normal(n) n, p = X.shape sigma_ = np.std(Y) groups = np.floor(np.arange(p)/2).astype(np.int) weights = dict([(i, sigma_ * 2 * np.sqrt(2)) for i in np.unique(groups)]) conv = const(X, Y, groups, weights, randomizer_scale=randomizer_scale * sigma_) signs = conv.fit() nonzero = conv.selection_variable['directions'].keys() if target == 'full': (observed_target, group_assignments, cov_target, cov_target_score, alternatives) = full_targets(conv.loglike, conv._W, nonzero, conv.penalty) elif target == 'selected': (observed_target, group_assignments, cov_target, cov_target_score, alternatives) = selected_targets(conv.loglike, conv._W, nonzero, conv.penalty) elif target == 'debiased': (observed_target, group_assignments, cov_target, cov_target_score, alternatives) = debiased_targets(conv.loglike, conv._W, nonzero, conv.penalty) _, pval, intervals = conv.summary(observed_target, group_assignments, cov_target, cov_target_score, alternatives, ndraw=ndraw, compute_intervals=False) which = np.zeros(p, np.bool) for group in conv.selection_variable['directions'].keys(): which_group = conv.penalty.groups == group which += which_group return pval[beta[which] == 0], pval[beta[which] != 0] @set_seed_iftrue(SET_SEED) def test_lasso(n=400, p=200, signal_fac=1.5, s=5, sigma=3, target='full', rho=0.4, ndraw=10000): """ Test group lasso with groups of size 1, ie lasso """ inst, const = gaussian_instance, group_lasso.gaussian signal = np.sqrt(signal_fac * np.log(p)) X, Y, beta = inst(n=n, p=p, signal=signal, s=s, equicorrelated=False, rho=rho, sigma=sigma, random_signs=True)[:3] n, p = X.shape sigma_ = np.std(Y) groups = np.arange(p) weights = dict([(i, sigma_ * 2 * np.sqrt(2)) for i in np.unique(groups)]) conv = const(X, Y, groups, weights) signs = conv.fit() nonzero = conv.selection_variable['directions'].keys() if target == 'full': (observed_target, group_assignments, cov_target, cov_target_score, alternatives) = full_targets(conv.loglike, conv._W, nonzero, conv.penalty) elif target == 'selected': (observed_target, group_assignments, cov_target, cov_target_score, alternatives) = selected_targets(conv.loglike, conv._W, nonzero, conv.penalty) elif target == 'debiased': (observed_target, group_assignments, cov_target, cov_target_score, alternatives) = debiased_targets(conv.loglike, conv._W, nonzero, conv.penalty) _, pval, intervals = conv.summary(observed_target, group_assignments, cov_target, cov_target_score, alternatives, ndraw=ndraw, compute_intervals=False) which = np.zeros(p, np.bool) for group in 
conv.selection_variable['directions'].keys(): which_group = conv.penalty.groups == group which += which_group return pval[beta[which] == 0], pval[beta[which] != 0] @set_seed_iftrue(SET_SEED) def test_mixed(n=400, p=200, signal_fac=1.5, s=5, sigma=3, target='full', rho=0.4, ndraw=10000): """ Test group lasso with a mix of groups of size 1, and larger """ inst, const = gaussian_instance, group_lasso.gaussian signal = np.sqrt(signal_fac * np.log(p)) X, Y, beta = inst(n=n, p=p, signal=signal, s=s, equicorrelated=False, rho=rho, sigma=sigma, random_signs=True)[:3] n, p = X.shape sigma_ = np.std(Y) groups = np.arange(p) groups[-5:] = -1 groups[-8:-5] = -2 Y += X[:,-8:].dot(np.ones(8)) * 5 # so we select the last two groups weights = dict([(i, sigma_ * 2 * np.sqrt(2)) for i in np.unique(groups)]) conv = const(X, Y, groups, weights) signs = conv.fit() nonzero = conv.selection_variable['directions'].keys() if target == 'full': (observed_target, group_assignments, cov_target, cov_target_score, alternatives) = full_targets(conv.loglike, conv._W, nonzero, conv.penalty) elif target == 'selected': (observed_target, group_assignments, cov_target, cov_target_score, alternatives) = selected_targets(conv.loglike, conv._W, nonzero, conv.penalty) elif target == 'debiased': (observed_target, group_assignments, cov_target, cov_target_score, alternatives) = debiased_targets(conv.loglike, conv._W, nonzero, conv.penalty) _, pval, intervals = conv.summary(observed_target, group_assignments, cov_target, cov_target_score, alternatives, ndraw=ndraw, compute_intervals=False) which = np.zeros(p, np.bool) for group in conv.selection_variable['directions'].keys(): which_group = conv.penalty.groups == group which += which_group return pval[beta[which] == 0], pval[beta[which] != 0] @set_seed_iftrue(SET_SEED) def test_all_targets(n=100, p=20, signal_fac=1.5, s=5, sigma=3, rho=0.4): for target in ['full', 'selected', 'debiased']: test_group_lasso(n=n, p=p, signal_fac=signal_fac, s=s, sigma=sigma, rho=rho, target=target) def main(nsim=500, n=200, p=50, target='full', sigma=3): import matplotlib.pyplot as plt P0, PA = [], [] from statsmodels.distributions import ECDF for i in range(nsim): try: p0, pA = test_group_lasso(n=n, p=p, target=target, sigma=sigma) except: pass print(len(p0), len(pA)) P0.extend(p0) PA.extend(pA) P0_clean = np.array(P0) P0_clean = P0_clean[P0_clean > 1.e-5] # print(np.mean(P0_clean), np.std(P0_clean), np.mean(np.array(PA) < 0.05), np.sum(np.array(PA) < 0.05) / (i+1), np.mean(np.array(P0) < 0.05), np.mean(P0_clean < 0.05), np.mean(np.array(P0) < 1e-5), 'null pvalue + power + failure') if i % 3 == 0 and i > 0: U = np.linspace(0, 1, 101) plt.clf() if len(P0_clean) > 0: plt.plot(U, ECDF(P0_clean)(U)) if len(PA) > 0: plt.plot(U, ECDF(PA)(U), 'r') plt.plot([0, 1], [0, 1], 'k--') plt.savefig("plot.pdf") plt.show()
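# Note on main() above (a sketch, not a change to the original): the bare
# `except: pass` leaves p0/pA unbound if the very first test_group_lasso call
# raises, and silently reuses stale values on later failures. A safer loop
# body with the same intent:
#
#     try:
#         p0, pA = test_group_lasso(n=n, p=p, target=target, sigma=sigma)
#     except Exception:
#         continue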
selective-inference/selective-inference
selectinf/randomized/tests/test_group_lasso.py
Python
bsd-3-clause
10,569
'''
Created on Sep 22, 2016

@author: rtorres
'''
from flask_api.status import HTTP_500_INTERNAL_SERVER_ERROR, \
    HTTP_503_SERVICE_UNAVAILABLE


class BaseIWSExceptions(Exception):
    status_code = HTTP_500_INTERNAL_SERVER_ERROR
    message = 'Base exception to iws be'

    def __init__(self, arg=None):
        if arg:
            self.message = arg


class TechnicalException(BaseIWSExceptions):
    # Subclasses the project base exception. The original inherited the
    # builtin BaseException, which bypasses the shared status_code/message
    # handling defined above and looks like a typo.

    def __init__(self):
        super(TechnicalException, self).__init__()
        self.message = 'Technical exception to iws be'


class LogicalException(BaseIWSExceptions):

    def __init__(self):
        super(LogicalException, self).__init__()
        self.message = 'Logical exception to iws be'
        self.status_code = HTTP_503_SERVICE_UNAVAILABLE
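# A minimal usage sketch (assumed, and relying on the subclassing fix above):
#
#     try:
#         raise LogicalException()
#     except BaseIWSExceptions as exc:
#         exc.message      # 'Logical exception to iws be'
#         exc.status_code  # 503 (HTTP_503_SERVICE_UNAVAILABLE)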
rafasis1986/EngineeringMidLevel
flaskiwsapp/snippets/exceptions/baseExceptions.py
Python
mit
753
'''
display_video written by Daniel Wirick 12-28-14 as a front-end to recv_stream.
This program can be used and distributed without restrictions.

Version: 0.2
'''

import pygame
import sys
import time
import subprocess
import os

pygame.init()
font = pygame.font.Font(None, 36)

wrk_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(wrk_dir)

# A 640x480 frame should be 921600 bytes
buff = ""
display = pygame.display.set_mode((640, 480))

# need to subprocess recv_stream
recv_stream = subprocess.Popen(["../20-recv_stream/bin/recv_stream"],
                               shell=False, bufsize=0,
                               stdout=subprocess.PIPE)
pipe = recv_stream.stdout  # 921600

running = True
oldtime = time.time()
fps = 0.0
while running:
    data = pipe.read(921600)  # blocking
    image = pygame.image.frombuffer(data, (640, 480), "RGB")
    text = font.render(str(fps)[0:3], 1, (10, 220, 220))
    display.blit(image, (0, 0))
    display.blit(text, (0, 0))
    pygame.display.flip()
    newtime = time.time()
    fps = 1.0 / (newtime - oldtime)
    oldtime = newtime
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            recv_stream.terminate()
            running = False
    # time.sleep(.01)  # read is blocking so sleep isn't needed.
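# Sanity check (not in the original script) for the hard-coded buffer size:
# a 640x480 RGB frame is width * height * 3 bytes.
assert 640 * 480 * 3 == 921600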
oregoncoastrobotics/Video-Streaming
10-lan_cam/20-recv_stream/display_video.py
Python
mit
1,181
#!/usr/bin/env python
"""
Unit tests for Manipulator classes in libvirt_xml module.
"""

import unittest
import common
from virsh_unittest import FakeVirshFactory
from autotest.client.utils import CmdResult
from libvirt_xml.network_xml import NetworkXML
from staging.backports import itertools

# The output of virsh.net_list with only default net
_DEFAULT_NET = (' Name                 State      Autostart     Persistent\n'
                '----------------------------------------------------------\n'
                ' default              active     yes           yes\n')

# Set initial state of test net
global _net_state
_net_state = {'active': False, 'autostart': False, 'persistent': False}


class NetworkTestBase(unittest.TestCase):
    """
    Base class for NetworkXML test providing fake virsh commands.
    """

    @staticmethod
    def _net_list(option='--all', **dargs):
        """Bogus net_list command"""
        cmd = 'virsh net-list --all'
        if not _net_state['active'] and not _net_state['persistent']:
            test_net = ''
        else:
            if _net_state['active']:
                active = 'active'
            else:
                active = 'inactive'
            if _net_state['persistent']:
                persistent = 'yes'
            else:
                persistent = 'no'
            if _net_state['autostart']:
                autostart = 'yes'
            else:
                autostart = 'no'
            test_net = ' %-21s%-11s%-14s%-11s\n' % (
                'unittest', active, autostart, persistent)
        output = _DEFAULT_NET + test_net
        return CmdResult(cmd, output)

    @staticmethod
    def _net_define(xmlfile='unittest.xml', **dargs):
        """Bogus net_define command"""
        _net_state['persistent'] = True

    @staticmethod
    def _net_undefine(name='unittest', **dargs):
        """Bogus net_undefine command"""
        _net_state['persistent'] = False
        _net_state['autostart'] = False

    @staticmethod
    def _net_start(name='unittest', **dargs):
        """Bogus net_start command"""
        _net_state['active'] = True

    @staticmethod
    def _net_destroy(name='unittest', **dargs):
        """Bogus net_destroy command"""
        _net_state['active'] = False

    @staticmethod
    def _net_autostart(name='unittest', extra='', **dargs):
        """Bogus net_autostart command"""
        if _net_state['persistent']:
            if extra == '--disable':
                _net_state['autostart'] = False
            else:
                _net_state['autostart'] = True
        else:
            _net_state['autostart'] = False

    def setUp(self):
        # Use defined virsh methods below
        self.bogus_virsh = FakeVirshFactory(preserve=['net_state_dict'])
        self.bogus_virsh.__super_set__('net_list', self._net_list)
        self.bogus_virsh.__super_set__('net_define', self._net_define)
        self.bogus_virsh.__super_set__('net_undefine', self._net_undefine)
        self.bogus_virsh.__super_set__('net_start', self._net_start)
        self.bogus_virsh.__super_set__('net_destroy', self._net_destroy)
        self.bogus_virsh.__super_set__('net_autostart', self._net_autostart)


class NetworkXMLTest(NetworkTestBase):
    """
    Unit test class for manipulator methods in NetworkXML class.
    """

    def test_sync_and_state_dict(self):
        """
        Unit test for sync and state_dict methods of NetworkXML class.

        Traverse all possible states and call sync with each of them.
        """
        # Test sync without state option
        test_xml = NetworkXML(network_name='unittest',
                              virsh_instance=self.bogus_virsh)
        test_xml.sync()
        new_state = test_xml.state_dict()
        state = {'active': True, 'persistent': True, 'autostart': True}
        self.assertEqual(state, new_state)

        for values in itertools.product([True, False], repeat=3):
            # Change network to all possible states.
            keys = ['active', 'persistent', 'autostart']
            state = dict(zip(keys, values))
            test_xml.sync(state=state)

            # Check result's validity.
            new_state = test_xml.state_dict()
            # Transient network can't set autostart
            if state == {'active': True, 'persistent': False,
                         'autostart': True}:
                state = {'active': True, 'persistent': False,
                         'autostart': False}
            # Non-existent network should return None when retrieving state.
            if not state['active'] and not state['persistent']:
                assert new_state is None
            else:
                self.assertEqual(state, new_state)


if __name__ == '__main__':
    unittest.main()
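# A short sketch (not part of the test file) of what itertools.product
# enumerates in test_sync_and_state_dict: all 2**3 = 8 combinations of the
# three boolean state flags. Standard itertools behaves the same way as the
# staging.backports shim imported above.
import itertools as _itertools

_states = [dict(zip(['active', 'persistent', 'autostart'], values))
           for values in _itertools.product([True, False], repeat=3)]
assert len(_states) == 8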
autotest/virt-test
virttest/libvirt_network_unittest.py
Python
gpl-2.0
4,893
# -*- coding: utf-8 -*-

from .helper import Helper
from ..formatters.output_formatter import OutputFormatter


class FormatterHelper(Helper):

    def format_section(self, section, message, style='info'):
        return '<%s>[%s]</%s> %s' % (style, section, style, message)

    def format_block(self, messages, style, large=False):
        messages = [messages] if not isinstance(messages, (list, tuple)) else messages

        width = 0
        lines = []
        for message in messages:
            message = OutputFormatter.escape(message)
            # Large blocks get two spaces of padding on each side (matching
            # the +4 below), regular blocks get one (matching the +2).
            lines.append(('  %s  ' if large else ' %s ') % message)
            width = max(len(message) + (4 if large else 2), width)

        messages = [' ' * width] if large else []
        for line in lines:
            messages.append(line + ' ' * (width - len(line)))

        if large:
            messages.append(' ' * width)

        messages = map(lambda m: '<%s>%s</%s>' % (style, m, style), messages)

        return '\n'.join(messages)

    def get_name(self):
        return 'formatter'
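# A minimal usage sketch (assumed, based on the methods above):
#
#     helper = FormatterHelper()
#     helper.format_section('cache', 'Cache cleared')
#     # -> '<info>[cache]</info> Cache cleared'
#     print(helper.format_block(['Error!', 'Something went wrong.'],
#                               'error', large=True))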
Romibuzi/cleo
cleo/helpers/formatter_helper.py
Python
mit
1,024
import abc from collections import namedtuple import logging import numpy as np class LineSearchConverged(Exception): def __init__(self, alpha): self.alpha = alpha class LineSearchNotConverged(Exception): pass LineSearchResult = namedtuple( "LineSearchResult", "converged alpha f_new g_new f_evals df_evals dphi0", # defaults=( None, None, None, None, None, None), ) class LineSearch(metaclass=abc.ABCMeta): def __init__( self, p, cond="armijo", x0=None, geometry=None, f=None, df=None, alpha_init=None, f0=None, g0=None, c1=0.1, c2=0.9, max_cycles=10, alpha_min=1e-6, ): self.p = p self.geometry = geometry self.f = f self.df = df geometry_supplied = self.geometry is not None assert geometry_supplied or ( x0 is not None ), "Supply either 'geometry' or the starting coordinates 'x0'!" assert geometry_supplied or (self.f and self.df), ( "Supply either 'geometry' with a calculator or the two functions " "'f' and 'df' to calculate the energy and its gradient!" ) self.x0 = x0 if self.geometry: self.f = lambda coords: self.geometry.get_energy_at(coords) self.df = lambda coords: -self.geometry.get_energy_and_forces_at(coords)[ "forces" ] self.x0 = self.geometry.coords.copy() self.alpha_init = alpha_init self.f0 = f0 self.g0 = g0 self.c1 = c1 self.c2 = c2 self.max_cycles = max_cycles self.alpha_min = alpha_min # Store calculated energies & gradients self.alpha_fs = {} self.alpha_dfs = {} self.f_evals = 0 self.df_evals = 0 self.dphis = {} self.cond_funcs = { "armijo": self.sufficiently_decreased, "wolfe": self.wolfe_condition, "strong_wolfe": self.strong_wolfe_condition, } self.can_eval_cond_funcs = { "armijo": lambda alpha: alpha in self.alpha_fs, "wolfe": self.got_alpha_phi_dphi, "strong_wolfe": self.got_alpha_phi_dphi, } self.cond_func = self.cond_funcs[cond] self.can_eval_cond_func = self.can_eval_cond_funcs[cond] self.logger = logging.getLogger("optimizer") def log(self, message): self.logger.debug(message) def prepare_line_search(self): if self.f0 is None: self.phi0 = self.get_phi_dphi("f", 0) else: self.phi0 = self.f0 self.alpha_fs[0.0] = self.f0 if self.g0 is None: self.dphi0 = self.get_phi_dphi("g", 0) else: self.dphi0 = self.g0.dot(self.p) self.alpha_dfs[0.0] = self.g0 def check_alpha(self, alpha): if (alpha != 0.0) and (np.isnan(alpha) or (alpha < self.alpha_min)): raise LineSearchNotConverged() def _phi(self, alpha): alpha = float(alpha) self.check_alpha(alpha) try: f_alpha = self.alpha_fs[alpha] except KeyError: self.log(f"\tEvaluating energy for alpha={alpha:.6f}") f_alpha = self.f(self.x0 + alpha * self.p) self.f_evals += 1 self.alpha_fs[alpha] = f_alpha return f_alpha def _dphi(self, alpha): alpha = float(alpha) self.check_alpha(alpha) try: df_alpha = self.alpha_dfs[alpha] dphi_ = df_alpha.dot(self.p) except KeyError: self.log(f"\tEvaluating gradient for alpha={alpha:.6f}") df_alpha = self.df(self.x0 + alpha * self.p) self.df_evals += 1 self.alpha_dfs[alpha] = df_alpha dphi_ = df_alpha.dot(self.p) self.dphis[alpha] = dphi_ return dphi_ def got_alpha_phi_dphi(self, alpha): return (alpha in self.alpha_fs) and (alpha in self.alpha_dfs) def get_phi_dphi(self, what, alpha, check=True): """Wrapper that handles function/gradient evaluations.""" alpha = float(alpha) whats = "f g fg".split() assert what in whats calc_funcs = { "f": self._phi, "g": self._dphi, } result = [calc_funcs[w](alpha) for w in what] # Check if we got both phi and dphi for alpha now. If so we # can check if the chosen condition (Wolfe/approx. Wolfe) is # satisfied. 
if ( check and (alpha > 0.0) and self.can_eval_cond_func(alpha) and self.cond_func(alpha) ): self.log(f"Line search condition is satisfied for α={alpha:.6f}.") raise LineSearchConverged(alpha) # Dont return a list if only f or g was requested. if len(what) == 1: result = result[0] return result def get_fg(self, what, alpha): """Lookup raw function/gradient values for a given alpha.""" whats = "f g fg".split() assert what in whats lookups = { "f": self.alpha_fs, "g": self.alpha_dfs, } result = [lookups[w][alpha] for w in what] if len(what) == 1: result = result[0] return result def sufficiently_decreased(self, alpha): """Sufficient decrease/Armijo condition.""" return self._phi(alpha) <= (self.phi0 + self.c1 * alpha * self.dphi0) def curvature_condition(self, alpha): return self._dphi(alpha) >= self.c2 * self.dphi0 def strong_curvature_condition(self, alpha): return abs(self._dphi(alpha)) <= -self.c2 * self.dphi0 def wolfe_condition(self, alpha): """Normal, not strong, Wolfe condition.""" return self.sufficiently_decreased(alpha) and self.curvature_condition(alpha) def strong_wolfe_condition(self, alpha): """Strong wolfe condition.""" return self.sufficiently_decreased(alpha) and self.strong_curvature_condition( alpha ) @abc.abstractmethod def run_line_search(self): raise NotImplementedError def run(self): self.prepare_line_search() # Normal termination try: self.log(f"Starting {self.__class__.__name__} line search.") alpha = self.run_line_search() # Termination in get_phi_dphi except LineSearchConverged as lsc: alpha = lsc.alpha # Failed LineSearch except LineSearchNotConverged: alpha = None result = LineSearchResult( converged=bool(alpha), alpha=alpha, # The gradient at the endpoint of the line search may not # have been evaluted, but the function value was always # evaluated, except when the line search did not converge. f_new=self.alpha_fs.get(alpha, None), g_new=self.alpha_dfs.get(alpha, None), f_evals=self.f_evals, df_evals=self.df_evals, dphi0=self.dphi0, ) return result
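# A standalone sketch (not part of the class) of the sufficient-decrease
# (Armijo) condition that sufficiently_decreased() checks above:
#
#     phi(alpha) <= phi(0) + c1 * alpha * dphi(0)
def armijo_holds(phi_alpha, phi0, dphi0, alpha, c1=0.1):
    """Return True if step length alpha satisfies the Armijo condition."""
    return phi_alpha <= phi0 + c1 * alpha * dphi0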
eljost/pysisyphus
pysisyphus/line_searches/LineSearch.py
Python
gpl-3.0
7,219
from bokeh.io import output_notebook from bokeh.plotting import figure, show, gridplot, hplot, vplot, curdoc import numpy as np import os import csv from bokeh.client import push_session def download_trajectory_data(file): """ Converts a .csv file dataset to a numpy array. If the file is not a .csv file or the file does not exist, returns an error message. Inputs: .csv file. Outputs: numpy array """ # Define unit test variables file_csv = True file_exists = True # Check if file is a .csv file if file[-4:] == '.csv': # Check if file exists and downloads it.\ try: name = np.genfromtxt(file, delimiter=",") name = np.delete(name, 0, 0) except: print('File does not exist') file_exists = False return (file_csv, file_exists) else: return name else: file_csv = False print('File is not a .csv file') return (file_csv, file_exists) def csv_writer(): """ Creates a sample dataset for unit tests called output.csv """ if os.path.exists('output.csv'): print('output.csv', 'already exists') else: data = ["first_name,last_name,city".split(","), "Tyrese,Hirthe,Strackeport".split(","), "Jules,Dicki,Lake Nickolasville".split(","), "Dedric,Medhurst,Stiedemannberg".split(",") ] path = "output.csv" with open(path, "w") as csv_file: writer = csv.writer(csv_file, delimiter=',') for line in data: writer.writerow(line) def define_xydata(dataset, sets): """ Takes a large numpy array (presumably trajectory data with frames, x coordinates, y coordinates) of n columns and splits into (n-1)/3 numpy arrays of x-y data plus a separate array of time data. The first column in array is time column. Afterwards, every set of three columns becomes an entry in dictionary (Run i). Example: trajectory data over 10-second interval with three particles, each with three types of data: frame, x coordinate, y coordinate. define_xydata would output a times array (1 column) plus 3 3-column arrays of trajectory data with names Run['Run1'], Run['Run2'], Run['Run3']. These can be used to define variable with desired names later. Input: Large numpy array (n columns), number of sets to define from array Output: time array (1 column) trajectory arrays ((n-1)/3 columns) """ splitsuccess = True wholenumber = True justrightsets = True correctformat = True if (dataset.shape[1] - 1) % 3 == 0: if sets < (dataset.shape[1] - 1)/3: if float(sets).is_integer(): times = dataset[:, 0] Run = dict() for num in range(1, sets + 1): Run["Run" + str(num)] = dataset[:, 3 * num - 2: 3 * num + 1] return times, Run else: print("The variable 'sets' must be a whole number.") wholenumber = False splitsuccess = False return (splitsuccess, wholenumber, justrightsets, correctformat) else: print("Desired number of sets is too high for this dataset.") justrightsets = False splitsuccess = False return (splitsuccess, wholenumber, justrightsets, correctformat) else: print("Data is not of right format. Dataset must have 1 time column and sets of 3 columns of trajectory data.") correctformat = False splitsuccess = False return (splitsuccess, wholenumber, justrightsets, correctformat) def shift_trajectory(xydata): """ Adjusts the coordinate system of x-y trajectory data such that if the data were plotted, the center of the plot would be (0,0). Inputs: xydata: a numpy array of 3 columns: frame, x-coordinate and y-coordinate. 
""" length = xydata.shape[0] width = xydata.shape[1] # Define unit test variables numerical = True justright = True # checks for data that's not float64 format for num in range(0, width): for yes in range(0, length): if np.dtype(xydata[yes, num]) == np.dtype('float64'): numerical = True else: numerical = False break if not numerical: print("Array contains data that isn't of type float64.") else: # Checks if array is correct format (3 columns) if width == 3: x_mean = (np.min(xydata[:, 1]) + np.max(xydata[:, 1]))/2 y_mean = (np.min(xydata[:, 2]) + np.max(xydata[:, 2]))/2 xydata[:, 1] = xydata[:, 1] - x_mean xydata[:, 2] = xydata[:, 2] - y_mean print("Trajectory successfully shifted.") else: if width > 3: x_mean = (np.min(xydata[:, 1]) + np.max(xydata[:, 1]))/2 y_mean = (np.min(xydata[:, 2]) + np.max(xydata[:, 2]))/2 xydata[:, 1] = xydata[:, 1] - x_mean xydata[:, 2] = xydata[:, 2] - y_mean justright = False print("Array has more than three columns. May not yield correct results") else: justright = False print("Array doesn't have enough columns") return (numerical, justright) def plot_trajectory(xydata, charttitle): """ Plots a single 3-column numpy array of trajectory data (frames in column 1, x coordinates in column 2, y coordinates in column 3) within iPython notebook. Note: MUST have run output_notebook in order to run successfully. Input: numpy array, chart title (string) Output: displays trajectory plot inline """ length = xydata.shape[0] width = xydata.shape[1] justright = True if width == 3: x = xydata[:, 1] y = xydata[:, 2] p = figure(title=charttitle, title_text_font_size='13pt', width=300, height=300) p.line(x, y, line_width=2) show(p) else: justright = False if width > 3: x = xydata[:, 1] y = xydata[:, 2] p = figure(title=charttitle, title_text_font_size='13pt', width=300, height=300) p.line(x, y, line_width=2) show(p) print("Array has more than three columns. May not yield correct results") else: justright = False print("Array doesn't have enough columns") return justright def sidebyside(xydata1, xydata2, charttitle1, charttitle2): """ Plots two 3-column numpy arrays of trajectory data (frames in column 1, x coordinates in column 2, y coordinates in column 3) next to each other in iPython notebook. Note: MUST have run output_notebook in order to run successfully. Input: 2 numpy arrays, 2 chart titles (strings) Output: displays trajectory plots inline. """ length1 = xydata1.shape[0] width1 = xydata1.shape[1] justright = True length2 = xydata2.shape[0] width2 = xydata2.shape[1] if width1 == 3 and width2 == 3: x1 = xydata1[:, 1] y1 = xydata1[:, 2] x2 = xydata2[:, 1] y2 = xydata2[:, 2] s1 = figure(title=charttitle1, title_text_font_size='13pt', width=300, height=300) s1.line(x1, y1, color='navy', line_width=2) s2 = figure(title=charttitle2, title_text_font_size='13pt', width=300, height=300, x_range=s1.x_range, y_range=s1.y_range) s2.line(x2, y2, color='firebrick', line_width=2) p = gridplot([[s1, s2]]) show(p) else: justright = False if width1 > 3 or width2 > 3: x1 = xydata1[:, 1] y1 = xydata1[:, 2] x2 = xydata2[:, 1] y2 = xydata2[:, 2] s1 = figure(title=charttitle1, width=300, height=300, x_axis_label='x', y_axis_label='y') s1.line(x1, y1, color='navy', line_width=2) s2 = figure(title=charttitle2, width=300, height=300, x_range=s1.x_range, y_range=s1.y_range, x_axis_label='x', y_axis_label='y') s2.line(x2, y2, color='firebrick', line_width=2) p = hplot(s1, s2) show(p) print("At least one of the given arrays has more than three columns. 
May not yield corect results.") else: print("One of the given arrays has less than three columns. Data could not be plotted.") return justright def overlay(xydata1, xydata2, charttitle): """ Plots two 3-column numpy arrays of trajectory data (frames in column 1, x coordinates in column 2, y coordinates in column 3) superimposed upon each other in iPython notebook. Note: MUST have run output_notebook in order to run successfully. Input: 2 numpy arrays, 2 chart titles (strings) Output: displays trajectory plots inline. """ length1 = xydata1.shape[0] width1 = xydata1.shape[1] justright = True length2 = xydata2.shape[0] width2 = xydata2.shape[1] if width1 == 3 and width2 == 3: x1 = xydata1[:, 1] y1 = xydata1[:, 2] x2 = xydata2[:, 1] y2 = xydata2[:, 2] p = figure(title=charttitle, title_text_font_size='9pt', width=300, height=300, x_axis_label='x', y_axis_label='y') p.line(x1, y1, line_width=2, color='navy') p.line(x2, y2, line_width=2, color='firebrick') show(p) else: justright = False if width1 > 3 or width2 > 3: x1 = xydata1[:, 1] y1 = xydata1[:, 2] x2 = xydata2[:, 1] y2 = xydata2[:, 2] p = figure(title=charttitle, width=300, height=300) p.line(x1, y1, line_width=2, color='navy') p.line(x2, y2, line_width=2, color='firebrick') show(p) print("At least one of the given arrays has more than three columns. May not yield corect results.") else: print("One of the given arrays has less than three columns. Data could not be plotted.") return justright def animated_plot(xydata): """ I haven't been able to generate a working code for an animated plot function and I haven't been able to find out why. Whenever I try, I get an error message saying that index cannot be defined. """ b = xydata xlist = b[:, 1] ylist = b[:, 2] # create a plot and style its properties p = figure(x_range=(min(xlist), max(xlist)), y_range=(min(ylist), max(ylist)), toolbar_location=None) # add a text renderer to out plot (no data yet) r = p.line(x=[], y=[], line_width=3, color='navy') session = push_session(curdoc()) index = 0 ds = r.data_source # create a callback that will add the next point of the trajectory data def callback(): global index ds.data['x'].append(xlist[index]) ds.data['y'].append(ylist[index]) ds.trigger('data', ds.data, ds.data) index = index + 1 curdoc().add_periodic_callback(callback, 67) # open the document in a browser session.show() # run forever session.loop_until_closed()
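# The animated_plot docstring above reports that `index` "cannot be defined":
# the callback declares `global index`, but `index` is a local variable of
# animated_plot, so the module-level lookup fails. One possible fix (a sketch
# reusing the same ds/xlist/ylist objects) is to close over mutable state:
def make_stream_callback(ds, xlist, ylist):
    state = {'index': 0}

    def callback():
        i = state['index']
        ds.data['x'].append(xlist[i])
        ds.data['y'].append(ylist[i])
        ds.trigger('data', ds.data, ds.data)
        state['index'] = i + 1

    return callback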
mimc7580/disease_studies
trajectory_visualization.py
Python
gpl-2.0
11,383
############################################################################# ## ## Copyright (C) 2006-2007 University of Utah. All rights reserved. ## ## This file is part of VisTrails. ## ## This file may be used under the terms of the GNU General Public ## License version 2.0 as published by the Free Software Foundation ## and appearing in the file LICENSE.GPL included in the packaging of ## this file. Please review the following to ensure GNU General Public ## Licensing requirements will be met: ## http://www.opensource.org/licenses/gpl-license.php ## ## If you are unsure which license is appropriate for your use (for ## instance, you are interested in developing a commercial derivative ## of VisTrails), please contact us at contact@vistrails.org. ## ## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE ## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. ## ############################################################################ import itk import core.modules from core.modules.vistrails_module import Module, ModuleError from ITK import * from Image import Image class MeanImageFilter(Module): my_namespace = 'Filter|Neighborhood' def compute(self): im = self.get_input("Input Image") #check for input PixelType if self.has_input("Input PixelType"): inPixelType = self.get_input("Input PixelType") else: inPixelType = im.getPixelType() #check for output PixelType if self.has_input("Output PixelType"): outPixelType = self.get_input("Output PixelType") else: outPixelType = inPixelType #check for dimension if self.has_input("Dimension"): dim = self.get_input("Dimension") else: dim = im.getDim() #set up filter inImgType = itk.Image[inPixelType._type, dim] outImgType = itk.Image[outPixelType._type, dim] self.filter_ = itk.MeanImageFilter[inImgType, outImgType].New(im.getImg()) self.filter_.Update() #setup output image outIm = Image() outIm.setImg(self.filter_.GetOutput()) outIm.setPixelType(outPixelType) outIm.setDim(dim) #set results self.set_output("Output Image", outIm) self.set_output("Filter", self) self.set_output("Output PixelType", outPixelType) @classmethod def register(cls, reg, basic): reg.add_module(cls, name="Mean Image Filter", namespace=cls.my_namespace) reg.add_input_port(cls, "Input Image", (Image, 'Input Image')) reg.add_input_port(cls, "Input PixelType", (PixelType, 'Input PixelType'),True) reg.add_input_port(cls, "Output PixelType", (PixelType, 'Output PixelType'),True) reg.add_input_port(cls, "Dimension", (basic.Integer, 'Dimension'),True) reg.add_output_port(cls, "Output Image", (Image, 'Output Image')) reg.add_output_port(cls, "Filter", (Filter, 'Filter'), True) reg.add_output_port(cls, "Output PixelType", (PixelType, 'Output PixelType'),True) class MedianImageFilter(Module): my_namespace = 'Filter|Neighborhood' def compute(self): im = self.get_input("Input Image") #check for input PixelType if self.has_input("Input PixelType"): inPixelType = self.get_input("Input PixelType") else: inPixelType = im.getPixelType() #check for output PixelType if self.has_input("Output PixelType"): outPixelType = self.get_input("Output PixelType") else: outPixelType = inPixelType #check for dimension if self.has_input("Dimension"): dim = self.get_input("Dimension") else: dim = im.getDim() #set up filter inImgType = itk.Image[inPixelType._type, dim] outImgType = itk.Image[outPixelType._type, dim] self.filter_ = itk.MedianImageFilter[inImgType, outImgType].New(im.getImg()) self.filter_.Update() #setup output image outIm = Image() 
outIm.setImg(self.filter_.GetOutput()) outIm.setPixelType(outPixelType) outIm.setDim(dim) #set results self.set_output("Output Image", outIm) self.set_output("Filter", self) self.set_output("Output PixelType", outPixelType) @classmethod def register(cls, reg, basic): reg.add_module(cls, name="Median Image Filter", namespace=cls.my_namespace) reg.add_input_port(cls, "Input Image", (Image, 'Input Image')) reg.add_input_port(cls, "Input PixelType", (PixelType, 'Input PixelType'),True) reg.add_input_port(cls, "Output PixelType", (PixelType, 'Output PixelType'),True) reg.add_input_port(cls, "Dimension", (basic.Integer, 'Dimension'),True) reg.add_output_port(cls, "Output Image", (Image, 'Output Image')) reg.add_output_port(cls, "Filter", (Filter, 'Filter'), True) reg.add_output_port(cls, "Output PixelType", (PixelType, 'Output PixelType'),True) #TODO does this filter even modify the image? class BinaryErodeImageFilter(Module): my_namespace = 'Filter|Neighborhood' def compute(self): print "comput" im = self.get_input("Input Image") #check for input PixelType if self.has_input("Input PixelType"): inPixelType = self.get_input("Input PixelType") else: inPixelType = im.getPixelType() #check for output PixelType if self.has_input("Output PixelType"): outPixelType = self.get_input("Output PixelType") else: outPixelType = inPixelType #check for dimension if self.has_input("Dimension"): dim = self.get_input("Dimension") else: dim = im.getDim() kernel = self.get_input("Kernel") #set up filter inImgType = itk.Image[inPixelType._type, dim] outImgType = itk.Image[outPixelType._type, dim] erode_value = self.get_input("Erode Value") self.filter_ = itk.BinaryErodeImageFilter[inImgType, outImgType, kernel].New(im.getImg()) self.filter_.SetKernel(kernel) self.filter_.SetErodeValue(erode_value) self.filter_.Update() #setup output image outIm = Image() outIm.setImg(self.filter_.GetOutput()) outIm.setPixelType(outPixelType) outIm.setDim(dim) #set results self.set_output("Output Image", outIm) self.set_output("Filter", self) self.set_output("Output PixelType", outPixelType) @classmethod def register(cls, reg, basic): reg.add_module(cls, name="Binary Erode Image Filter", namespace=cls.my_namespace) reg.add_input_port(cls, "Input Image", (Image, 'Input Image')) reg.add_input_port(cls, "Input PixelType", (PixelType, 'Input PixelType'),True) reg.add_input_port(cls, "Output PixelType", (PixelType, 'Output PixelType'),True) reg.add_input_port(cls, "Dimension", (basic.Integer, 'Dimension'),True) reg.add_input_port(cls, "Kernel", (Kernel, 'Kernel')) reg.add_input_port(cls, "Erode Value", (basic.Integer, 'Erode Value')) reg.add_output_port(cls, "Output Image", (Image, 'Output Image')) reg.add_output_port(cls, "Filter", (Filter, 'Filter'), True) reg.add_output_port(cls, "Output PixelType", (PixelType, 'Output PixelType'),True)
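# A condensed sketch (assumptions: WrapITK is importable and `img` is an
# existing itk.Image[itk.UC, 2]) of the instantiation pattern the filter
# modules above share: build templated image types, create the filter,
# update it, and read the output.
#
#     in_type = itk.Image[itk.UC, 2]
#     out_type = itk.Image[itk.UC, 2]
#     filter_ = itk.MeanImageFilter[in_type, out_type].New(img)
#     filter_.Update()
#     result = filter_.GetOutput()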
VisTrails/vistrails-contrib-legacy
itk/NeighborhoodFilters.py
Python
bsd-3-clause
7,534
from submitify.tests import (
    TestCase,
    # CallMixin,
    # GuidelineMixin,
    # NotificationMixin,
    # ReviewMixin,
    # SubmissionMixin,
    # UserMixin,
)


class TestListCalls(TestCase):

    def test_lists_open_calls(self):
        self.assertTrue(True)

    def test_lists_other_calls_if_asked(self):
        pass


class TestViewCall(TestCase):

    def test_view_call(self):
        pass

    def test_lists_notifications(self):
        pass

    def test_can_submit_call_open_only(self):
        pass

    def test_can_submit_invite_only(self):
        pass

    def test_can_submit_if_reader(self):
        pass


class TestCreateCall(TestCase):

    def test_form_renders(self):
        pass

    def test_form_saves(self):
        pass

    def test_guidelines_save(self):
        pass


class TestEditCall(TestCase):

    def test_owner_only(self):
        pass

    def test_form_renders(self):
        pass

    def test_form_saves(self):
        pass

    def test_guidelines_save(self):
        pass


class TestInviteReader(TestCase):

    def test_reader_invited(self):
        pass

    def test_cant_invite_owner(self):
        pass


class TestInviteWriter(TestCase):

    def test_writer_invited(self):
        pass

    def test_cant_invite_owner(self):
        pass

    def test_cant_invite_unless_invite_only(self):
        pass


class TestNextStep(TestCase):

    def test_owner_only(self):
        pass

    def test_call_advanced(self):
        pass

    def test_cant_proceed_beyond_max(self):
        pass

    # The following two methods originally took a first parameter named
    # `test`; unittest passes the instance itself, so it is renamed to the
    # conventional `self`.
    def test_cant_proceed_to_finished_with_unreviewed_submissions(self):
        pass

    def test_moves_submissions_to_review_if_closing(self):
        pass
OpenFurry/submitify
submitify/views/test_calls.py
Python
mit
1,704
# vi: syntax=python:et:ts=4

import os


def run_pkg_config(context, name):
    env = context.env
    try:
        env["ENV"]["PKG_CONFIG_PATH"] = os.environ.get("PKG_CONFIG_PATH")
        env.ParseConfig(
            "pkg-config --libs --cflags --silence-errors $PKGCONFIG_FLAGS \""
            + name + "\"")
        context.Log("Found '" + name + "' with pkg-config.\n")
        return True
    except OSError:
        context.Log("Failed to find '" + name + "' with pkg-config.\n")
        return False


def CheckPKG(context, name):
    env = context.env
    context.Message('Checking for %s... ' % name)
    if run_pkg_config(context, name):
        context.Result("yes")
        return True
    else:
        context.Result("no")
        return False


config_checks = {"CheckPKG": CheckPKG}
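# A usage sketch (assumed: `config_checks` is shaped for SCons's custom_tests
# hook, but this SConstruct snippet is not part of the original file):
#
#     env = Environment()
#     conf = Configure(env, custom_tests=config_checks)
#     if not conf.CheckPKG('sdl2'):
#         Exit(1)
#     env = conf.Finish()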
RushilPatel/BattleForWesnoth
scons/pkgconfig.py
Python
gpl-2.0
776
# -*- coding: utf-8 -*- # # Copyright (C) 2003-2012 Sebastien Helleu <flashcode@flashtux.org> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # # Display name days in bar item and buffer. # Currently, only french calendar is supported. # (this script requires WeeChat 0.3.0 or newer) # # History: # # 2012-02-02, Sebastien Helleu <flashcode@flashtux.org>: # version 1.3: add option "reminder" # 2012-01-03, Sebastien Helleu <flashcode@flashtux.org>: # version 1.2: make script compatible with Python 3.x # 2011-10-30, Sebastien Helleu <flashcode@flashtux.org>: # version 1.1: fix colors in output of /nameday # 2011-05-06, Sebastien Helleu <flashcode@flashtux.org>: # version 1.0: add some missing names and color based on gender # 2010-01-14, Sebastien Helleu <flashcode@flashtux.org>: # version 0.9: add color options and options to display dates in bar item # 2010-01-13, Sebastien Helleu <flashcode@flashtux.org>: # version 0.8: conversion to python (script renamed to nameday.py), # conversion to WeeChat 0.3.0+ # 2007-08-10, Sebastien Helleu <flashcode@flashtux.org>: # version 0.7 # 2003-12-06, Sebastien Helleu <flashcode@flashtux.org>: # version 0.1: initial release (fete.pl) # SCRIPT_NAME = 'nameday' SCRIPT_AUTHOR = 'Sebastien Helleu <flashcode@flashtux.org>' SCRIPT_VERSION = '1.3' SCRIPT_LICENSE = 'GPL3' SCRIPT_DESC = 'Display name days in bar item and buffer' SCRIPT_COMMAND = 'nameday' SCRIPT_BAR_ITEM = 'nameday' import_ok = True try: import weechat except ImportError: print('This script must be run under WeeChat.') print('Get WeeChat now at: http://www.weechat.org/') import_ok = False try: import sys, time, unicodedata, re from datetime import date except ImportError as message: print('Missing package(s) for %s: %s' % (SCRIPT_NAME, message)) import_ok = False # script options nameday_settings_default = { 'country' : ('fr', 'country, only "fr" (french) is currently available'), 'days' : ('1', 'number of days after current one to display in bar item'), 'item_date_today' : ('on', 'display date for today in bar item'), 'item_date_next' : ('off', 'display dates for tomorrow and next days in bar item'), 'item_name_gender' : ('off', 'display gender (St/Ste) before name'), 'item_color_date_today' : ('white', 'color for date in item (today)'), 'item_color_name_today' : ('green', 'color for name in item (today)'), 'item_color_date_next' : ('default', 'color for date in item (next days)'), 'item_color_name_next' : ('default', 'color for name in item (next days)'), 'item_color_male_today' : ('cyan', 'color for male names in item (today)'), 'item_color_female_today': ('magenta', 'color for female names in item (today)'), 'item_color_male_next' : ('cyan', 'color for male names in item (next days)'), 'item_color_female_next' : ('magenta', 'color for female names in item (next days)'), 'color_male' : ('cyan', 'color for male names'), 'color_female' : ('magenta', 'color for female names'), 'reminder' : ('', 'comma-separated list of 
names or dates (format: DD/MM) for which a reminder is displayed'), } nameday_settings = {} nameday_item = '' nameday_buffer = '' namedays = { 'fr': ( # january ('!Marie (JOUR DE L\'AN)', '&Basile', '!Geneviève', '&Odilon', '&Edouard', '&Mélaine', '&Raymond,&Cédric,!Virginie', '&Lucien', '!Alix', '&Guillaume', '&Paulin', '!Tatiana', '!Yvette', '!Nina', '&Rémi,!Rachel', '&Marcel', '!Roseline', '!Prisca', '&Marius', '&Sébastien', '!Agnès', '&Vincent', '&Barnard', '&François de Sales', 'Conversion de St Paul', '!Paule', '!Angèle', '&Thomas d\'Aquin', '&Gildas', '!Martine,!Jacinthe', '!Marcelle'), # february ('!Ella,&Siméon', 'Présentation,&Théophane', '&Blaise', '!Véronique', '!Agathe', '&Gaston,!Dorothée', '!Eugénie', '!Jacqueline', '!Apolline', '&Arnaud', 'Notre-Dame de Lourdes', '&Félix', '!Béatrice', '&Valentin', '&Claude,&Jordan', '!Julienne,!Lucile', '&Alexis', '!Bernadette', '&Gabin', '!Aimée', '&Damien', '!Isabelle', '&Lazare', '&Modeste', '&Roméo', '&Nestor', '!Honorine', '&Romain', '&Auguste'), # march ('&Aubin,&Albin,&Jonathan', '&Charles le Bon', '&Guénolé,&Marin', '&Casimir', '!Olive,!Olivia', '&Colette', '!Félicité,&Nathan', '&Ryan', '!Françoise', '&Vivien', '!Rosine', '!Justine,&Pol', '&Rodrigue', '!Mathilde', '!Louise', '!Bénédicte', '&Patrice,&Patrick', '&Cyrille', '&Joseph', '&Herbert', '!Clémence,!Axelle', '!Léa', '&Victorien', '!Catherine', '&Humbert', '!Larissa', '&Habib', '&Gontran', '!Gwladys', '&Amédée', '&Benjamin'), # april ('&Hugues,&Valéry', '!Sandrine', '&Richard', '&Isodore', '!Irène', '&Marcellin', '&Jean-Baptiste de la Salle,&Clotaire', '!Julie', '&Gautier', '&Fulbert', '&Stanislas', '&Jules', '!Ida', '&Maxime,!Ludivine', '&César,&Paterne', '&Benoît-Joseph Labre', '&Anicet', '&Parfait', '!Emma', '!Odette', '&Anselme', '&Alexandre', '&Georges', '&Fidèle', '&Marc', '!Alida', '!Zita', '!Valérie', '!Catherine de Sienne', '&Robert'), # may ('&Jérémie (FETE du TRAVAIL)', '&Boris,!Zoé', '&Philippe,&Jacques', '&Sylvain,&Florian', '!Judith', '!Prudence', '!Gisèle', '&Désiré (ANNIVERSAIRE 1945)', '&Pacôme', '!Solange', '!Estelle', '&Achille', '!Rolande,&Maël', '&Mathias,!Aglaé', '!Denise', '&Honoré,&Brendan', '&Pascal', '&Eric,!Corinne', '&Yves,&Erwan', '&Bernardin', '&Constantin', '&Emile,!Rita', '&Didier', '&Donatien', '!Sophie', '&Bérenger', '&Augustin', '&Germain', '&Aymar,!Géraldine', '&Ferdinand,!Jeanne', 'Pétronille'), # june ('&Justin,&Ronan', '!Blandine', '&Kévin', '!Clotilde', '&Igor', '&Norbert', '&Gilbert', '&Médard', '!Diane', '&Landry', '&Barnabé,!Yolande', '&Guy', '&Antoine de Padoue', '&Elisée,&Valère', '!Germaine', '&François-Régis,&Régis', '&Hervé', '&Léonce', '&Romuald,&Gervais,!Micheline', '&Silvère', '&Rodolphe', '&Alban', '!Audrey', '&Jean-Baptiste', '&Salomon,&Prosper,!Aliénor,!Eléonore', '&Anthelme', '&Fernand', '&Irénée', '&Pierre,&Paul', '&Martial,&Adolphe'), # july ('&Thierry,!Esther', '&Martinien', '&Thomas', '&Florent', '&Antoine', '!Nolwen,!Mariette', '&Raoul', '&Thibaut,&Edgar,&Kilian,!Priscilla', '!Amandine,!Hermine,!Marianne', '&Ulrich', '&Benoît,!Olga', '&Olivier,&Jason', '&Henri,&Joël,&Enzo,&Eugène', '!Camille (FETE NATIONALE)', '&Donald,&Vladimir', '!Elvire', '!Charlotte,!Arlette,!Marcelline', '&Frédéric', '&Arsène', '!Marina', '&Victor', '!Madeleine', '!Brigitte', '!Christine,!Ségolène', '&Jacques,!Valentine', '!Anne,!Hannah,&Joachin', '!Nathalie', '&Samson', '!Marthe,!Béatrix,&Loup', '!Juliette', '&Ignace de Loyola'), # august ('&Alphonse', '&Julien', '!Lydie', '&Vianney', '&Abel', 'Transfiguration', '&Gaëtan', '&Dominique', 
'&Amour', '&Laurent', '!Claire,!Gilberte,!Suzanne', '!Clarisse', '&Hippolyte', '&Evrard', '!Marie,&Alfred (ASSOMPTION)', '&Armel', '&Hyacinthe', '!Hélène,!Laetitia', '&Jean Eudes', '&Bernard,&Samuel', '&Christophe,!Grâce', '&Fabrice', '!Rose de Lima', '&Barthélémy', '&Louis', '!Natacha', '!Monique', '&Augustin,&Elouan', '!Sabine,&Médéric', '&Fiacre', '&Aristide'), # september ('&Gilles', '!Ingrid', '&Grégoire', '!Rosalie,!Iris,&Moïse', '!Raïssa', '&Bertrand,!Eva', '!Reine', '&Adrien,!Béline', '&Alain,&Omer', '!Inès', '&Adelphe,!Glenn,!Vinciane', '&Apollinaire', '&Aimé', 'La Ste Croix', '&Roland,!Dolorès', '!Edith', '&Renaud,&Lambert', '!Nadège,!Véra', '!Emilie', '&Davy', '&Matthieu,!Déborah', '&Maurice', '&Constant', '!Thècle', '&Hermann', '&Côme,&Damien', '&Vincent de Paul', '&Venceslas', '&Michel,&Gabriel,&Raphaël', '&Jérôme'), # october ('!Thérèse de l\'Enfant Jésus', '&Léger', '&Gérard', '&François d\'Assise', '!Fleur,!Chloé', '&Bruno', '&Serge,&Gustave', '!Pélagie,&Thaïs', '&Denis', '&Ghislain,&Virgile', '&Firmin', '&Wilfried,&Edwin', '&Géraud', '&Juste,!Céleste,!Gwendoline', '!Thérèse d\'Avila', '!Edwige', '&Baudouin,!Solène', '&Luc', '&René,!Cléo', '!Adeline,!Aline', '!Céline,!Ursule', '!Elodie,!Salomé', '&Jean de Capistran', '&Florentin', '&Crépin', '&Dimitri', '!Emeline', '&Simon,&Jude', '&Narcisse', '!Bienvenue,!Maéva', '&Quentin'), # november ('&Harold (TOUSSAINT)', 'Défunts', '&Hubert,&Gwenaël', '&Charles,&Aymeric', '!Sylvie,&Zacharie', '!Bertille,&Léonard', '!Carine', '&Geoffroy', '&Théodore', '&Léon,&Noah,&Noé,!Mélissa', '&Martin (ARMISTICE 1918)', '&Christian', '&Brice', '&Sidoine', '&Albert,&Arthur,&Léopold,!Victoire', '!Marguerite,!Mégane,!Gertrude', '!Elisabeth,!Elise,!Hilda', '!Aude', '&Tanguy', '&Edmond,&Octave', 'Présentation de Marie', '!Cécile', '&Clément', '!Flora', '!Catherine', '!Delphine', '&Séverin', '&Jacques de la Marche', '&Saturnin', '&André'), # december ('!Florence', '!Viviane', '&Xavier', '!Barbara', '&Gérald', '&Nicolas', '&Ambroise', 'Immaculée Conception', '&Pierre Fourier', '&Romaric', '&Daniel', '!Chantal', '!Lucie,&Jocelyn', '!Odile', '!Ninon', '!Alice', '&Gaël', '&Gatien', '&Urbain', '&Abraham,&Théophile', '&Pierre Canisius', '!Françoise-Xavier', '&Armand', '!Adèle', '&Emmanuel,&Manuel (NOEL)', '&Etienne', '&Jean', '&Gaspard', '&David', '&Roger', '&Sylvestre,!Colombe'), ) } nameday_i18n = { 'fr': { 'm' : 'St ', 'f' : 'Ste ', }, } def nameday_remove_accents(string): """Remove accents from string.""" if sys.version_info >= (3,): # python 3.x return unicodedata.normalize('NFKD', string).encode('ASCII', 'ignore').decode('UTF-8') else: # python 2.x return unicodedata.normalize('NFKD', unicode(string, 'UTF-8')).encode('ASCII', 'ignore') def nameday_get_country(): """Return country.""" global nameday_settings, namedays country = nameday_settings['country'] if not namedays[country]: country = 'fr' return country def nameday_decode(name, gender, colorMale, colorFemale): """Decode name: replace special chars and optionally add color.""" global nameday_i18n, nameday_settings country = nameday_get_country() replacement = { '&': '', '!': '' } if colorMale: replacement['&'] = weechat.color(nameday_settings[colorMale]) if colorFemale: replacement['!'] = weechat.color(nameday_settings[colorFemale]) if gender: replacement['&'] += nameday_i18n[country]['m'] replacement['!'] += nameday_i18n[country]['f'] return name.replace('&', replacement['&']).replace('!', replacement['!']).replace(',', ', ') def nameday_get_month_day(month, day, gender, colorMale, colorFemale): 
"""Get name day for given day/month.""" global namedays try: country = nameday_get_country() name = namedays[country][month][day] return nameday_decode(name, gender, colorMale, colorFemale) except: return '' def nameday_get_date(name_date, gender, colorMale, colorFemale): """Get name day for given date.""" return nameday_get_month_day(name_date.month - 1, name_date.day - 1, gender, colorMale, colorFemale) def nameday_completion_namedays_cb(data, completion_item, buffer, completion): """Complete with name days, for command '/nameday'.""" global namedays country = nameday_get_country() for names in namedays[country]: for string in names: pos = string.find('(') if pos > 0: string = string[0:pos].strip() for name in string.split(','): name2 = nameday_decode(name, gender=False, colorMale='', colorFemale='') weechat.hook_completion_list_add(completion, name2, 0, weechat.WEECHAT_LIST_POS_SORT) weechat.hook_completion_list_add(completion, nameday_remove_accents(name2), 0, weechat.WEECHAT_LIST_POS_SORT) return weechat.WEECHAT_RC_OK def nameday_buffer_input_cb(data, buffer, input_data): """Input callback for buffer.""" if input_data.lower() == 'q': weechat.buffer_close(buffer) return weechat.WEECHAT_RC_OK def nameday_buffer_close_cb(data, buffer): """Callback called when buffer is closed.""" global nameday_buffer nameday_buffer = '' return weechat.WEECHAT_RC_OK def nameday_display_list(buffer): """Display list of name days in buffer.""" global namedays country = nameday_get_country() today = date.today() month = 0 while month < len(namedays[country]): day = 0 while day < len(namedays[country][month]): color = '' if today.month - 1 == month and today.day - 1 == day: color = weechat.color('yellow') weechat.prnt(buffer, '%s%02d/%02d %s' % (color, (day + 1), (month + 1), nameday_get_month_day(month, day, gender=True, colorMale='color_male', colorFemale='color_female'))) day += 1 month += 1 def nameday_list(): """Open buffer and display list of name days.""" global nameday_buffer if nameday_buffer: weechat.buffer_set(nameday_buffer, 'display', '1') else: nameday_buffer = weechat.buffer_search('python', 'nameday'); if not nameday_buffer: nameday_buffer = weechat.buffer_new('nameday', 'nameday_buffer_input_cb', '', 'nameday_buffer_close_cb', ''); if nameday_buffer: weechat.buffer_set(nameday_buffer, 'localvar_set_no_log', '1') weechat.buffer_set(nameday_buffer, 'time_for_each_line', '0') weechat.buffer_set(nameday_buffer, 'display', '1') weechat.buffer_set(nameday_buffer, 'title', 'Name days | Commands: list, listfull') nameday_display_list(nameday_buffer) def nameday_print(days): """Print name day for today and option N days in future.""" global nameday_i18n today = date.today() current_time = time.time() string = '%02d/%02d: %s' % (today.day, today.month, nameday_get_date(today, gender=True, colorMale='color_male', colorFemale='color_female')) if days < 0: days = 0 elif days > 50: days = 50 if days > 0: string += '%s (' % weechat.color('reset') for i in range(1, days + 1): if i > 1: string += '%s, ' % weechat.color('reset') date2 = date.fromtimestamp(current_time + ((3600 * 24) * i)) string += '%02d/%02d: %s' % (date2.day, date2.month, nameday_get_date(date2, gender=True, colorMale='color_male', colorFemale='color_female')) string += '%s)' % weechat.color('reset') weechat.prnt('', string) def nameday_reminder(month=0, day=0, tag='notify_highlight'): """Display reminder for given date (or nothing if no reminder defined for today).""" global namedays, nameday_settings country = nameday_get_country() if 
month < 1 or day < 1: today = date.today() month = today.month day = today.day nameday = nameday_remove_accents(namedays[country][month - 1][day - 1]).lower() nameday_words = re.sub('[^a-z ]', '', nameday.replace(',', ' ')).split() reminder = False for name in nameday_settings['reminder'].split(','): if name: pos = name.find('/') if pos >= 0: if day == int(name[:pos]) and month == int(name[pos+1:]): reminder = True break else: wordsfound = True for word in name.strip().lower().split(): if word and word not in nameday_words: wordsfound = False if wordsfound: reminder = True break if reminder: weechat.prnt_date_tags('', 0, tag, '*\tReminder: %02d/%02d: %s' % (day, month, nameday_get_month_day(month - 1, day - 1, gender=True, colorMale='color_male', colorFemale='color_female'))) def nameday_search(name): """Search a name.""" global namedays user_nameday = nameday_remove_accents(name).lower() country = nameday_get_country() month = 0 while month < len(namedays[country]): day = 0 while day < len(namedays[country][month]): nameday = nameday_remove_accents(namedays[country][month][day]) if (nameday.lower().find(user_nameday) >= 0): weechat.prnt('', '%02d/%02d: %s' % ((day + 1), (month + 1), nameday_get_month_day(month, day, gender=True, colorMale='color_male', colorFemale='color_female'))) day += 1 month += 1 def nameday_search_reminders(): """Search and display dates for reminders.""" global namedays country = nameday_get_country() month = 0 while month < len(namedays[country]): day = 0 while day < len(namedays[country][month]): nameday_reminder(month + 1, day + 1, '') day += 1 month += 1 def nameday_cmd_cb(data, buffer, args): """Command /nameday.""" if args: if args == '*': nameday_list() elif args == '!': nameday_search_reminders() elif args.isdigit(): nameday_print(int(args)) elif args.find('/') >= 0: pos = args.find('/') if pos > 0: day = int(args[:pos]) month = int(args[pos+1:]) name = nameday_get_month_day(month - 1, day - 1, gender=True, colorMale='color_male', colorFemale='color_female') if name != '': weechat.prnt('', '%02d/%02d: %s' % (day, month, name)) else: nameday_search(args) else: nameday_print(1) nameday_reminder() return weechat.WEECHAT_RC_OK def nameday_item_cb(data, buffer, args): """Callback for building nameday item.""" global nameday_item return nameday_item def nameday_build_item(): """Build nameday item.""" global nameday_settings, nameday_item nameday_item = '' display_date_today = nameday_settings['item_date_today'].lower() == 'on' display_date_next = nameday_settings['item_date_next'].lower() == 'on' display_gender = nameday_settings['item_name_gender'].lower() == 'on' color_date_today = weechat.color(nameday_settings['item_color_date_today']) color_name_today = weechat.color(nameday_settings['item_color_name_today']) color_date_next = weechat.color(nameday_settings['item_color_date_next']) color_name_next = weechat.color(nameday_settings['item_color_name_next']) color_default = weechat.color('default') today = date.today() if display_date_today: nameday_item += '%s%02d/%02d%s: ' % (color_date_today, today.day, today.month, color_default) nameday_item += '%s%s' % (color_name_today, nameday_get_date(today, gender=display_gender, colorMale='item_color_male_today', colorFemale='item_color_female_today')) days = 0 try: days = int(nameday_settings['days']) except: days = 0 if days < 0: days = 0 if days > 10: days = 10 if days > 0: nameday_item += '%s (' % color_default current_time = time.time() for i in range(1, days + 1): if i > 1: nameday_item += ', ' date2 = 
date.fromtimestamp(current_time + ((3600 * 24) * i)) if display_date_next: nameday_item += '%s%02d/%02d%s: ' % (color_date_next, date2.day, date2.month, color_default) nameday_item += '%s%s' % (color_name_next, nameday_get_date(date2, gender=display_gender, colorMale='item_color_male_next', colorFemale='item_color_female_next')) nameday_item += '%s)' % color_default return nameday_item def nameday_timer_cb(data, remaining_calls): """Called each day at midnight to change item content.""" nameday_build_item() weechat.bar_item_update('nameday') nameday_reminder() return weechat.WEECHAT_RC_OK def nameday_load_config(): global nameday_settings_default, nameday_settings version = weechat.info_get('version_number', '') or 0 for option, value in nameday_settings_default.items(): if weechat.config_is_set_plugin(option): nameday_settings[option] = weechat.config_get_plugin(option) else: weechat.config_set_plugin(option, value[0]) nameday_settings[option] = value[0] if int(version) >= 0x00030500: weechat.config_set_desc_plugin(option, value[1]) def nameday_config_cb(data, option, value): """Called each time an option is changed.""" nameday_load_config() nameday_build_item() weechat.bar_item_update('nameday') return weechat.WEECHAT_RC_OK if __name__ == '__main__' and import_ok: if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE, SCRIPT_DESC, '', ''): # load config nameday_load_config() # new command weechat.hook_completion('namedays', 'list of name days', 'nameday_completion_namedays_cb', '') weechat.hook_command(SCRIPT_COMMAND, 'Display name days', '[* | number | date | name | !]', ' *: display list of name days in a new buffer\n' 'number: display name day for today and <number> days in future\n' ' date: display name day for this date, format is day/month (for example: 31/01)\n' ' name: display date for this name\n' ' !: display reminder dates for names defined in option "reminder"\n\n' 'A bar item "nameday" can be used in a bar.\n\n' 'Examples:\n' ' /nameday * display list of name days in a new buffer\n' ' /nameday display name day for today and tomorrow\n' ' /nameday 2 display name day for today, tomorrow, and after tomorrow\n' ' /nameday 20/01 display name day for january, 20th\n' ' /nameday sébastien display day for name "sébastien"', '*|!|%(namedays)', 'nameday_cmd_cb', '') # new item nameday_build_item() weechat.bar_item_new(SCRIPT_BAR_ITEM, 'nameday_item_cb', '') # timer weechat.hook_timer(3600 * 24 * 1000, 3600 * 24, 0, 'nameday_timer_cb', '') # config weechat.hook_config('plugins.var.python.' + SCRIPT_NAME + '.*', 'nameday_config_cb', '') # reminder nameday_reminder()
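# Lookup sketch (not part of the script): name days are stored as
# namedays[country][month - 1][day - 1], with '&' marking male and '!'
# marking female names, e.g.:
#
#     namedays['fr'][0][1]  # -> '&Basile' (January 2nd)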
qguv/config
weechat/plugins/python/nameday.py
Python
gpl-3.0
25,821
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Idea' db.create_table(u'ideas_idea', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('deleted', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=150)), ('content', self.gf('django.db.models.fields.TextField')()), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('created_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='created_by', null=True, to=orm['community.User'])), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)), ('updated_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='updated_by', null=True, to=orm['community.User'])), ('completed_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)), ('completed_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='completed_by', null=True, to=orm['community.User'])), ('completed_post', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['posts.PostType'], null=True, blank=True)), ('status', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=2)), ('ip', self.gf('django.db.models.fields.IPAddressField')(max_length=15)), ('rating_likes', self.gf('django.db.models.fields.IntegerField')(default=0, blank=True)), ('rating_dislikes', self.gf('django.db.models.fields.IntegerField')(default=0, blank=True)), ('rating_sum', self.gf('django.db.models.fields.IntegerField')(default=0, blank=True)), ('rating_ratio', self.gf('django.db.models.fields.FloatField')(default=0, blank=True)), )) db.send_create_signal(u'ideas', ['Idea']) def backwards(self, orm): # Deleting model 'Idea' db.delete_table(u'ideas_idea') models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'community.user': { 'Meta': {'object_name': 'User'}, 'comments': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'dislikes': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 
'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'karma': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'likes': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'location': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'subscribe_newsletter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'ideas.idea': { 'Meta': {'object_name': 'Idea'}, 'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'completed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'completed_by'", 'null': 'True', 'to': u"orm['community.User']"}), 'completed_post': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['posts.PostType']", 'null': 'True', 'blank': 'True'}), 'content': ('django.db.models.fields.TextField', [], {}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created_by'", 'null': 'True', 'to': u"orm['community.User']"}), 'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}), 'rating_dislikes': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}), 'rating_likes': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}), 'rating_ratio': ('django.db.models.fields.FloatField', [], {'default': '0', 'blank': 'True'}), 'rating_sum': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}), 'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '150'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'updated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 
'related_name': "'updated_by'", 'null': 'True', 'to': u"orm['community.User']"}) }, u'posts.category': { 'Meta': {'object_name': 'Category'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}) }, u'posts.posttype': { 'Meta': {'object_name': 'PostType'}, 'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['posts.Category']"}), 'closed_comments': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'comments_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_created'", 'null': 'True', 'to': u"orm['community.User']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}), 'last_content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'last_content_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'published_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'published_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_published'", 'null': 'True', 'to': u"orm['community.User']"}), 'rating_dislikes': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}), 'rating_likes': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}), 'rating_ratio': ('django.db.models.fields.FloatField', [], {'default': '0', 'blank': 'True'}), 'rating_sum': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150', 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '150'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'updated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_updated'", 'null': 'True', 'to': u"orm['community.User']"}), 'views_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}) } } complete_apps = ['ideas']
nephthys/Nouweo
ideas/migrations/0001_initial.py
Python
agpl-3.0
11,066
#!/usr/bin/env python3 from pycnic.core import WSGI, Handler import logging import sys my_logger = logging.Logger(__name__) my_logger.setLevel(logging.DEBUG) hnd = logging.StreamHandler(sys.stdout) my_logger.addHandler(hnd) """ overrides.py This example includes special methods, functions, and properties with example usages. """ def my_before(handler): my_logger.info("Before, request IP is %s"%(handler.request.ip)) def my_after(handler): my_logger.info("After, headers are %s"%(handler.response.headers)) class Howdy(Handler): def before(self): """ Called before the request is routed """ my_logger.info("Howdy before called") def after(self): """ Called after the request is routed """ my_logger.info("Howdy after called") def get(self): assert self.request.method == "GET" return {} def post(self): assert self.request.method == "POST" return {} def put(self): assert self.request.method == "PUT" return {} def delete(self): assert self.request.method == "DELETE" return {} class app(WSGI): # A method name to call before the request is routed # default: None before = my_before # A method name to call after the request is routed # default: None after = my_after # Assign a custom logger, default is logging.Logger logger = my_logger # Set debug mode, default False debug = True # Remove the trailing slash for routing purposes # default: True strip_path = True # A list of routes, handler instances routes = [ ('/', Howdy()), ] if __name__ == "__main__": from wsgiref.simple_server import make_server try: print("Serving on 0.0.0.0:8080...") make_server('0.0.0.0', 8080, app).serve_forever() except KeyboardInterrupt: pass print("Done")
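# Quick smoke test (illustrative; assumes pycnic's usual JSON serialization of
# handler return values). With the server above running:
#   curl -X GET  http://localhost:8080/   # -> {}
#   curl -X POST http://localhost:8080/   # -> {}
# Each request is also wrapped by my_before/my_after, which log the client IP
# before routing and the response headers afterwards.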
nullism/pycnic
examples/overrides.py
Python
mit
1,937
#!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#

from .encoder import PolarEncoder
from .decoder import PolarDecoder
from . import channel_construction as cc
from .helper_functions import *

import numpy as np
import matplotlib.pyplot as plt


def get_frozen_bit_position():
    # frozenbitposition = np.array((0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 16, 17, 18, 20, 24), dtype=int)
    # frozenbitposition = np.array((0, 1, 2, 3, 4, 5, 8, 9), dtype=int)
    m = 256
    n_frozen = m // 2
    frozenbitposition = cc.get_frozen_bit_indices_from_z_parameters(cc.bhattacharyya_bounds(0.0, m), n_frozen)
    print(frozenbitposition)
    return frozenbitposition


def test_enc_dec_chain():
    ntests = 100
    n = 256
    k = n // 2
    frozenbits = np.zeros(n - k)
    frozenbitposition = get_frozen_bit_position()
    for i in range(ntests):
        bits = np.random.randint(2, size=k)
        encoder = PolarEncoder(n, k, frozenbitposition, frozenbits)
        decoder = PolarDecoder(n, k, frozenbitposition, frozenbits)
        encoded = encoder.encode(bits)
        rx = decoder.decode(encoded)
        if not is_equal(bits, rx):
            raise ValueError('Test #', i, 'failed, input and output differ', bits, '!=', rx)
    return


def is_equal(first, second):
    if not (first == second).all():
        result = first == second
        for i in range(len(result)):
            print('{0:4}: {1:2} == {2:1} = {3}'.format(i, first[i], second[i], result[i]))
        return False
    return True


def exact_value(la, lb):
    # exact LLR combination (box-plus): L(u_a XOR u_b) = log((1 + e^(la+lb)) / (e^la + e^lb))
    return np.log((np.exp(la + lb) + 1) / (np.exp(la) + np.exp(lb)))


def approx_value(la, lb):
    # min-sum approximation of the box-plus operation
    return np.sign(la) * np.sign(lb) * np.minimum(np.abs(la), np.abs(lb))


def path_metric_exact(last_pm, llr, ui):
    return last_pm + np.log(1 + np.exp(-1. * llr * (1 - 2 * ui)))


def path_metric_approx(last_pm, llr, ui):
    if ui == int(.5 * (1 - np.sign(llr))):
        return last_pm
    return last_pm + np.abs(llr)


def calculate_path_metric_vector(metric, llrs, us):
    res = np.zeros(llrs.size)
    res[0] = metric(0, llrs[0], us[0])
    for i in range(1, llrs.size):
        res[i] = metric(res[i - 1], llrs[i], us[i])
    return res


def test_1024_rate_1_code():
    # effectively a Monte-Carlo simulation for channel polarization.
ntests = 10000 n = 256 k = n transition_prob = 0.11 num_transitions = int(k * transition_prob) frozenbits = np.zeros(n - k) frozenbitposition = np.array((), dtype=int) encoder = PolarEncoder(n, k, frozenbitposition, frozenbits) decoder = PolarDecoder(n, k, frozenbitposition, frozenbits) channel_counter = np.zeros(k) possible_indices = np.arange(n, dtype=int) for i in range(ntests): bits = np.random.randint(2, size=k) tx = encoder.encode(bits) np.random.shuffle(possible_indices) tx[possible_indices[0:num_transitions]] = (tx[possible_indices[0:num_transitions]] + 1) % 2 rx = tx recv = decoder.decode(rx) channel_counter += (bits == recv) print(channel_counter) print(np.min(channel_counter), np.max(channel_counter)) np.save('channel_counter_' + str(ntests) + '.npy', channel_counter) def find_good_indices(res, nindices): channel_counter = np.copy(res) good_indices = np.zeros(channel_counter.size) for i in range(nindices): idx = np.argmax(channel_counter) good_indices[idx] = 1 channel_counter[idx] = 0 return good_indices def channel_analysis(): ntests = 10000 filename = 'channel_counter_' + str(ntests) + '.npy' channel_counter = np.load(filename) print(np.min(channel_counter), np.max(channel_counter)) channel_counter[0] = np.min(channel_counter) good_indices = find_good_indices(channel_counter, channel_counter.size // 2) info_bit_positions = np.where(good_indices > 0) print(info_bit_positions) frozen_bit_positions = np.delete(np.arange(channel_counter.size), info_bit_positions) print(frozen_bit_positions) np.save('frozen_bit_positions_n256_k128_p0.11.npy', frozen_bit_positions) good_indices *= 2000 good_indices += 4000 plt.plot(channel_counter) plt.plot(good_indices) plt.show() def merge_first_stage(init_mask): merged_frozen_mask = [] for e in range(0, len(init_mask), 2): v = [init_mask[e]['value'][0], init_mask[e + 1]['value'][0]] s = init_mask[e]['size'] * 2 if init_mask[e]['type'] == init_mask[e + 1]['type']: t = init_mask[e]['type'] merged_frozen_mask.append({'value': v, 'type': t, 'size': s}) else: t = 'RPT' merged_frozen_mask.append({'value': v, 'type': t, 'size': s}) return merged_frozen_mask def merge_second_stage(init_mask): merged_frozen_mask = [] for e in range(0, len(init_mask), 2): if init_mask[e]['type'] == init_mask[e + 1]['type']: t = init_mask[e]['type'] v = init_mask[e]['value'] v.extend(init_mask[e + 1]['value']) s = init_mask[e]['size'] * 2 merged_frozen_mask.append({'value': v, 'type': t, 'size': s}) elif init_mask[e]['type'] == 'ZERO' and init_mask[e + 1]['type'] == 'RPT': t = init_mask[e + 1]['type'] v = init_mask[e]['value'] v.extend(init_mask[e + 1]['value']) s = init_mask[e]['size'] * 2 merged_frozen_mask.append({'value': v, 'type': t, 'size': s}) elif init_mask[e]['type'] == 'RPT' and init_mask[e + 1]['type'] == 'ONE': t = 'SPC' v = init_mask[e]['value'] v.extend(init_mask[e + 1]['value']) s = init_mask[e]['size'] * 2 merged_frozen_mask.append({'value': v, 'type': t, 'size': s}) else: merged_frozen_mask.append(init_mask[e]) merged_frozen_mask.append(init_mask[e + 1]) return merged_frozen_mask def merge_stage_n(init_mask): merged_frozen_mask = [] n_elems = len(init_mask) - (len(init_mask) % 2) for e in range(0, n_elems, 2): if init_mask[e]['size'] == init_mask[e + 1]['size']: if (init_mask[e]['type'] == 'ZERO' or init_mask[e]['type'] == 'ONE') and init_mask[e]['type'] == init_mask[e + 1]['type']: t = init_mask[e]['type'] v = init_mask[e]['value'] v.extend(init_mask[e + 1]['value']) s = init_mask[e]['size'] * 2 merged_frozen_mask.append({'value': v, 'type': t, 'size': 
s}) elif init_mask[e]['type'] == 'ZERO' and init_mask[e + 1]['type'] == 'RPT': t = init_mask[e + 1]['type'] v = init_mask[e]['value'] v.extend(init_mask[e + 1]['value']) s = init_mask[e]['size'] * 2 merged_frozen_mask.append({'value': v, 'type': t, 'size': s}) elif init_mask[e]['type'] == 'SPC' and init_mask[e + 1]['type'] == 'ONE': t = init_mask[e]['type'] v = init_mask[e]['value'] v.extend(init_mask[e + 1]['value']) s = init_mask[e]['size'] * 2 merged_frozen_mask.append({'value': v, 'type': t, 'size': s}) else: merged_frozen_mask.append(init_mask[e]) merged_frozen_mask.append(init_mask[e + 1]) else: merged_frozen_mask.append(init_mask[e]) merged_frozen_mask.append(init_mask[e + 1]) if n_elems < len(init_mask): merged_frozen_mask.append(init_mask[-1]) return merged_frozen_mask def print_decode_subframes(subframes): for e in subframes: print(e) def find_decoder_subframes(frozen_mask): stages = power_of_2_int(len(frozen_mask)) block_size = 2 ** stages lock_mask = np.zeros(block_size, dtype=int) sub_mask = [] for e in frozen_mask: if e == 1: sub_mask.append(0) else: sub_mask.append(1) sub_mask = np.array(sub_mask, dtype=int) for s in range(0, stages): stage_size = 2 ** s mask = np.reshape(sub_mask, (-1, stage_size)) lock = np.reshape(lock_mask, (-1, stage_size)) for p in range(0, (block_size // stage_size) - 1, 2): l0 = lock[p] l1 = lock[p + 1] first = mask[p] second = mask[p + 1] print(l0, l1) print(first, second) if np.all(l0 == l1): for eq in range(2): if np.all(first == eq) and np.all(second == eq): mask[p].fill(eq) mask[p + 1].fill(eq) lock[p].fill(s) lock[p + 1].fill(s) if np.all(first == 0) and np.all(second == 2): mask[p].fill(2) mask[p + 1].fill(2) lock[p].fill(s) lock[p + 1].fill(s) if np.all(first == 3) and np.all(second == 1): mask[p].fill(3) mask[p + 1].fill(3) lock[p].fill(s) lock[p + 1].fill(s) if s == 0 and np.all(first == 0) and np.all(second == 1): mask[p].fill(2) mask[p + 1].fill(2) lock[p].fill(s) lock[p + 1].fill(s) if s == 1 and np.all(first == 2) and np.all(second == 1): mask[p].fill(3) mask[p + 1].fill(3) lock[p].fill(s) lock[p + 1].fill(s) sub_mask = mask.flatten() lock_mask = lock.flatten() words = {0: 'ZERO', 1: 'ONE', 2: 'RPT', 3: 'SPC'} ll = lock_mask[0] sub_t = sub_mask[0] for i in range(len(frozen_mask)): v = frozen_mask[i] t = words[sub_mask[i]] l = lock_mask[i] # if i % 8 == 0: # print if not l == ll or not sub_mask[i] == sub_t: print('--------------------------') ll = l sub_t = sub_mask[i] print('{0:4} lock {1:4} value: {2} in sub {3}'.format(i, 2 ** (l + 1), v, t)) def systematic_encoder_decoder_chain_test(): print('systematic encoder decoder chain test') block_size = int(2 ** 8) info_bit_size = block_size // 2 ntests = 100 frozenbitposition = cc.get_frozen_bit_indices_from_z_parameters(cc.bhattacharyya_bounds(0.0, block_size), block_size - info_bit_size) encoder = PolarEncoder(block_size, info_bit_size, frozenbitposition) decoder = PolarDecoder(block_size, info_bit_size, frozenbitposition) for i in range(ntests): bits = np.random.randint(2, size=info_bit_size) y = encoder.encode_systematic(bits) u_hat = decoder.decode_systematic(y) assert (bits == u_hat).all() def main(): n = 8 m = 2 ** n k = m // 2 n_frozen = n - k # n = 16 # k = 8 # frozenbits = np.zeros(n - k) # frozenbitposition8 = np.array((0, 1, 2, 4), dtype=int) # frozenbitposition = np.array((0, 1, 2, 3, 4, 5, 8, 9), dtype=int) # print(frozenbitposition) # test_enc_dec_chain() # test_1024_rate_1_code() # channel_analysis() # frozen_indices = cc.get_bec_frozen_indices(m, n_frozen, 0.11) # 
frozen_mask = cc.get_frozen_bit_mask(frozen_indices, m) # find_decoder_subframes(frozen_mask) systematic_encoder_decoder_chain_test() if __name__ == '__main__': main()
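# Worked example (illustrative) for the two path metrics defined above. The
# approximate metric only penalizes a decision u_i that contradicts the sign
# of its LLR, and the penalty is |llr|:
#   calculate_path_metric_vector(path_metric_approx,
#                                np.array([2.0, -1.5]), np.array([0, 0]))
#   # -> [0.0, 1.5]  (u_0 agrees with +2.0; u_1 = 0 contradicts -1.5)
# The exact metric charges log(1 + exp(-llr * (1 - 2 * ui))) instead, so even
# an agreeing decision pays a small amount, e.g. log(1 + exp(-2.0)) ~= 0.127.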
trabucayre/gnuradio
gr-fec/python/fec/polar/testbed.py
Python
gpl-3.0
11,512
### Open Provenance February 2016 - https://myveryown.org
### Bitcoin Blockchain Information using python-bitcoinlib
### CBlock Object and Properties
### Donate to Open Provenance: 1opDUZQ9nsL1LJALBdV1dvqSMtcvNj9EC

## Import the modules required
import bitcoin
import bitcoin.rpc

## Create a proxy object and connect to the bitcoin.rpc
myproxy = bitcoin.rpc.Proxy()

## Get the latest CBlock data from bitcoin rpc proxy
block_info = myproxy.getblock(myproxy.getblockhash(myproxy.getblockcount()))

## Print the details to the screen.
print "----------------------------------------------------------------"
print "Bitcoin CBlock Object Information: Block Height ", myproxy.getblockcount()
print "----------------------------------------------------------------"
print "Block Difficulty: ", block_info.difficulty
print "Block nVersion: ", block_info.nVersion
print "Block nNonce: ", block_info.nNonce
print "Block nBits: ", block_info.nBits
print "Block nTime: ", block_info.nTime
print "No of Transactions: ", len(block_info.vtx)
print " "
print "Block Hash: "
print bitcoin.core.b2lx(block_info.GetHash())
print " "
print "Previous Block Hash: "
print bitcoin.core.b2lx(block_info.hashPrevBlock)
print " "
print "Merkle Root: "
print bitcoin.core.b2lx(block_info.hashMerkleRoot)
print ' '
print "----------------------------------------------------------------"
print "Dump of RAW CBlock Object:"
print block_info
print "----------------------------------------------------------------"
print ' '
exit()
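## Note (illustrative): b2lx reverses the raw 32-byte hash and hex-encodes it,
## the little-endian display convention used by block explorers, e.g.
##   bitcoin.core.b2lx(b'\x00' * 31 + b'\x01')
##   # -> '01' followed by 62 zeros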
OpenProvenance/python-bitcoinlib-scripting
01-CBlock.py
Python
mit
1,512
from time import time from json import dumps from requests import get, post, exceptions from urllib.parse import urlparse, parse_qs class FakeRequest(object): content = '' status_code = 0 def go(web_request): """Doing Attack :param web_request: requestId ---------------------- method url requestBody requestHeaders type ---------------------- status_code responseHeaders :return: None """ o = urlparse(web_request['url']) attack_query = parse_qs(o.query) url = o._replace(query=None).geturl() for k, v in attack_query.items(): tmp_value = v attack_query[k] = 'sleep(3)' a1 = time() print(attack_query) try: tmp_header = {} for w in web_request['requestHeaders']: tmp_header[w['name']] = w['value'] r = get(url, params=attack_query, timeout=5, headers=tmp_header) except exceptions.Timeout: r = FakeRequest() a2 = time() if a2 - a1 >= 3: ss = """\033[36m _____ _ _ _ _ | ____|_ ___ __ | | ___ (_) |_ | | | _| \ \/ / '_ \| |/ _ \| | __| | | | |___ > <| |_) | | (_) | | |_ |_| |_____/_/\_\ .__/|_|\___/|_|\__| (_) |_| \033[37m""" print(ss) print(a2 - a1) urlz = 'http://localhost:8787/success' data = { 'request_id': web_request['requestId'], 'module_id': 17384, 'url': url, 'r_method': web_request['method'], 'r_type': web_request['type'], 'attack_query': dumps(attack_query), 'body': dumps(attack_query), 'request_headers': dumps(tmp_header), 'response_headers': dumps(web_request['responseHeaders']), 'response_body': r.content, 'response_status': r.status_code, } post(urlz, data=data) attack_query[k] = tmp_value
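# How the detection above works (summary, no new behaviour): each query
# parameter is swapped in turn for "sleep(3)"; if the round trip then takes at
# least 3 seconds (or times out at 5s, falling back to the FakeRequest stub),
# the parameter is flagged as a time-based blind SQL injection point and the
# request/response evidence is POSTed to the local collector on port 8787.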
junorouse/ullyeo-fuzzer
fuzzer/modules/bsqli_on_idx.py
Python
mit
2,139
# -*- coding: utf-8 -*- # # test-rest documentation build configuration file, created by # sphinx-quickstart on Thu Oct 12 19:53:40 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'test-rest' copyright = u'2017, Christopher Hoskin' author = u'Christopher Hoskin' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = u'1' # The full version, including alpha/beta/rc tags. release = u'0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. 
# # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars html_sidebars = { '**': [ 'about.html', 'navigation.html', 'relations.html', # needs 'show_related': True theme option to display 'searchbox.html', 'donate.html', ] } # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'test-restdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'test-rest.tex', u'test-rest Documentation', u'Christopher Hoskin', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'test-rest', u'test-rest Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'test-rest', u'test-rest Documentation', author, 'test-rest', 'One line description of project.', 'Miscellaneous'), ] # -- Options for Epub output ---------------------------------------------- # Bibliographic Dublin Core info. epub_title = project epub_author = author epub_publisher = author epub_copyright = copyright # The unique identifier of the text. This can be a ISBN number # or the project homepage. # # epub_identifier = '' # A unique identification for the text. # # epub_uid = '' # A list of files that should not be packed into the epub file. epub_exclude_files = ['search.html']
mans0954/odfbuilder
test-doc/conf.py
Python
bsd-2-clause
5,653
import time

from app.validation.abstract_validator import AbstractValidator
from app.validation.validation_result import ValidationResult


class MonthYearDateTypeCheck(AbstractValidator):

    def validate(self, user_answer):
        """
        Validate that the user's answer is a valid month/year date
        :param user_answer: The answer the user provided for the response
        :return: ValidationResult(): An object containing the result of the validation
        """
        result = ValidationResult(False)
        try:
            time.strptime(user_answer, "%m/%Y")
            return ValidationResult(True)
        except (ValueError, TypeError):
            result.errors.append(AbstractValidator.INVALID_DATE)

        return result
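# Minimal usage sketch (illustrative; assumes ValidationResult simply records
# the boolean passed to its constructor plus the errors list used above):
#   check = MonthYearDateTypeCheck()
#   check.validate("04/2016")  # matches "%m/%Y" -> ValidationResult(True)
#   check.validate("2016-04")  # ValueError path -> INVALID_DATE appended
#   check.validate(None)       # TypeError path  -> INVALID_DATE appended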
qateam123/eq
app/validation/month_year_date_type_check.py
Python
mit
817
import tests.model_control.test_ozone_custom_models_enabled as testmod

testmod.build_model(['Logit'], ['Lag1Trend'], ['Seasonal_DayOfMonth'], ['AR'])
antoinecarme/pyaf
tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_Lag1Trend_Seasonal_DayOfMonth_AR.py
Python
bsd-3-clause
157
import os import numpy as np from unidecode import unidecode from codecs import open from vsm.corpus import Corpus from util import * from progressbar import ProgressBar, Percentage, Bar __all__ = ['empty_corpus', 'random_corpus', 'toy_corpus', 'corpus_fromlist', 'file_corpus', 'dir_corpus', 'coll_corpus', 'json_corpus', 'corpus_from_strings'] def corpus_from_strings(strings, metadata=[], decode=False, nltk_stop=True, stop_freq=0, add_stop=None): """ Takes a list of strings and returns a Corpus object whose document tokens are the strings. """ if decode: import unidecode for i in xrange(len(strings)): if isinstance(strings[i], unicode): strings[i] = unidecode.unidecode(strings[i]) documents = [word_tokenize(s) for s in strings] corpus = sum(documents, []) indices = np.cumsum([len(d) for d in documents]) del documents if len(metadata) == 0: metadata = ['document_{0}'.format(i) for i in xrange(len(strings))] md_type = np.array(metadata).dtype dtype = [('idx', np.int), ('document_label', md_type)] context_data = [np.array(zip(indices, metadata), dtype=dtype)] c = Corpus(corpus, context_data=context_data, context_types=['document']) return apply_stoplist(c, nltk_stop=nltk_stop, freq=stop_freq, add_stop=add_stop) def empty_corpus(context_type='document'): """ Creates an empty Corpus with defined context_type. :param context_type: A type of tokenization. Default is 'document'. :type context_type: string :returns: An empty Corpus with no words or context_data. :See Also: :class:`vsm.corpus.Corpus` """ return Corpus([], context_data=[np.array([], dtype=[('idx', np.int)])], context_types=[context_type]) def random_corpus(corpus_len, n_words, min_token_len, max_token_len, context_type='document', metadata=False, seed=None): """ Generates a random integer corpus. :param corpus_len: Size of the Corpus. :type corpus_len: int :param n_words: Number of words to draw random integers from. :type n_words: int :param min_token_len: minimum token length used to create indices for corpus. :type min_token_len: int :param max_token_len: maximum token length used to create indices for corpus. :type max_token_len: int :param context_type: A type of tokenization. Default is 'document'. :type context_type: string, optional :param metadata: If `True` generates metadata. If `False` the only metadata for the corpus is the index information. :type metadata: boolean, optional :returns: Corpus object with random integers as its entries. :See Also: :class:`vsm.corpus.Corpus` """ random_state = np.random.RandomState(seed) corpus = random_state.randint(n_words, size=corpus_len) indices = [] i = np.random.randint(min_token_len, max_token_len) while i < corpus_len: indices.append(i) i += np.random.randint(min_token_len, max_token_len) indices.append(corpus_len) if metadata: metadata_ = ['{0}_{1}'.format(context_type, i) for i in xrange(len(indices))] dtype=[('idx', np.array(indices).dtype), (context_type + '_label', np.array(metadata_).dtype)] rand_tok = np.array(zip(indices, metadata_), dtype=dtype) else: rand_tok = np.array([(i,) for i in indices], dtype=[('idx', np.array(indices).dtype)]) return Corpus(corpus, context_types=[context_type], context_data=[rand_tok]) def corpus_fromlist(ls, context_type='context'): """ Takes a list of lists or arrays containing strings or integers and returns a Corpus object. The label associated to a given context is `context_type` prepended to the context index. :param ls: List of lists or List of arrays containing strings or integers. 
:type ls: list :param context_type: A type of tokenization. :type context_type: string, optional :returns: A Corpus object built from `ls`. :See Also: :class:`vsm.corpus.Corpus` **Examples** >>> ls = [['a', 'b'], ['c'], ['d', 'e']] >>> c = corpus_fromlist(ls, context_type='sentence') >>> c.view_contexts('sentence', as_strings=True) [array(['a', 'b'], dtype='|S1'), array(['c'], dtype='|S1'), array(['d', 'e'], dtype='|S1')] >>> c.context_data [array([(2, 'sentence_0'), (3, 'sentence_1'), (5, 'sentence_2')], dtype=[('idx', '<i8'), ('sentence_label', '|S10')])] """ corpus = [w for ctx in ls for w in ctx] indices = np.cumsum([len(sbls) for sbls in ls]) metadata = ['{0}_{1}'.format(context_type, i) for i in xrange(len(indices))] md_type = np.array(metadata).dtype dtype = [('idx', np.int), (context_type + '_label', md_type)] context_data = [np.array(zip(indices, metadata), dtype=dtype)] return Corpus(corpus, context_data=context_data, context_types=[context_type]) def toy_corpus(plain_corpus, is_filename=False, encoding='utf8', nltk_stop=False, stop_freq=0, add_stop=None, metadata=None, autolabel=False): """ `toy_corpus` is a convenience function for generating Corpus objects from a given string or a single file. `toy_corpus` will perform both word and document-level tokenization. It will also strip punctuation and arabic numerals outside the range 1-29. All letters are made lowercase. Document tokens are delimited by two or more line breaks. E.g., <document 0> <document 1> ... <document n> where <document i> is any chunk of text to be tokenized by word. :param plain_corpus: String containing a plain-text corpus or a filename of a file containing one. :type plain_corpus: string-like :param is_filename: If `True` then `plain_corpus` is treated like a filename. Otherwise, `plain_corpus` is presumed to contain the corpus. Default is `False`. :type is_filename: boolean, optional :param encoding: A string indicating the file encoding or 'detect', in which case `chardet` is used to automatically guess the encoding. Default is `utf8`. :type encoding: string, optional :param nltk_stop: If `True` then the corpus object is masked using the NLTK English stop words. Default is `False`. :type nltk_stop: boolean, optional :param stop_freq: The upper bound for a word to be masked on the basis of its collection frequency. Default is 0. :type stop_freq: int, optional :param add_stop: A list of stop words. Default is `None`. :type add_stop: array-like, optional :param metadata: A list of strings providing metadata about the documents. If provided, must have length equal to the number of documents. Default is `None`. :type metadata: array-like, optional :param autolabel: A boolean specifying whether to automatically label documents by position in file. Default is False :type metadata: boolean, optional :returns: c : a Corpus object Contains the tokenized corpus built from the input plain-text corpus. Document tokens are named `documents`. 
:See Also: :class:`vsm.corpus.Corpus`, :meth:`vsm.corpus.util.paragraph_tokenize`, :meth:`vsm.corpus.util.apply_stoplist` """ if is_filename: if encoding == 'detect': encoding = detect_encoding(plain_corpus) with open(plain_corpus, 'rb', encoding=encoding) as f: plain_corpus = f.read() docs = paragraph_tokenize(plain_corpus) docs = [word_tokenize(d) for d in docs] corpus = sum(docs, []) tok = np.cumsum(np.array([len(d) for d in docs])) if not metadata and autolabel: metadata = ['Document {0}'.format(i) for i in range(len(tok))] if metadata: if not len(metadata) == len(tok): msg = 'Metadata mismatch: metadata length is {0} and number'\ 'of documents is {1}'.format(len(metadata), len(tok)) raise Exception(msg) else: dtype = [('idx', np.array(tok).dtype), ('document_label', np.array(metadata).dtype)] tok = np.array(zip(tok, metadata), dtype=dtype) else: dtype = [('idx', np.array(tok).dtype)] tok = np.array([(i,) for i in tok], dtype=dtype) c = Corpus(corpus, context_data=[tok], context_types=['document']) c = apply_stoplist(c, nltk_stop=nltk_stop, freq=stop_freq, add_stop=add_stop) return c def file_tokenize(text): """ `file_tokenize` is a helper function for :meth:`file_corpus`. Takes a string that is content in a file and returns words and corpus data. :param text: Content in a plain text file. :type text: string :returns: words : List of words. Words in the `text` tokenized by :meth:`vsm.corpus.util.word_tokenize`. corpus_data : Dictionary with context type as keys and corresponding tokenizations as values. The tokenizations are np.arrays. """ words, par_tokens, sent_tokens = [], [], [] sent_break, par_n, sent_n = 0, 0, 0 pars = paragraph_tokenize(text) for par in pars: sents = sentence_tokenize(par) for sent in sents: w = word_tokenize(sent) words.extend(w) sent_break += len(w) sent_tokens.append((sent_break, par_n, sent_n)) sent_n += 1 par_tokens.append((sent_break, par_n)) par_n += 1 idx_dt = ('idx', np.int32) sent_label_dt = ('sentence_label', np.array(sent_n, np.str_).dtype) par_label_dt = ('paragraph_label', np.array(par_n, np.str_).dtype) corpus_data = dict() dtype = [idx_dt, par_label_dt] corpus_data['paragraph'] = np.array(par_tokens, dtype=dtype) dtype = [idx_dt, par_label_dt, sent_label_dt] corpus_data['sentence'] = np.array(sent_tokens, dtype=dtype) return words, corpus_data def file_corpus(filename, encoding='utf8', nltk_stop=True, stop_freq=1, add_stop=None): """ `file_corpus` is a convenience function for generating Corpus objects from a a plain text corpus contained in a single string. `file_corpus` will strip punctuation and arabic numerals outside the range 1-29. All letters are made lowercase. :param filename: File name of the plain text file. :type plain_dir: string-like :param encoding: A string indicating the file encoding or 'detect', in which case `chardet` is used to automatically guess the encoding. Default is `utf8`. :type encoding: string, optional :param nltk_stop: If `True` then the corpus object is masked using the NLTK English stop words. Default is `False`. :type nltk_stop: boolean, optional :param stop_freq: The upper bound for a word to be masked on the basis of its collection frequency. Default is 1. :type stop_freq: int, optional :param add_stop: A list of stop words. Default is `None`. :type add_stop: array-like, optional :returns: c : a Corpus object Contains the tokenized corpus built from the input plain-text corpus. Document tokens are named `documents`. 
:See Also: :class:`vsm.corpus.Corpus`, :meth:`file_tokenize`, :meth:`vsm.corpus.util.apply_stoplist` """ if encoding == 'detect': encoding = detect_encoding(filename) with open(filename, mode='r', encoding=encoding) as f: text = f.read() words, tok = file_tokenize(text) names, data = zip(*tok.items()) c = Corpus(words, context_data=data, context_types=names) c = apply_stoplist(c, nltk_stop=nltk_stop, freq=stop_freq, add_stop=add_stop) return c def json_corpus(json_file, doc_key, label_key, encoding='utf8', nltk_stop=False, stop_freq=0, add_stop=None): """ `json_corpus` is a convenience function for generating Corpus objects from a json file. It construct a corpus, document labels and metadata respectively from the specified fields in the json file. `json_corpus` will perform word-level tokenization. It will also strip punctuation and arabic numerals outside the range 1-29. All letters are made lowercase. :param json_file: Json file name containing documents and metadata. :type json_file: string-like :param doc_key: Name of the key for documents. :type doc_key: string-like :param label_key: Name of the key used for document labels. Labels are used when a viewer function outputs a list of documents. Any field other than `doc_key` and `label_key` is stored as metadata. :type label_key: string-like :param encoding: A string indicating the file encoding or 'detect', in which case `chardet` is used to automatically guess the encoding. Default is `utf8`. :type encoding: string, optional :param nltk_stop: If `True` then the corpus object is masked using the NLTK English stop words. Default is `False`. :type nltk_stop: boolean, optional :param stop_freq: The upper bound for a word to be masked on the basis of its collection frequency. Default is 0. :type stop_freq: int, optional :param add_stop: A list of stop words. Default is `None`. :type add_stop: array-like, optional :returns: c : a Corpus object Contains the tokenized corpus built from the input plain-text corpus. Document tokens are named `documents`. :See Also: :class:`vsm.corpus.Corpus`, :meth:`vsm.corpus.util.paragraph_tokenize`, :meth:`vsm.corpus.util.apply_stoplist` """ import json if encoding == 'detect': encoding = detect_encoding(json_file) with open(json_file, 'r', encoding=encoding) as f: json_data = json.load(f) docs = [] label = [] metadata = [] for i in json_data: docs.append(i.pop(doc_key, None).encode('ascii','ignore')) label.append(i.pop(label_key, None)) metadata.append(i) # metadata are all the rest docs = [word_tokenize(d) for d in docs] corpus = sum(docs, []) tok = np.cumsum(np.array([len(d) for d in docs])) # add document label and metadata dtype = [('idx', np.array(tok).dtype), ('document_label', np.array(label).dtype), ('metadata', np.array(metadata).dtype)] # todo: create separate dtype for each key? tok = np.array(zip(tok, label, metadata), dtype=dtype) c = Corpus(corpus, context_data=[tok], context_types=['document']) c = apply_stoplist(c, nltk_stop=nltk_stop, freq=stop_freq, add_stop=add_stop) return c def dir_tokenize(chunks, labels, chunk_name='article', paragraphs=True, verbose=1): """`dir_tokenize` is a helper function for :meth:`dir_corpus`. Takes a list of strings and labels and returns words and corpus data. :param chunks: List of strings. :type chunks: list :param labels: List of strings. :type labels: list :param chunk_name: The name of the tokenization corresponding to individual files. For example, if the input strings are pages of a book, one might set `chunk_name` to `pages`. Default is `articles`. 
:type chunk_name: string-like, optional :param paragraphs: If `True`, a paragraph-level tokenization is included. Defaults to `True`. :type paragraphs: boolean, optional :param verbose: Verbosity level. 1 prints a progress bar. :type verbose: int, default 1 :returns: words : List of words. words in the `chunks` tokenized by :meth: word_tokenize. corpus_data : Dictionary with context type as keys and corresponding tokenizations as values. The tokenizations are np.arrays. """ words, chk_tokens, sent_tokens = [], [], [] sent_break, chk_n, sent_n = 0, 0, 0 if verbose == 1: pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=len(chunks)).start() if paragraphs: par_tokens = [] par_n = 0 for chk, label in zip(chunks, labels): # print 'Tokenizing', label pars = paragraph_tokenize(chk) for par in pars: sents = sentence_tokenize(par) for sent in sents: w = word_tokenize(sent) words.extend(w) sent_break += len(w) sent_tokens.append((sent_break, label, par_n, sent_n)) sent_n += 1 par_tokens.append((sent_break, label, par_n)) par_n += 1 if verbose == 1: pbar.update(chk_n) chk_tokens.append((sent_break, label)) chk_n += 1 else: for chk, label in zip(chunks, labels): # print 'Tokenizing', label sents = sentence_tokenize(chk) for sent in sents: w = word_tokenize(sent) words.extend(w) sent_break += len(w) sent_tokens.append((sent_break, label, sent_n)) sent_n += 1 if verbose == 1: pbar.update(chk_n) chk_tokens.append((sent_break, label)) chk_n += 1 idx_dt = ('idx', np.int32) label_dt = (chunk_name + '_label', np.array(labels).dtype) sent_label_dt = ('sentence_label', np.array(sent_n, np.str_).dtype) corpus_data = dict() dtype = [idx_dt, label_dt] corpus_data[chunk_name] = np.array(chk_tokens, dtype=dtype) if paragraphs: par_label_dt = ('paragraph_label', np.array(par_n, np.str_).dtype) dtype = [idx_dt, label_dt, par_label_dt] corpus_data['paragraph'] = np.array(par_tokens, dtype=dtype) dtype = [idx_dt, label_dt, par_label_dt, sent_label_dt] corpus_data['sentence'] = np.array(sent_tokens, dtype=dtype) else: dtype = [idx_dt, label_dt, sent_label_dt] corpus_data['sentence'] = np.array(sent_tokens, dtype=dtype) if verbose == 1: pbar.finish() return words, corpus_data def dir_corpus(plain_dir, chunk_name='article', encoding='utf8', paragraphs=True, ignore=['.json','.log','.pickle'], nltk_stop=True, stop_freq=1, add_stop=None, decode=False, verbose=1): """ `dir_corpus` is a convenience function for generating Corpus objects from a directory of plain text files. `dir_corpus` will retain file-level tokenization and perform sentence and word tokenizations. Optionally, it will provide paragraph-level tokenizations. It will also strip punctuation and arabic numerals outside the range 1-29. All letters are made lowercase. :param plain_dir: String containing directory containing a plain-text corpus. :type plain_dir: string-like :param chunk_name: The name of the tokenization corresponding to individual files. For example, if the files are pages of a book, one might set `chunk_name` to `pages`. Default is `articles`. :type chunk_name: string-like, optional :param encoding: A string indicating the file encoding or 'detect', in which case `chardet` is used to automatically guess the encoding. Default is `utf8`. :type encoding: string, optional :param paragraphs: If `True`, a paragraph-level tokenization is included. Defaults to `True`. :type paragraphs: boolean, optional :param ignore: The list containing suffixes of files to be filtered. The suffix strings are normally file types. 
Default is ['.json', '.log','.pickle']. :type ignore: list of strings, optional :param nltk_stop: If `True` then the corpus object is masked using the NLTK English stop words. Default is `False`. :type nltk_stop: boolean, optional :param stop_freq: The upper bound for a word to be masked on the basis of its collection frequency. Default is 1. :type stop_freq: int, optional :param add_stop: A list of stop words. Default is `None`. :type add_stop: array-like, optional :param verbose: Verbosity level. 1 prints a progress bar. :type verbose: int, default 1 :returns: c : a Corpus object Contains the tokenized corpus built from the input plain-text corpus. Document tokens are named `documents`. :See Also: :class:`vsm.corpus.Corpus`, :meth:`dir_tokenize`, :meth:`vsm.corpus.util.apply_stoplist` """ chunks = [] filenames = os.listdir(plain_dir) filenames = filter_by_suffix(filenames, ignore) filenames.sort() for filename in filenames: filename = os.path.join(plain_dir, filename) if encoding == 'detect': encoding = detect_encoding(filename) if decode: with open(filename, mode='r', encoding=encoding) as f: if decode: chunks.append(unidecode(f.read())) else: with open(filename, mode='r', encoding=encoding) as f: chunks.append(f.read()) words, tok = dir_tokenize(chunks, filenames, chunk_name=chunk_name, paragraphs=paragraphs, verbose=verbose) names, data = zip(*tok.items()) c = Corpus(words, context_data=data, context_types=names) c = apply_stoplist(c, nltk_stop=nltk_stop, freq=stop_freq, add_stop=add_stop) return c def coll_tokenize(books, book_names, verbose=1): """ `coll_tokenize` is a helper function for :meth:`coll_corpus`. Takes a list of books and `book_names`, and returns words and corpus data. :param books: List of books. :type books: list :param book_names: List of book names. :type book_names: list :param verbose: Verbosity level. 1 prints a progress bar. :type verbose: int, default 1 :returns: words : List of words. words in the `books` tokenized by :meth:`word_tokenize`. corpus_data : Dictionary with context type as keys and corresponding tokenizations as values. The tokenizations are np.arrays. 
""" words, book_tokens, page_tokens, sent_tokens = [], [], [], [] sent_break, book_n, page_n, sent_n = 0, 0, 0, 0 if verbose == 1: pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=len(books)).start() for book, book_label in zip(books, book_names): # print 'Tokenizing', book_label for page, page_file in book: sents = sentence_tokenize(page) for sent in sents: w = word_tokenize(sent) words.extend(w) sent_break += len(w) sent_tokens.append((sent_break, sent_n, page_n, book_label, page_file)) sent_n += 1 page_tokens.append((sent_break, page_n, book_label, page_file)) page_n += 1 if verbose == 1: pbar.update(book_n) book_tokens.append((sent_break, book_label)) book_n += 1 idx_dt = ('idx', np.int32) book_label_dt = ('book_label', np.array(book_names).dtype) page_label_dt = ('page_label', np.array(page_n, np.str_).dtype) sent_label_dt = ('sentence_label', np.array(sent_n, np.str_).dtype) files = [f for (a,b,c,f) in page_tokens] file_dt = ('file', np.array(files, np.str_).dtype) corpus_data = dict() dtype = [idx_dt, book_label_dt] corpus_data['book'] = np.array(book_tokens, dtype=dtype) dtype = [idx_dt, page_label_dt, book_label_dt, file_dt] corpus_data['page'] = np.array(page_tokens, dtype=dtype) dtype = [idx_dt, sent_label_dt, page_label_dt, book_label_dt, file_dt] corpus_data['sentence'] = np.array(sent_tokens, dtype=dtype) if verbose == 1: pbar.finish() return words, corpus_data #TODO: This should be a whitelist not a blacklist def coll_corpus(coll_dir, encoding='utf8', ignore=['.json', '.log', '.pickle'], nltk_stop=True, stop_freq=1, add_stop=None, decode=False, verbose=1): """ `coll_corpus` is a convenience function for generating Corpus objects from a directory of plain text files. It will also strip punctuation and arabic numerals outside the range 1-29. All letters are made lowercase. :param coll_dir: Directory containing a collections of books which contain pages as plain-text files. :type coll_dir: string-like :param encoding: A string indicating the file encoding or 'detect', in which case `chardet` is used to automatically guess the encoding. Default is `utf8`. :type encoding: string, optional :param ignore: The list containing suffixes of files to be filtered. The suffix strings are normally file types. Default is ['.json', '.log','.pickle']. :type ignore: list of strings, optional :param nltk_stop: If `True` then the corpus object is masked using the NLTK English stop words. Default is `False`. :type nltk_stop: boolean, optional :param stop_freq: The upper bound for a word to be masked on the basis of its collection frequency. Default is 1. :type stop_freq: int, optional :param add_stop: A list of stop words. Default is `None`. :type add_stop: array-like, optional :param verbose: Verbosity level. 1 prints a progress bar. :type verbose: int, default 1 :returns: c : a Corpus object Contains the tokenized corpus built from the plain-text files in `coll_dir` corpus. Document tokens are named `documents`. 
""" books = [] book_names = os.listdir(coll_dir) book_names = filter_by_suffix(book_names, ignore) book_names.sort() for book_name in book_names: pages = [] book_path = os.path.join(coll_dir, book_name) page_names = os.listdir(book_path) page_names = filter_by_suffix(page_names, ignore) page_names.sort() for page_name in page_names: page_file = book_name + '/' + page_name page_name = os.path.join(book_path, page_name) if encoding == 'detect': encoding = detect_encoding(page_name) if decode: with open(page_name, mode='r', encoding=encoding) as f: pages.append((unidecode(f.read()), page_file)) else: with open(page_name, mode='r', encoding=encoding) as f: pages.append((f.read(), page_file)) books.append(pages) words, tok = coll_tokenize(books, book_names) names, data = zip(*tok.items()) c = Corpus(words, context_data=data, context_types=names) c = apply_stoplist(c, nltk_stop=nltk_stop, freq=stop_freq, add_stop=add_stop) return c ########### # Testing # ########### def test_toy_corpus(): keats = ('She dwells with Beauty - Beauty that must die;\n\n' 'And Joy, whose hand is ever at his lips\n\n' 'Bidding adieu; and aching Pleasure nigh,\n\n' 'Turning to poison while the bee-mouth sips:\n\n' 'Ay, in the very temple of Delight\n\n' 'Veil\'d Melancholy has her sovran shrine,\n\n' 'Though seen of none save him whose strenuous tongue\n\n' 'Can burst Joy\'s grape against his palate fine;\n\n' 'His soul shall taste the sadness of her might,\n\n' 'And be among her cloudy trophies hung.') assert toy_corpus(keats) assert toy_corpus(keats, nltk_stop=True) assert toy_corpus(keats, stop_freq=1) assert toy_corpus(keats, add_stop=['and', 'with']) assert toy_corpus(keats, nltk_stop=True, stop_freq=1, add_stop=['ay']) import os from tempfile import NamedTemporaryFile as NFT tmp = NFT(delete=False) tmp.write(keats) tmp.close() c = toy_corpus(tmp.name, is_filename=True, nltk_stop=True, add_stop=['ay']) assert c os.remove(tmp.name) return c def test_dir_tokenize(): chunks = ['foo foo foo\n\nfoo foo', 'Foo bar. Foo bar.', '', 'foo\n\nfoo'] labels = [str(i) for i in xrange(len(chunks))] words, context_data = dir_tokenize(chunks, labels) assert len(words) == 11 assert len(context_data['article']) == 4 assert len(context_data['paragraph']) == 6 assert len(context_data['sentence']) == 7 assert (context_data['article']['idx'] == [5, 9, 9, 11]).all() assert (context_data['article']['article_label'] == ['0', '1', '2', '3']).all() assert (context_data['paragraph']['idx'] == [3, 5, 9, 9, 10, 11]).all() assert (context_data['paragraph']['article_label'] == ['0', '0', '1', '2', '3', '3']).all() assert (context_data['paragraph']['par_label'] == ['0', '1', '2', '3', '4', '5']).all() assert (context_data['sentence']['idx'] == [3, 5, 7, 9, 9, 10, 11]).all() assert (context_data['sentence']['article_label'] == ['0', '0', '1', '1', '2', '3', '3']).all() assert (context_data['sentence']['par_label'] == ['0', '1', '2', '2', '3', '4', '5']).all() assert (context_data['sentence']['sent_label'] == ['0', '1', '2', '3', '4', '5', '6']).all() def test_coll_tokenize(): books = [[('foo foo foo.\n\nfoo foo', '1'), ('Foo bar. 
Foo bar.', '2')], [('','3'), ('foo.\n\nfoo', '4')]] book_names = [str(i) for i in xrange(len(books))] words, context_data = coll_tokenize(books, book_names) assert len(words) == 11 assert len(context_data['book']) == 2 assert len(context_data['page']) == 4 assert len(context_data['sentence']) == 7 assert (context_data['book']['idx'] == [9, 11]).all() assert (context_data['book']['book_label'] == ['0', '1']).all() assert (context_data['page']['idx'] == [5, 9, 9, 11]).all() assert (context_data['page']['page_label'] == ['0', '1', '2', '3']).all() assert (context_data['page']['book_label'] == ['0', '0', '1', '1']).all() assert (context_data['sentence']['idx'] == [3, 5, 7, 9, 9, 10, 11]).all() assert (context_data['sentence']['sent_label'] == ['0', '1', '2', '3', '4', '5', '6']).all() assert (context_data['sentence']['page_label'] == ['0', '0', '1', '1', '2', '3', '3']).all() assert (context_data['sentence']['book_label'] == ['0', '0', '0', '0', '1', '1', '1']).all() assert (context_data['page']['file'] == ['1','2','3','4']).all() assert (context_data['sentence']['file'] == ['1','1','2','2','3','4','4']).all()
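# Minimal usage sketch (illustrative; corpus_from_strings as defined above,
# NLTK stopword data needed only when nltk_stop=True):
#   c = corpus_from_strings(['the cat sat on the mat', 'the dog barked'],
#                           nltk_stop=False)
#   c.view_contexts('document', as_strings=True)
#   # -> one word array per input string, labelled document_0, document_1, ...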
iSumitG/vsm
vsm/extensions/corpusbuilders/corpusbuilders.py
Python
mit
31,417
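Usage sketch for the corpus-builder helpers above (not part of the original file). The import path mirrors the file's location and may need adjusting; the text and stop words are arbitrary.

from vsm.extensions.corpusbuilders.corpusbuilders import toy_corpus

# Build a small stoplisted Corpus from an in-memory string, exactly as the
# test_toy_corpus() cases above do.
text = 'She dwells with Beauty.\n\nBeauty that must die.'
c = toy_corpus(text, nltk_stop=True, stop_freq=1, add_stop=['beauty'])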
""" tools for reading and writing OPTIM input and output files """ import numpy as np from pele.storage import Minimum, TransitionState _id_count = 0 # class UnboundMinimum(object): # """ # a class to duplicate some of the functionality of the Minimum class # """ # energy = None # coords = None # fvib = None # pgorder = None # # _id_count = 0 # def __init__(self, energy, coords): # self.energy = energy # self.coords = coords # global _id_count # self.id() = _id_count # _id_count += 1 # # def __eq__(self, m): # """m can be integer or Minima object""" # assert self.id() is not None # if isinstance(m, UnboundMinimum): # assert m.id() is not None # return self.id() == m.id() # else: # return self.id() == m # # def __hash__(self): # assert self.id() is not None # return self.id() def read_points_min_ts(fname, ndof=None, endianness="="): """ read coords from a points.min or a points.ts file Notes ----- the files were written with fortran code that looks something like this:: NOPT = 3 * NATOMS INQUIRE(IOLENGTH=NDUMMY) COORDS(1:NOPT) OPEN(13,FILE='points.min,ACCESS='DIRECT',FORM='UNFORMATTED',STATUS='UNKNOWN',RECL=NDUMMY) DO J1=1,NMIN WRITE(13,REC=J1) COORDS(1:NOPT) ENDDO CLOSE(13) This means the data is stored without any header information. It is just a long list of double precision floating point numbers. Note that some fortran compilers use different endiness for the data. If the coordinates comes out garbage this is probably the problem. The solution is to pass a different data type dtype=np.dtype("<d") # for little-endian double precision dtype=np.dtype(">d") # for big-endian double precision Parameters ---------- fname : str filenname to read from ndof : int, optional for testing to make sure the number of floats read is a multiple of ndof endianness : str define the endianness of the data. can be "=", "<", ">" """ with open(fname, "rb") as fin: coords = np.fromfile(fin, dtype=np.dtype(endianness + "d")) if ndof is not None: if len(coords) % ndof != 0: raise Exception("number of double precision variables read from %s (%s) is not divisible by ndof (%d)" % (fname, len(coords), ndof)) # print coords return coords.reshape(-1) class OptimDBConverter(object): """ Converts old OPTIM to pele database Parameters ---------- database : pele Database the minima and transition states will be place in here ndof : int, optional for testing to make sure the number of floats read is a multiple of ndof mindata, tsdata, pointsmin, pointsts : str the files to read from. The files contain points.min : the coordinates of the minima in binary format min.data : additional information about the minima (like the energy) points.ts : the coordinates of the transition states min.ts : additional information about transition states (like which minima they connect) endianness : str define the endianness of the binary data. can be "=", "<", ">" assert_coords : bool If this is True the conversion will abort if the coordinate conversion doesn't work. Set this to false if you only care about the minima and ts metadata. Notes ----- the files were written with fortran code that looks something like this:: NOPT = 3 * NATOMS INQUIRE(IOLENGTH=NDUMMY) COORDS(1:NOPT) OPEN(13,FILE='points.min,ACCESS='DIRECT',FORM='UNFORMATTED',STATUS='UNKNOWN',RECL=NDUMMY) DO J1=1,NMIN WRITE(13,REC=J1) COORDS(1:NOPT) ENDDO CLOSE(13) This means the data is stored without any header information. It is just a long list of double precision floating point numbers. Note that some fortran compilers use different endiness for the data. 
If the coordinates comes out garbage this is probably the problem. The solution is to pass a different data type dtype=np.dtype("<d") # for little-endian double precision dtype=np.dtype(">d") # for big-endian double precision """ def __init__(self, database, ndof=None, mindata="min.data", tsdata="ts.data", pointsmin="points.min", pointsts="points.ts", endianness="=", assert_coords=True): self.db = database self.ndof = ndof self.mindata = mindata self.tsdata = tsdata self.pointsmin = pointsmin self.pointsts = pointsts self.endianness = endianness self.no_coords_ok = not assert_coords def setAccuracy(self, accuracy=0.000001): self.db.accuracy = accuracy def ReadMinDataFast(self): """read min.data file this method uses bulk database inserts. It is *MUCH* faster this way, but you have to be careful that this and the Minimum object stays in sync. e.g. minimum.invalid must be set to false manually here. """ print "reading from", self.mindata indx = 0 # f_len = file_len(self.mindata) minima_dicts = [] for line in open(self.mindata, 'r'): sline = line.split() # get the coordinates corresponding to this minimum if self.pointsmin_data is None: coords = np.zeros(1) else: coords = self.pointsmin_data[indx, :] # read data from the min.data line e, fvib = map(float, sline[:2]) # energy and vibrational free energy pg = int(sline[2]) # point group order # create the minimum object and attach the data # must add minima like this. If you use db.addMinimum() # some minima with similar energy might be assumed to be duplicates min_dict = dict(energy=e, coords=coords, invalid=False, fvib=fvib, pgorder=pg ) minima_dicts.append(min_dict) indx += 1 # if indx % 50 == 0: # self.db.session.commit() self.db.engine.execute(Minimum.__table__.insert(), minima_dicts) self.db.session.commit() print "--->finished loading %s minima" % indx def ReadMindata(self): # pragma: no cover print "reading from", self.mindata indx = 0 # f_len = file_len(self.mindata) self.index2min = dict() for line in open(self.mindata, 'r'): sline = line.split() # get the coordinates corresponding to this minimum if self.pointsmin_data is None: coords = np.zeros(1) else: coords = self.pointsmin_data[indx, :] # read data from the min.data line e, fvib = map(float, sline[:2]) # energy and vibrational free energy pg = int(sline[2]) # point group order # create the minimum object and attach the data # must add minima like this. If you use db.addMinimum() # some minima with similar energy might be assumed to be duplicates min1 = Minimum(e, coords) min1.fvib = fvib min1.pgorder = pg self.index2min[indx] = min1 indx += 1 self.db.session.add(min1) if indx % 50 == 0: self.db.session.commit() print "--->finished loading %s minima" % indx def ReadTSdataFast(self): """read ts.data file this method uses bulk database inserts. It is *MUCH* faster this way, but you have to be careful that this and the TransitionState object stays in sync. e.g. ts.invalid must be set to false manually here. 
""" print "reading from", self.tsdata indx = 0 ts_dicts = [] for line in open(self.tsdata, 'r'): sline = line.split() # get the coordinates corresponding to this minimum if self.pointsts_data is None: coords = np.zeros(1) else: coords = self.pointsts_data[indx, :] # read data from the min.ts line e, fvib = map(float, sline[:2]) # get energy and fvib pg = int(sline[2]) # point group order m1indx, m2indx = map(int, sline[3:5]) # m1indx -= 1 # m2indx -= 1 # min1 = self.index2min[m1indx - 1] # minus 1 for fortran indexing # min2 = self.index2min[m2indx - 1] # minus 1 for fortran indexing # must add transition states like this. If you use db.addtransitionState() # some transition states might be assumed to be duplicates tsdict = dict(energy=e, coords=coords, invalid=False, fvib=fvib, pgorder=pg, _minimum1_id=m1indx, _minimum2_id=m2indx ) ts_dicts.append(tsdict) indx += 1 # if indx % 50 == 0: # self.db.session.commit() self.db.engine.execute(TransitionState.__table__.insert(), ts_dicts) self.db.session.commit() print "--->finished loading %s transition states" % indx def ReadTSdata(self): # pragma: no cover print "reading from", self.tsdata indx = 0 for line in open(self.tsdata, 'r'): sline = line.split() # get the coordinates corresponding to this minimum if self.pointsts_data is None: coords = np.zeros(1) else: coords = self.pointsts_data[indx, :] # read data from the min.ts line e, fvib = map(float, sline[:2]) # get energy and fvib pg = int(sline[2]) # point group order m1indx, m2indx = map(int, sline[3:5]) min1 = self.index2min[m1indx - 1] # minus 1 for fortran indexing min2 = self.index2min[m2indx - 1] # minus 1 for fortran indexing # must add transition states like this. If you use db.addtransitionState() # some transition states might be assumed to be duplicates trans = TransitionState(e, coords, min1, min2) trans.fvib = fvib trans.pgorder = pg indx += 1 self.db.session.add(trans) if indx % 50 == 0: self.db.session.commit() print "--->finished loading %s transition states" % indx def read_points_min(self): print "reading from", self.pointsmin coords = read_points_min_ts(self.pointsmin, self.ndof, endianness=self.endianness) if coords.size == 0: raise Exception(self.pointsmin + " is empty") if self.ndof is None: # try to get the number of minima from the min.data file nminima = sum((1 for _ in open(self.mindata, "r"))) assert len(coords.shape) == 1 if coords.size % nminima != 0: raise ValueError("the number of data points in %s is not divisible by %s the number of minima in %s" % (self.mindata, coords.size, nminima)) self.ndof = coords.size / nminima print "read %s minimum coordinates of length %s" % (nminima, self.ndof) self.pointsmin_data = coords.reshape([-1, self.ndof]) def read_points_ts(self): print "reading from", self.pointsts coords = read_points_min_ts(self.pointsts, self.ndof, endianness=self.endianness) self.pointsts_data = coords.reshape([-1, self.ndof]) def load_minima(self): try: self.read_points_min() except IOError: if self.no_coords_ok: self.pointsmin_data = None else: raise self.ReadMinDataFast() self.db.session.commit() def load_transition_states(self): try: self.read_points_ts() except IOError: if self.no_coords_ok: self.pointsts_data = None else: raise self.ReadTSdataFast() self.db.session.commit() def convert(self): self.load_minima() self.load_transition_states() def convert_no_coords(self): self.pointsmin_data = None self.pointsts_data = None self.ReadMinDataFast() self.ReadTSdataFast()
cjforman/pele
pele/utils/optim_compatibility.py
Python
gpl-3.0
12,835
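Usage sketch for the converter above, assuming pele exposes its Database class from pele.storage with the database file name as first argument, and that the four OPTIM files sit in the working directory (the constructor's defaults).

from pele.storage import Database
from pele.utils.optim_compatibility import OptimDBConverter

db = Database("optim.sqlite")
# "<" matches the little-endian double-precision case described in the
# docstrings above; convert() loads minima first, then transition states.
converter = OptimDBConverter(db, endianness="<")
converter.convert()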
# -*- coding: utf-8 -*-
from tagging.models import Tag, TaggedItem
from django.contrib.contenttypes.models import ContentType
from laws.models import Vote, Bill
from committees.models import CommitteeMeeting
import operator


def approve(admin, request, tag_suggestions):
    for tag_suggestion in tag_suggestions:
        obj = tag_suggestion.object
        ct = ContentType.objects.get_for_model(obj)
        tag, t_created = Tag.objects.get_or_create(name=tag_suggestion.name)
        ti, ti_created = TaggedItem.objects.get_or_create(
            tag=tag, object_id=obj.pk, content_type=ct)
        tag_suggestion.delete()


def sum_add_two_dictionaries(target_dict, dict_to_add):
    """Merges dict_to_add into target_dict in place, assuming numeric values:
    keys present in both dictionaries have their values summed, and keys only
    in dict_to_add are copied over."""
    # go over the dictionary to add
    for key in dict_to_add:
        if key in target_dict:
            target_dict[key] += dict_to_add[key]
        else:
            target_dict[key] = dict_to_add[key]

# A list of prefix characters to use in tag extraction
prefixes = [u'ב', u'ו', u'ה', u'מ', u'מה', u'ל', u'']

_all_tags_names = []


def all_tags_names():
    '''Lazy initialization of the tags list'''
    global _all_tags_names
    if not _all_tags_names:
        # Extract only used tags, to avoid irrelevant tags
        vote_tags = Tag.objects.usage_for_model(Vote)
        bill_tags = Tag.objects.usage_for_model(Bill)
        cm_tags = Tag.objects.usage_for_model(CommitteeMeeting)
        all_tags = list(set(vote_tags).union(bill_tags).union(cm_tags))
        # Cache the names of every tag currently in use on the website
        _all_tags_names = [tag.name for tag in all_tags]
    return _all_tags_names


def get_tags_in_text(text):
    """Returns a dictionary whose keys are tags found in text and whose
    values are the number of occurrences in text"""
    result_dict = {}
    words = text.split() if text is not None else []
    # look for each tag among the words
    for tag in all_tags_names():
        # create tag variations according to prefixes
        tag_variations = [(p + tag) for p in prefixes]
        # count the occurrences of this tag over all words
        occurrence_count = 0
        for word in words:
            if word in tag_variations:
                occurrence_count += 1
        # if the tag was found at least once, record the count
        if occurrence_count > 0:
            result_dict[tag] = result_dict.get(tag, 0) + occurrence_count

    return result_dict


def extract_suggested_tags(current_tags, text_list):
    '''Returns a sorted list of (tag, count) tuples, where the keys are tags
    found in the given texts and the values are the number of occurrences in
    those texts. Tags already in current_tags are removed from the final
    list, which is sorted from most to least frequently occurring tag.'''
    tags_occurrences = {}
    # find occurrences of tags in the texts
    for text_to_extract in text_list:
        sum_add_two_dictionaries(tags_occurrences,
                                 get_tags_in_text(text_to_extract))
    # remove tags that are already applied
    for tag in current_tags:
        if tag.name in tags_occurrences:
            del tags_occurrences[tag.name]
    # sort suggestions
    return sorted(tags_occurrences.iteritems(),
                  key=operator.itemgetter(1), reverse=True)
OriHoch/Open-Knesset
auxiliary/tag_suggestions/__init__.py
Python
bsd-3-clause
3,351
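A minimal sketch of the merge step used by extract_suggested_tags above, with plain dicts so it runs without the Django models (the tag names are invented):

counts = {'education': 2}
sum_add_two_dictionaries(counts, {'education': 1, 'budget': 3})
# counts is now {'education': 3, 'budget': 3}: shared keys are summed,
# new keys are copied over.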
# File name: measure_module.py
#
# If you did not already look at 'basic_measure_script.py'
# do that first.

# To load this module type 'import measure_module'
# to run an example function read on.

# Modules are often more convenient than scripts. In a
# module you can define many functions that can be reused
# in other functions or scripts. The functions are accessed
# by first importing the module, and then calling a function
# within the module with <module>.<function>()

# Remember that a module has its own namespace, so many
# objects that exist in the main namespace (like instruments)
# need to be imported explicitly.

import numpy
from time import time, sleep
import os
from qtlab.source import qt

#####################################################
# this part is to simulate some data, you can skip it
#####################################################

# fake data
def lorentzian(x, center, width):
    return 1/numpy.pi*(0.5*width)/((x-center)**2+(0.5*width)**2)

def addnoise(x, variance):
    return x+variance*numpy.random.randn(numpy.size(x))

def fake_data(x, y):
    return addnoise(lorentzian(x, y*y/5+1, 0.1), 0.1)[0]

# fake instruments
def fake_ivvi_set_dac_3(val):
    global fake_dac_3
    fake_dac_3 = val

def fake_mw_src_set_freq(val):
    global fake_freq
    fake_freq = val

def fake_readout_psw():
    global fake_dac_3, fake_freq
    return fake_data(fake_freq, fake_dac_3)

######################################################
# example 1 - basic
######################################################

def example1(f_vec, b_vec):
    '''
    this example is exactly the same as 'basic_measure_script.py'
    but now made into a function. The main advantage is that now
    the parameters f_vec and b_vec can be provided when calling
    the function: "measure_module.example1(vec1, vec2)", instead
    of having to change the script.

    To run the function type in the terminal:

    fv=numpy.arange(0,10,0.01)
    bv=numpy.arange(-5,5,0.1)
    measure_module.example1(fv, bv)
    '''
    qt.mstart()

    data = qt.Data(name='testmeasurement')
    data.add_coordinate('frequency, mw src 1 [Hz]')
    data.add_coordinate('Bfield, ivvi dac 3 [mV]')
    data.add_value('Psw SQUID')
    data.create_file()

    plot2d = qt.Plot2D(data, name='measure2D')
    plot3d = qt.Plot3D(data, name='measure3D', style='image')

    for b in b_vec:
        fake_ivvi_set_dac_3(b)
        for f in f_vec:
            fake_mw_src_set_freq(f)
            result = fake_readout_psw()
            data.add_data_point(f, b, result)
            qt.msleep(0.01)
        data.new_block()

    data.close_file()
    qt.mend()

#######################
# example 2 - data
#######################

def example2(f_vec, b_vec):
    '''
    This example introduces three new features:

    1) setting format and/or precision of data in the datafile.
    using 'precision' will keep the default scientific notation,
    'format' can be anything you like
        => add_coordinate(precision=<nr>)
        => add_coordinate(format='<format_string>')
    2) specify a specific filepath for the data file (instead of
    the automatic filepath)
        => create_file(filepath=<filepath>)
    3) turn off automatic saving of the instrument settings file.
        => create_file(settings_file=False)

    To run the function type in the terminal:

    fv=numpy.arange(0,10,0.01)
    bv=numpy.arange(-5,5,0.1)
    measure_module.example2(fv, bv)
    '''
    qt.mstart()

    # this shows how to change format of saved data (per column)
    data = qt.Data(name='testmeasurement')
    data.add_coordinate('frequency, mw src 1 [Hz]', precision=3)
    data.add_coordinate('Bfield, ivvi dac 3 [mV]', format='%.12f')
    data.add_value('Psw SQUID', format='%.3e')
    data.create_file()

    # this shows how to save to a specific path and name, and how
    # to avoid a settings file being created. The directory is first
    # retrieved from the previous data object.
    dir = data.get_dir()
    maxfilepath = os.path.join(dir, 'maxvals.dat')
    data_max = qt.Data(name='maxvals')
    data_max.add_coordinate('Bfield, ivvi dac 3 [mV]')
    data_max.add_value('resonance frequency [Hz]')
    data_max.create_file(filepath=maxfilepath, settings_file=False)

    plot2d = qt.Plot2D(data, name='measure2D')
    plot3d = qt.Plot3D(data, name='measure3D', style='image')
    plot2dmax = qt.Plot2D(data_max, name='maxvals')

    for b in b_vec:
        fake_ivvi_set_dac_3(b)
        last_trace = []
        for f in f_vec:
            fake_mw_src_set_freq(f)
            result = fake_readout_psw()
            data.add_data_point(f, b, result)
            last_trace.append(result)
            qt.msleep(0.01)
        data.new_block()

        loc_of_max = numpy.argmax(last_trace)
        freq_at_max = f_vec[loc_of_max]
        data_max.add_data_point(b, freq_at_max)

    data.close_file()
    data_max.close_file()
    qt.mend()

#######################
# example 3 - plotting
#######################

def example3(x_vec=numpy.linspace(0, 10, 10), y_vec=numpy.linspace(0, 10, 50)):
    '''
    To run the function type in the terminal:

    measure_module.example3()
    '''
    qt.mstart()

    data = qt.Data(name='testmeasurement')
    data.add_coordinate('x')
    data.add_coordinate('y')
    data.add_value('z1')
    data.add_value('z2')
    data.add_value('z3')
    data.create_file()

    plot2d_1 = qt.Plot2D(data, name='2D_1', coorddim=1, valdim=2)
    plot2d_2 = qt.Plot2D(data, name='2D_2', coorddim=1, valdim=2, maxtraces=1)
    plot2d_3 = qt.Plot2D(data, name='2D_3', coorddim=1, valdim=2, maxtraces=1)
    plot2d_3.add_data(data, coorddim=1, valdim=3, maxtraces=1)
    plot2d_3.add_data(data, coorddim=1, valdim=4, maxtraces=1)
    plot2d_4 = qt.Plot2D(data, name='2D_4', coorddim=1, valdim=2, mintime=0.3)
    plot2d_5 = qt.Plot2D(data, name='2D_5', coorddim=1, valdim=2, autoupdate=False)
    plot3d_1 = qt.Plot3D(data, name='3D_1', style='image')
    plot3d_2 = qt.Plot3D(data, name='3D_2', style='image', coorddims=(1, 0), valdim=4)

    for x in x_vec:
        for y in y_vec:
            z1 = numpy.sin(x+y)
            z2 = numpy.cos(x+y)
            z3 = numpy.sin(x+2*y)
            data.add_data_point(x, y, z1, z2, z3)
            if z1 > 0:
                plot2d_5.update()
            qt.msleep(0.1)
        data.new_block()

    plot2d_1.save_png()
    plot2d_1.save_gp()
    plot3d_2.save_png()
    plot3d_2.save_gp()

    data.close_file()
    qt.mend()
AdriaanRol/QTLab-UserFolder
examples/measure_module.py
Python
gpl-3.0
6,531
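As the docstrings above describe, these examples are meant to be run from the qtlab shell after importing the module; the sweep vectors are arbitrary:

import numpy
import measure_module

fv = numpy.arange(0, 10, 0.01)
bv = numpy.arange(-5, 5, 0.1)
measure_module.example2(fv, bv)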
from __future__ import absolute_import
import numpy as np
from time import time
from sklearn.tree import DecisionTreeClassifier

from .utils import ClassifyResult


def decision_tree_test(Xtrain, Ytrain, Xtest, pp, opts):
    """Replicates the C4.5 classifier used by Ishikawa."""
    tic = time()
    clf = DecisionTreeClassifier()
    clf.fit(Xtrain, Ytrain)
    proba = clf.predict_proba(Xtest)
    ranking = np.argsort(-proba)
    elapsed = time() - tic
    yield ClassifyResult(ranking, elapsed, 'dtree [%s]' % pp)
all-umass/superman
superman/classifiers/decision_tree.py
Python
mit
508
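Hypothetical call of the classifier above with random data; `opts` is unused by this particular classifier, and only the ClassifyResult constructor order (ranking, elapsed, title) is known from this file:

import numpy as np

Xtrain = np.random.rand(20, 5)
Ytrain = np.repeat([0, 1], 10)
Xtest = np.random.rand(4, 5)
# decision_tree_test is a generator; it yields a single ClassifyResult.
results = list(decision_tree_test(Xtrain, Ytrain, Xtest, pp='raw', opts=None))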
#------------------------------------------------------------------------------- # Name: Heuristic_Coeff.py # Purpose: Implements FIH algorithm found in "An Integer Linear Programming Scheme to Sanitize Sensitive Frequent Itemsets" by Kagklis et al. # Author: Vasileios Kagklis # Created: 20/03/2014 # Copyright: (c) Vasileios Kagklis #------------------------------------------------------------------------------- from __future__ import print_function from time import clock from math import ceil import cplex from cplex import SparsePair from fim import apriori import myiolib import hcba_ext from SetOp import * ################################################### def findMin(S): result = [] for i in xrange(len(S)): flag = True for j in xrange(len(S)): if i == j: continue if len(S[i]) >= len(S[j]) and S[i].issuperset(S[j]): flag = False break elif len(S[i]) < len(S[j]) and S[i].issubset(S[j]): flag = True elif len(S[i]) == len(S[j]): flag = True if flag: result.append(S[i]) if len(result) == 0: return(S) else: return(result) ################################################### def convert2frozen(rev_fd): result = [] for itemset in rev_fd: for item in itemset: if isinstance(item, float): temp = itemset - frozenset([item]) result.append(temp) return(result) ################################################### def Heuristic_Coeff_main(fname1, fname2, fname3, sup, mod_name): change_raw_data = 0 L = [] solution = None k =0 # Read dataset and identify discrete items lines, tid = myiolib.readDataset(fname3) I = hcba_ext.get_1itemsets(tid) # Calculate support count abs_supp = ceil(sup*lines-0.5) # Load F from file F = myiolib.readLargeData(fname1) # Load S from file S = minSet(myiolib.readSensitiveSet(fname2)) # Calculate the revised F start_time = clock() SS = supersets(S, F) Rev_Fd = list(set(F)-SS) rev_t = clock() - start_time Rev_Fd.sort(key = len, reverse = True) # Calculate minimal set of S sens_ind =[] for i in xrange(lines): for itemset in S: if itemset.issubset(tid[i]): sens_ind.append(i) break start_time = clock() coeffs, rem = hcba_ext.calculateCoeffs(tid, sup, sens_ind, S, F, Rev_Fd) # The initial objective => Elastic filtering cpx = cplex.Cplex() cpx.set_results_stream(None) # Add obj. sense and columns cpx.objective.set_sense(cpx.objective.sense.minimize) cpx.variables.add(obj = coeffs, lb =[0]*len(coeffs), ub=[1]*len(coeffs), types=[cpx.variables.type.integer]*len(coeffs)) # Build constraints for minimal S for itemset in S: ind = [] cur_supp = 0 for i in xrange(len(sens_ind)): if itemset.issubset(tid[sens_ind[i]]): ind.append(i) cur_supp += 1 cpx.linear_constraints.add(lin_expr = [SparsePair(ind = ind, val=[1]*len(ind))], senses=["G"], rhs=[cur_supp - abs_supp + 1], names=["c"+str(k)]) k+=1 cpx.solve() solution = map(int, cpx.solution.get_values()) # Apply sanitization for i in hcba_ext.get_indices(solution, 1): tid[sens_ind[i]] = tid[sens_ind[i]] - rem[i] change_raw_data += len(rem[i]) coeffs = None cpx = None F = None Rev_Fd = None exec_time = clock()-start_time ######----create out files-----###### out_file = open(mod_name+'_results.txt', 'w') for i in xrange(lines): k = ' '.join(sorted(tid[i])) print(k, file = out_file) out_file.close() tid = None return("Not Applicable", change_raw_data, rev_t+exec_time)
kagklis/Frequent-Itemset-Hiding-Toolbox-x86
Heuristic_Coeff.py
Python
mit
4,188
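Hypothetical invocation of the entry point above (file names invented): sanitize dataset.txt so the itemsets listed in sensitive.txt drop below 5% support, reading the frequent itemsets from frequent.txt and writing the sanitized transactions to FIH_results.txt:

status, changed_items, seconds = Heuristic_Coeff_main(
    'frequent.txt', 'sensitive.txt', 'dataset.txt', 0.05, 'FIH')
# Per the return statement above, status is always "Not Applicable".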
from urlparse import urlparse import pytest import os from ..automation import TaskManager from ..automation.Errors import BrowserConfigError from ..automation.utilities.platform_utils import fetch_adblockplus_list import utilities import expected psl = utilities.get_psl() class TestABP(): NUM_BROWSERS = 1 def get_config(self, data_dir): manager_params, browser_params = TaskManager.load_default_params(self.NUM_BROWSERS) manager_params['data_directory'] = data_dir manager_params['log_directory'] = data_dir browser_params[0]['headless'] = True browser_params[0]['http_instrument'] = True browser_params[0]['adblock-plus'] = True return manager_params, browser_params def test_list_fetch(self, tmpdir): data_dir = str(tmpdir) fetch_adblockplus_list(data_dir) assert os.path.isfile(os.path.join(data_dir, 'patterns.ini')) assert os.path.isfile(os.path.join(data_dir, 'elemhide.css')) def test_blocks_includes(self, tmpdir): data_dir = str(tmpdir) list_loc = os.path.join(data_dir, 'adblock_plus') manager_params, browser_params = self.get_config(data_dir) fetch_adblockplus_list(list_loc) browser_params[0]['adblock-plus_list_location'] = list_loc manager = TaskManager.TaskManager(manager_params, browser_params) manager.get(utilities.BASE_TEST_URL + '/abp/adblock_plus_test.html') manager.close() db = os.path.join(data_dir, manager_params['database_name']) rows = utilities.query_db(db, "SELECT url FROM http_requests") urls = set() for url, in rows: ps1 = psl.get_public_suffix(urlparse(url).hostname) # exclude requests to safebrowsing and tracking protection backends if ps1 not in ("mozilla.com", "mozilla.net"): urls.add(url) assert urls == expected.adblockplus def test_error_with_missing_option(self, tmpdir): manager_params, browser_params = self.get_config(str(tmpdir)) with pytest.raises(BrowserConfigError): manager = TaskManager.TaskManager(manager_params, browser_params) manager.close() def test_error_with_missing_list(self, tmpdir): data_dir = str(tmpdir) list_loc = os.path.join(data_dir, 'adblock_plus') manager_params, browser_params = self.get_config(data_dir) browser_params[0]['adblock-plus_list_location'] = list_loc with pytest.raises(BrowserConfigError): manager = TaskManager.TaskManager(manager_params, browser_params) manager.close()
tommybananas/OpenWPM
test/test_adblock_plus.py
Python
gpl-3.0
2,649
import csv import StringIO from util import hook, http, text gauge_url = "http://www.mysteamgauge.com/search?username={}" api_url = "http://mysteamgauge.com/user/{}.csv" steam_api_url = "http://steamcommunity.com/id/{}/?xml=1" def refresh_data(name): http.get(gauge_url.format(name), timeout=25, get_method='HEAD') def get_data(name): return http.get(api_url.format(name)) def is_number(s): try: float(s) return True except ValueError: return False def unicode_dictreader(utf8_data, **kwargs): csv_reader = csv.DictReader(utf8_data, **kwargs) for row in csv_reader: yield dict([(key.lower(), unicode(value, 'utf-8')) for key, value in row.iteritems()]) @hook.command('sc') @hook.command def steamcalc(inp, reply=None): """steamcalc <username> [currency] - Gets value of steam account and total hours played. Uses steamcommunity.com/id/<nickname>. """ # check if the user asked us to force reload force_reload = inp.endswith(" forcereload") if force_reload: name = inp[:-12].strip().lower() else: name = inp.strip() if force_reload: try: reply("Collecting data, this may take a while.") refresh_data(name) request = get_data(name) do_refresh = False except (http.HTTPError, http.URLError): return "Could not get data for this user." else: try: request = get_data(name) do_refresh = True except (http.HTTPError, http.URLError): try: reply("Collecting data, this may take a while.") refresh_data(name) request = get_data(name) do_refresh = False except (http.HTTPError, http.URLError): return "Could not get data for this user." csv_data = StringIO.StringIO(request) # we use StringIO because CSV can't read a string reader = unicode_dictreader(csv_data) # put the games in a list games = [] for row in reader: games.append(row) data = {} # basic information steam_profile = http.get_xml(steam_api_url.format(name)) try: data["name"] = steam_profile.find('steamID').text online_state = steam_profile.find('stateMessage').text except AttributeError: return "Could not get data for this user." online_state = online_state.replace("<br/>", ": ") # will make this pretty later data["state"] = text.strip_html(online_state) # work out the average metascore for all games ms = [float(game["metascore"]) for game in games if is_number(game["metascore"])] metascore = float(sum(ms)) / len(ms) if len(ms) > 0 else float('nan') data["average_metascore"] = "{0:.1f}".format(metascore) # work out the totals data["games"] = len(games) total_value = sum([float(game["value"]) for game in games if is_number(game["value"])]) data["value"] = str(int(round(total_value))) # work out the total size total_size = 0.0 for game in games: if not is_number(game["size"]): continue if game["unit"] == "GB": total_size += float(game["size"]) else: total_size += float(game["size"]) / 1024 data["size"] = "{0:.1f}".format(total_size) reply("{name} ({state}) has {games} games with a total value of ${value}" " and a total size of {size}GB! The average metascore for these" " games is {average_metascore}.".format(**data)) if do_refresh: refresh_data(name)
Red-M/CloudBot-legacy
plugins/steam_calc.py
Python
gpl-3.0
3,611
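A minimal sketch of the CSV plumbing above: unicode_dictreader wraps csv.DictReader, lowercasing the keys and decoding the values to unicode (the column names here are invented, not the real mysteamgauge columns):

import StringIO

buf = StringIO.StringIO("Name,Value\nPortal,9.99\n")
rows = list(unicode_dictreader(buf))
# rows == [{'name': u'Portal', 'value': u'9.99'}]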
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals

from django.conf import settings
from django.utils.translation import ugettext_lazy as _

from shuup.admin.toolbar import Toolbar
from shuup.admin.utils.picotable import ChoicesFilter, Column, TextFilter
from shuup.admin.utils.views import PicotableListView
from shuup.core.models import Shop, ShopStatus


class ShopListView(PicotableListView):
    model = Shop
    default_columns = [
        Column(
            "name", _(u"Name"), sort_field="translations__name", display="name",
            filter_config=TextFilter(
                filter_field="translations__name",
                placeholder=_("Filter by name...")
            )
        ),
        Column("domain", _(u"Domain")),
        Column("identifier", _(u"Identifier")),
        Column("status", _(u"Status"), filter_config=ChoicesFilter(choices=ShopStatus.choices)),
    ]

    def get_toolbar(self):
        if settings.SHUUP_ENABLE_MULTIPLE_SHOPS:
            return super(ShopListView, self).get_toolbar()
        else:
            return Toolbar([])
suutari-ai/shoop
shuup/admin/modules/shops/views/list.py
Python
agpl-3.0
1,270
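The toolbar above is rendered only when multi-shop support is on; a sketch of the relevant toggle, assuming a standard Django settings module:

# settings.py
SHUUP_ENABLE_MULTIPLE_SHOPS = True  # False collapses the list toolbar to Toolbar([])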
import pymel.core as pm
import pymel.util as pmUtil
import pymel.core.datatypes as dt
from maya import OpenMaya as om
from maya import cmds


def lockObjXfo(dccSceneItem):
    """Locks the dccSceneItem's transform parameters.

    Args:
        dccSceneItem (Object): DCC object to lock transform parameters on.

    Returns:
        bool: True if successful.

    """

    localXfoParams = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz']
    for eachParam in localXfoParams:
        pm.setAttr(dccSceneItem.longName() + "." + eachParam,
                   lock=True, keyable=False, channelBox=False)

    return True
goshow-jp/Kraken
Python/kraken/plugins/maya_plugin/utils/__init__.py
Python
bsd-3-clause
607
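Hypothetical usage inside a Maya session (the node name is invented, and the import path assumes the repository's Python/ directory is on sys.path):

import pymel.core as pm
from kraken.plugins.maya_plugin.utils import lockObjXfo

loc = pm.spaceLocator(name='myLocator')  # a Transform node to lock
lockObjXfo(loc)  # tx..sz are now locked and hidden from the channel box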
""" Support tool for changing course enrollments. """ from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user from django.db import transaction from django.db.models import Q from django.http import HttpResponseBadRequest from django.urls import reverse from django.utils.decorators import method_decorator from django.views.generic import View from opaque_keys import InvalidKeyError from opaque_keys.edx.keys import CourseKey from rest_framework.generics import GenericAPIView from common.djangoapps.course_modes.models import CourseMode from common.djangoapps.edxmako.shortcuts import render_to_response from common.djangoapps.student.models import ( ENROLLED_TO_ENROLLED, UNENROLLED_TO_ENROLLED, CourseEnrollment, CourseEnrollmentAttribute, ManualEnrollmentAudit ) from common.djangoapps.util.json_request import JsonResponse from common.djangoapps.entitlements.models import CourseEntitlement from lms.djangoapps.support.decorators import require_support_permission from lms.djangoapps.support.serializers import ManualEnrollmentSerializer from lms.djangoapps.verify_student.models import VerificationDeadline from openedx.core.djangoapps.credit.email_utils import get_credit_provider_attribute_values from openedx.core.djangoapps.enrollments.api import get_enrollments, update_enrollment from openedx.core.djangoapps.enrollments.errors import CourseModeNotFoundError from openedx.core.djangoapps.enrollments.serializers import ModeSerializer class EnrollmentSupportView(View): """ View for viewing and changing learner enrollments, used by the support team. """ @method_decorator(require_support_permission) def get(self, request): """Render the enrollment support tool view.""" return render_to_response('support/enrollment.html', { 'username': request.GET.get('user', ''), 'enrollmentsUrl': reverse('support:enrollment_list'), 'enrollmentSupportUrl': reverse('support:enrollment') }) class EnrollmentSupportListView(GenericAPIView): """ Allows viewing and changing learner enrollments by support staff. """ # TODO: ARCH-91 # This view is excluded from Swagger doc generation because it # does not specify a serializer class. exclude_from_schema = True @method_decorator(require_support_permission) def get(self, request, username_or_email): """ Returns a list of enrollments for the given user, along with information about previous manual enrollment changes. """ try: user = User.objects.get(Q(username=username_or_email) | Q(email=username_or_email)) except User.DoesNotExist: return JsonResponse([]) enrollments = get_enrollments(user.username, include_inactive=True) for enrollment in enrollments: # Folds the course_details field up into the main JSON object. enrollment.update(**enrollment.pop('course_details')) course_key = CourseKey.from_string(enrollment['course_id']) # get the all courses modes and replace with existing modes. enrollment['course_modes'] = self.get_course_modes(course_key) # Add the price of the course's verified mode. self.include_verified_mode_info(enrollment, course_key) # Add manual enrollment history, if it exists enrollment['manual_enrollment'] = self.manual_enrollment_data(enrollment, course_key) return JsonResponse(enrollments) @method_decorator(require_support_permission) def post(self, request, username_or_email): """ Allows support staff to create a user's enrollment. 
""" try: course_id = request.data['course_id'] course_key = CourseKey.from_string(course_id) mode = request.data['mode'] reason = request.data['reason'] user = User.objects.get(Q(username=username_or_email) | Q(email=username_or_email)) except KeyError as err: return HttpResponseBadRequest(f'The field {str(err)} is required.') except InvalidKeyError: return HttpResponseBadRequest('Could not parse course key.') except User.DoesNotExist: return HttpResponseBadRequest( 'Could not find user {username}.'.format( username=username_or_email ) ) enrollment = CourseEnrollment.get_enrollment(user=user, course_key=course_key) if enrollment is not None: return HttpResponseBadRequest( f'The user {str(username_or_email)} is already enrolled in {str(course_id)}.' ) enrollment_modes = [ enrollment_mode['slug'] for enrollment_mode in self.get_course_modes(course_key) ] if mode not in enrollment_modes: return HttpResponseBadRequest( f'{str(mode)} is not a valid mode for {str(course_id)}. ' f'Possible valid modes are {str(enrollment_modes)}' ) enrollment = CourseEnrollment.enroll(user=user, course_key=course_key, mode=mode) # Wrapped in a transaction so that we can be sure the # ManualEnrollmentAudit record is always created correctly. with transaction.atomic(): manual_enrollment = ManualEnrollmentAudit.create_manual_enrollment_audit( request.user, enrollment.user.email, UNENROLLED_TO_ENROLLED, reason=reason, enrollment=enrollment ) return JsonResponse(ManualEnrollmentSerializer(instance=manual_enrollment).data) @method_decorator(require_support_permission) def patch(self, request, username_or_email): """Allows support staff to alter a user's enrollment.""" try: user = User.objects.get(Q(username=username_or_email) | Q(email=username_or_email)) course_id = request.data['course_id'] course_key = CourseKey.from_string(course_id) old_mode = request.data['old_mode'] new_mode = request.data['new_mode'] reason = request.data['reason'] enrollment = CourseEnrollment.objects.get(user=user, course_id=course_key) if enrollment.mode != old_mode: return HttpResponseBadRequest('User {username} is not enrolled with mode {old_mode}.'.format( username=user.username, old_mode=old_mode )) except KeyError as err: return HttpResponseBadRequest(f'The field {str(err)} is required.') except InvalidKeyError: return HttpResponseBadRequest('Could not parse course key.') except (CourseEnrollment.DoesNotExist, User.DoesNotExist): return HttpResponseBadRequest( 'Could not find enrollment for user {username} in course {course}.'.format( username=username_or_email, course=str(course_key) ) ) try: # Wrapped in a transaction so that we can be sure the # ManualEnrollmentAudit record is always created correctly. 
with transaction.atomic(): update_enrollment(user.username, course_id, mode=new_mode, include_expired=True) manual_enrollment = ManualEnrollmentAudit.create_manual_enrollment_audit( request.user, enrollment.user.email, ENROLLED_TO_ENROLLED, reason=reason, enrollment=enrollment ) if new_mode == CourseMode.CREDIT_MODE: provider_ids = get_credit_provider_attribute_values(course_key, 'id') credit_provider_attr = { 'namespace': 'credit', 'name': 'provider_id', 'value': provider_ids[0], } CourseEnrollmentAttribute.add_enrollment_attr( enrollment=enrollment, data_list=[credit_provider_attr] ) entitlement = CourseEntitlement.get_fulfillable_entitlement_for_user_course_run( user=user, course_run_key=course_id ) if entitlement is not None and entitlement.mode == new_mode: entitlement.set_enrollment(CourseEnrollment.get_enrollment(user, course_id)) return JsonResponse(ManualEnrollmentSerializer(instance=manual_enrollment).data) except CourseModeNotFoundError as err: return HttpResponseBadRequest(str(err)) @staticmethod def include_verified_mode_info(enrollment_data, course_key): """ Add information about the verified mode for the given `course_key`, if that course has a verified mode. Args: enrollment_data (dict): Dictionary representing a single enrollment. course_key (CourseKey): The course which this enrollment belongs to. Returns: None """ course_modes = enrollment_data['course_modes'] for mode in course_modes: if mode['slug'] == CourseMode.VERIFIED: enrollment_data['verified_price'] = mode['min_price'] enrollment_data['verified_upgrade_deadline'] = mode['expiration_datetime'] enrollment_data['verification_deadline'] = VerificationDeadline.deadline_for_course(course_key) @staticmethod def manual_enrollment_data(enrollment_data, course_key): """ Returns serialized information about the manual enrollment belonging to this enrollment, if it exists. Args: enrollment_data (dict): Representation of a single course enrollment. course_key (CourseKey): The course for this enrollment. Returns: None: If no manual enrollment change has been made. dict: Serialization of the latest manual enrollment change. """ user = User.objects.get(username=enrollment_data['user']) enrollment = CourseEnrollment.get_enrollment(user, course_key) manual_enrollment_audit = ManualEnrollmentAudit.get_manual_enrollment(enrollment) if manual_enrollment_audit is None: return {} return ManualEnrollmentSerializer(instance=manual_enrollment_audit).data @staticmethod def get_course_modes(course_key): """ Returns a list of all modes including expired modes for a given course id Arguments: course_id (CourseKey): Search for course modes for this course. Returns: list of `Mode` """ course_modes = CourseMode.modes_for_course( course_key, include_expired=True, only_selectable=False, ) return [ ModeSerializer(mode).data for mode in course_modes ]
edx/edx-platform
lms/djangoapps/support/views/enrollments.py
Python
agpl-3.0
11,159
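For reference, the shapes of the JSON bodies the POST and PATCH handlers above read from request.data (values are illustrative; the course key follows the standard course-v1 format):

post_payload = {   # create an enrollment
    "course_id": "course-v1:edX+DemoX+Demo_Course",
    "mode": "verified",
    "reason": "Support ticket resolution",
}
patch_payload = {  # change an existing enrollment's mode
    "course_id": "course-v1:edX+DemoX+Demo_Course",
    "old_mode": "audit",
    "new_mode": "verified",
    "reason": "Support ticket resolution",
}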
#!/usr/bin/python2

import processing.brain
import sys

brain = processing.brain.Brain()

# Main loop: process brain cycles indefinitely, counting how many have run.
cycle = True
runtime = 0
while cycle:
    brain.process()
    runtime += 1
InfiniTower/Omni
omni.py
Python
gpl-2.0
170
# Copyright (c) 2019 Ultimaker B.V. # Cura is released under the terms of the LGPLv3 or higher. import os from UM.i18n import i18nCatalog from UM.Logger import Logger from UM.Mesh.MeshWriter import MeshWriter #To get the g-code output. from UM.PluginRegistry import PluginRegistry #To get the g-code output. from UM.Qt.Duration import DurationFormat from cura.CuraApplication import CuraApplication from cura.PrinterOutput.PrinterOutputDevice import PrinterOutputDevice, ConnectionState, ConnectionType from cura.PrinterOutput.Models.PrinterOutputModel import PrinterOutputModel from cura.PrinterOutput.Models.PrintJobOutputModel import PrintJobOutputModel from cura.PrinterOutput.GenericOutputController import GenericOutputController from .AutoDetectBaudJob import AutoDetectBaudJob from .AvrFirmwareUpdater import AvrFirmwareUpdater from io import StringIO #To write the g-code output. from queue import Queue from serial import Serial, SerialException, SerialTimeoutException from threading import Thread, Event from time import time from typing import Union, Optional, List, cast import re import functools # Used for reduce catalog = i18nCatalog("cura") class USBPrinterOutputDevice(PrinterOutputDevice): def __init__(self, serial_port: str, baud_rate: Optional[int] = None) -> None: super().__init__(serial_port, connection_type = ConnectionType.UsbConnection) self.setName(catalog.i18nc("@item:inmenu", "USB printing")) self.setShortDescription(catalog.i18nc("@action:button Preceded by 'Ready to'.", "Print via USB")) self.setDescription(catalog.i18nc("@info:tooltip", "Print via USB")) self.setIconName("print") self._serial = None # type: Optional[Serial] self._serial_port = serial_port self._address = serial_port self._timeout = 3 # List of gcode lines to be printed self._gcode = [] # type: List[str] self._gcode_position = 0 self._use_auto_detect = True self._baud_rate = baud_rate self._all_baud_rates = [115200, 250000, 500000, 230400, 57600, 38400, 19200, 9600] # Instead of using a timer, we really need the update to be as a thread, as reading from serial can block. self._update_thread = Thread(target = self._update, daemon = True) self._last_temperature_request = None # type: Optional[int] self._firmware_idle_count = 0 self._is_printing = False # A print is being sent. ## Set when print is started in order to check running time. self._print_start_time = None # type: Optional[float] self._print_estimated_time = None # type: Optional[int] self._accepts_commands = True self._paused = False self._printer_busy = False # When printer is preheating and waiting (M190/M109), or when waiting for action on the printer self.setConnectionText(catalog.i18nc("@info:status", "Connected via USB")) # Queue for commands that need to be sent. self._command_queue = Queue() # type: Queue # Event to indicate that an "ok" was received from the printer after sending a command. self._command_received = Event() self._command_received.set() self._firmware_name_requested = False self._firmware_updater = AvrFirmwareUpdater(self) self._monitor_view_qml_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "MonitorItem.qml") CuraApplication.getInstance().getOnExitCallbackManager().addCallback(self._checkActivePrintingUponAppExit) # This is a callback function that checks if there is any printing in progress via USB when the application tries # to exit. 
If so, it will show a confirmation before def _checkActivePrintingUponAppExit(self) -> None: application = CuraApplication.getInstance() if not self._is_printing: # This USB printer is not printing, so we have nothing to do. Call the next callback if exists. application.triggerNextExitCheck() return application.setConfirmExitDialogCallback(self._onConfirmExitDialogResult) application.showConfirmExitDialog.emit(catalog.i18nc("@label", "A USB print is in progress, closing Cura will stop this print. Are you sure?")) def _onConfirmExitDialogResult(self, result: bool) -> None: if result: application = CuraApplication.getInstance() application.triggerNextExitCheck() ## Reset USB device settings # def resetDeviceSettings(self) -> None: self._firmware_name = None ## Request the current scene to be sent to a USB-connected printer. # # \param nodes A collection of scene nodes to send. This is ignored. # \param file_name \type{string} A suggestion for a file name to write. # \param filter_by_machine Whether to filter MIME types by machine. This # is ignored. # \param kwargs Keyword arguments. def requestWrite(self, nodes, file_name = None, filter_by_machine = False, file_handler = None, **kwargs): if self._is_printing: return # Already printing self.writeStarted.emit(self) # cancel any ongoing preheat timer before starting a print self._printers[0].getController().stopPreheatTimers() CuraApplication.getInstance().getController().setActiveStage("MonitorStage") #Find the g-code to print. gcode_textio = StringIO() gcode_writer = cast(MeshWriter, PluginRegistry.getInstance().getPluginObject("GCodeWriter")) success = gcode_writer.write(gcode_textio, None) if not success: return self._printGCode(gcode_textio.getvalue()) ## Start a print based on a g-code. # \param gcode The g-code to print. def _printGCode(self, gcode: str): self._gcode.clear() self._paused = False self._gcode.extend(gcode.split("\n")) # Reset line number. 
If this is not done, first line is sometimes ignored self._gcode.insert(0, "M110") self._gcode_position = 0 self._print_start_time = time() self._print_estimated_time = int(CuraApplication.getInstance().getPrintInformation().currentPrintTime.getDisplayString(DurationFormat.Format.Seconds)) for i in range(0, 4): # Push first 4 entries before accepting other inputs self._sendNextGcodeLine() self._is_printing = True self.writeFinished.emit(self) def _autoDetectFinished(self, job: AutoDetectBaudJob): result = job.getResult() if result is not None: self.setBaudRate(result) self.connect() # Try to connect (actually create serial, etc) def setBaudRate(self, baud_rate: int): if baud_rate not in self._all_baud_rates: Logger.log("w", "Not updating baudrate to {baud_rate} as it's an unknown baudrate".format(baud_rate=baud_rate)) return self._baud_rate = baud_rate def connect(self): self._firmware_name = None # after each connection ensure that the firmware name is removed if self._baud_rate is None: if self._use_auto_detect: auto_detect_job = AutoDetectBaudJob(self._serial_port) auto_detect_job.start() auto_detect_job.finished.connect(self._autoDetectFinished) return if self._serial is None: try: self._serial = Serial(str(self._serial_port), self._baud_rate, timeout=self._timeout, writeTimeout=self._timeout) except SerialException: Logger.log("w", "An exception occured while trying to create serial connection") return CuraApplication.getInstance().globalContainerStackChanged.connect(self._onGlobalContainerStackChanged) self._onGlobalContainerStackChanged() self.setConnectionState(ConnectionState.Connected) self._update_thread.start() def _onGlobalContainerStackChanged(self): container_stack = CuraApplication.getInstance().getGlobalContainerStack() num_extruders = container_stack.getProperty("machine_extruder_count", "value") # Ensure that a printer is created. controller = GenericOutputController(self) controller.setCanUpdateFirmware(True) self._printers = [PrinterOutputModel(output_controller = controller, number_of_extruders = num_extruders)] self._printers[0].updateName(container_stack.getName()) def close(self): super().close() if self._serial is not None: self._serial.close() # Re-create the thread so it can be started again later. self._update_thread = Thread(target=self._update, daemon=True) self._serial = None ## Send a command to printer. 
def sendCommand(self, command: Union[str, bytes]): if not self._command_received.is_set(): self._command_queue.put(command) else: self._sendCommand(command) def _sendCommand(self, command: Union[str, bytes]): if self._serial is None or self._connection_state != ConnectionState.Connected: return new_command = cast(bytes, command) if type(command) is bytes else cast(str, command).encode() # type: bytes if not new_command.endswith(b"\n"): new_command += b"\n" try: self._command_received.clear() self._serial.write(new_command) except SerialTimeoutException: Logger.log("w", "Timeout when sending command to printer via USB.") self._command_received.set() except SerialException: Logger.logException("w", "An unexpected exception occurred while writing to the serial.") self.setConnectionState(ConnectionState.Error) def _update(self): while self._connection_state == ConnectionState.Connected and self._serial is not None: try: line = self._serial.readline() except: continue if not self._firmware_name_requested: self._firmware_name_requested = True self.sendCommand("M115") if b"FIRMWARE_NAME:" in line: self._setFirmwareName(line) if self._last_temperature_request is None or time() > self._last_temperature_request + self._timeout: # Timeout, or no request has been sent at all. if not self._printer_busy: # Don't flood the printer with temperature requests while it is busy self.sendCommand("M105") self._last_temperature_request = time() if re.search(b"[B|T\d*]: ?\d+\.?\d*", line): # Temperature message. 'T:' for extruder and 'B:' for bed extruder_temperature_matches = re.findall(b"T(\d*): ?(\d+\.?\d*)\s*\/?(\d+\.?\d*)?", line) # Update all temperature values matched_extruder_nrs = [] for match in extruder_temperature_matches: extruder_nr = 0 if match[0] != b"": extruder_nr = int(match[0]) if extruder_nr in matched_extruder_nrs: continue matched_extruder_nrs.append(extruder_nr) if extruder_nr >= len(self._printers[0].extruders): Logger.log("w", "Printer reports more temperatures than the number of configured extruders") continue extruder = self._printers[0].extruders[extruder_nr] if match[1]: extruder.updateHotendTemperature(float(match[1])) if match[2]: extruder.updateTargetHotendTemperature(float(match[2])) bed_temperature_matches = re.findall(b"B: ?(\d+\.?\d*)\s*\/?(\d+\.?\d*)?", line) if bed_temperature_matches: match = bed_temperature_matches[0] if match[0]: self._printers[0].updateBedTemperature(float(match[0])) if match[1]: self._printers[0].updateTargetBedTemperature(float(match[1])) if line == b"": # An empty line means that the firmware is idle # Multiple empty lines probably means that the firmware and Cura are waiting # for eachother due to a missed "ok", so we keep track of empty lines self._firmware_idle_count += 1 else: self._firmware_idle_count = 0 if line.startswith(b"ok") or self._firmware_idle_count > 1: self._printer_busy = False self._command_received.set() if not self._command_queue.empty(): self._sendCommand(self._command_queue.get()) elif self._is_printing: if self._paused: pass # Nothing to do! else: self._sendNextGcodeLine() if line.startswith(b"echo:busy:"): self._printer_busy = True if self._is_printing: if line.startswith(b'!!'): Logger.log('e', "Printer signals fatal error. Cancelling print. {}".format(line)) self.cancelPrint() elif line.lower().startswith(b"resend") or line.startswith(b"rs"): # A resend can be requested either by Resend, resend or rs. 
try: self._gcode_position = int(line.replace(b"N:", b" ").replace(b"N", b" ").replace(b":", b" ").split()[-1]) except: if line.startswith(b"rs"): # In some cases of the RS command it needs to be handled differently. self._gcode_position = int(line.split()[1]) def _setFirmwareName(self, name): new_name = re.findall(r"FIRMWARE_NAME:(.*);", str(name)) if new_name: self._firmware_name = new_name[0] Logger.log("i", "USB output device Firmware name: %s", self._firmware_name) else: self._firmware_name = "Unknown" Logger.log("i", "Unknown USB output device Firmware name: %s", str(name)) def getFirmwareName(self): return self._firmware_name def pausePrint(self): self._paused = True def resumePrint(self): self._paused = False self._sendNextGcodeLine() #Send one line of g-code next so that we'll trigger an "ok" response loop even if we're not polling temperatures. def cancelPrint(self): self._gcode_position = 0 self._gcode.clear() self._printers[0].updateActivePrintJob(None) self._is_printing = False self._paused = False # Turn off temperatures, fan and steppers self._sendCommand("M140 S0") self._sendCommand("M104 S0") self._sendCommand("M107") # Home XY to prevent nozzle resting on aborted print # Don't home bed because it may crash the printhead into the print on printers that home on the bottom self.printers[0].homeHead() self._sendCommand("M84") def _sendNextGcodeLine(self): if self._gcode_position >= len(self._gcode): self._printers[0].updateActivePrintJob(None) self._is_printing = False return line = self._gcode[self._gcode_position] if ";" in line: line = line[:line.find(";")] line = line.strip() # Don't send empty lines. But we do have to send something, so send M105 instead. # Don't send the M0 or M1 to the machine, as M0 and M1 are handled as an LCD menu pause. if line == "" or line == "M0" or line == "M1": line = "M105" checksum = functools.reduce(lambda x, y: x ^ y, map(ord, "N%d%s" % (self._gcode_position, line))) self._sendCommand("N%d%s*%d" % (self._gcode_position, line, checksum)) print_job = self._printers[0].activePrintJob try: progress = self._gcode_position / len(self._gcode) except ZeroDivisionError: # There is nothing to send! if print_job is not None: print_job.updateState("error") return elapsed_time = int(time() - self._print_start_time) if print_job is None: controller = GenericOutputController(self) controller.setCanUpdateFirmware(True) print_job = PrintJobOutputModel(output_controller=controller, name=CuraApplication.getInstance().getPrintInformation().jobName) print_job.updateState("printing") self._printers[0].updateActivePrintJob(print_job) print_job.updateTimeElapsed(elapsed_time) estimated_time = self._print_estimated_time if progress > .1: estimated_time = self._print_estimated_time * (1 - progress) + elapsed_time print_job.updateTimeTotal(estimated_time) self._gcode_position += 1
Patola/Cura
plugins/USBPrinting/USBPrinterOutputDevice.py
Python
lgpl-3.0
17,272
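Standalone sketch of the RepRap-style line checksum computed in _sendNextGcodeLine above: XOR the bytes of "N<line number><command>" and append the result after '*' (the command here is arbitrary):

import functools

line_number, line = 5, "G1 X10 Y10"
checksum = functools.reduce(lambda x, y: x ^ y,
                            map(ord, "N%d%s" % (line_number, line)))
print("N%d%s*%d" % (line_number, line, checksum))  # -> N5G1 X10 Y10*12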
# -*- coding: utf-8 -*- """ Created on Wed Apr 05 17:10:19 2017 @author: Luca """ import sys import serial # import Serial Library import drawnow as drawnow import numpy as np import matplotlib.pyplot as plt from scipy.interpolate import interp1d x_eval = np.linspace(0, 1024, 1024) def Connect(channel, channel2): ch = 0 i = 0 arduinoData = serial.Serial('com4', 115200) #Creating our serial object named arduinoData plt.ion() #Tell matplotlib you want interactive mode to plot live data while True: # While loop that loops forever while (arduinoData.inWaiting()==0): #Wait here until there is data pass #do nothing try: arduinoString = arduinoData.readline() #read the line of text from the serial port print i ch = int(arduinoString) #Convert first element to floating number and put in temp channel[ch] = channel[ch] + 1 channel = running_mean(channel, 4) if (i >= 1000): i = 0 np.savetxt('counts.out', channel, delimiter=' ') # X is an array drawnow.drawnow(makeFig) plt.pause(.00000001) #Pause Briefly. Important to keep drawnow from crashing i =i+1 except ValueError: print"read wrong value" ### Running mean/Moving average def running_mean(x, N): b =np.zeros((len(x),), dtype=np.float) a = np.linspace(1, len(x), num=len(x), endpoint=True) c = (N/2.0) * np.linspace(1, int(len(x)/N), num=int(len(x)/N), endpoint=True) f = np.linspace(int(-N/2),int(N/2), num=int(len(x)/N), endpoint=False) for i in range(0,int(len(x)/N)): for z in range(0, N): b[i] += x[(f[z]*i)] b[i] = b[i]/N #d = interp1d(a, b, kind='cubic') return (b) def makeFig(): plt.title('Channel from Arduino') plt.grid(True) plt.ylabel('Counts') plt.ylim(0,float(np.amax(channel)*1.15)) plt.xlim(0,len(channel)) plt.grid(True) plt.plot(channel, 'rx-', label='Channels') plt.legend(loc='upper right') print("R for random, S for serial inup") mode = raw_input("Please enter something: ") channel = np.zeros((1024,), dtype=np.int) channel2 = np.zeros((1024,), dtype=np.float) while (True): if (mode == "R"): mode = raw_input("Please enter something: ") break elif (mode == "S"): Connect(channel, channel2) break else: print("wrong input") mode = raw_input("Please enter something: ") break
bemxgm/Radon-Monitor
Code/PC dataloggher/PC dataloggher.py
Python
gpl-3.0
2,686
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Unit tests for git_cl.py.""" import os import StringIO import stat import sys import unittest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from testing_support.auto_stub import TestCase import git_cl import git_common import subprocess2 class PresubmitMock(object): def __init__(self, *args, **kwargs): self.reviewers = [] @staticmethod def should_continue(): return True class RietveldMock(object): def __init__(self, *args, **kwargs): pass @staticmethod def get_description(issue): return 'Issue: %d' % issue @staticmethod def get_issue_properties(_issue, _messages): return { 'reviewers': ['joe@chromium.org', 'john@chromium.org'], 'messages': [ { 'approval': True, 'sender': 'john@chromium.org', }, ], } class WatchlistsMock(object): def __init__(self, _): pass @staticmethod def GetWatchersForPaths(_): return ['joe@example.com'] class CodereviewSettingsFileMock(object): def __init__(self): pass # pylint: disable=R0201 def read(self): return ("CODE_REVIEW_SERVER: gerrit.chromium.org\n" + "GERRIT_HOST: gerrit.chromium.org\n" + "GERRIT_PORT: 29418\n") class TestGitCl(TestCase): def setUp(self): super(TestGitCl, self).setUp() self.calls = [] self._calls_done = 0 self.mock(subprocess2, 'call', self._mocked_call) self.mock(subprocess2, 'check_call', self._mocked_call) self.mock(subprocess2, 'check_output', self._mocked_call) self.mock(subprocess2, 'communicate', self._mocked_call) self.mock(subprocess2, 'Popen', self._mocked_call) self.mock(git_common, 'get_or_create_merge_base', lambda *a: ( self._mocked_call(['get_or_create_merge_base']+list(a)))) self.mock(git_cl, 'FindCodereviewSettingsFile', lambda: '') self.mock(git_cl, 'ask_for_data', self._mocked_call) self.mock(git_cl.breakpad, 'post', self._mocked_call) self.mock(git_cl.breakpad, 'SendStack', self._mocked_call) self.mock(git_cl.presubmit_support, 'DoPresubmitChecks', PresubmitMock) self.mock(git_cl.rietveld, 'Rietveld', RietveldMock) self.mock(git_cl.rietveld, 'CachingRietveld', RietveldMock) self.mock(git_cl.upload, 'RealMain', self.fail) self.mock(git_cl.watchlists, 'Watchlists', WatchlistsMock) # It's important to reset settings to not have inter-tests interference. git_cl.settings = None def tearDown(self): if not self.has_failed(): self.assertEquals([], self.calls) super(TestGitCl, self).tearDown() def _mocked_call(self, *args, **_kwargs): self.assertTrue( self.calls, '@%d Expected: <Missing> Actual: %r' % (self._calls_done, args)) expected_args, result = self.calls.pop(0) # Also logs otherwise it could get caught in a try/finally and be hard to # diagnose. 
if expected_args != args: msg = '@%d Expected: %r Actual: %r' % ( self._calls_done, expected_args, args) git_cl.logging.error(msg) self.fail(msg) self._calls_done += 1 return result @classmethod def _upload_calls(cls, similarity, find_copies, private): return (cls._git_base_calls(similarity, find_copies) + cls._git_upload_calls(private)) @classmethod def _upload_no_rev_calls(cls, similarity, find_copies): return (cls._git_base_calls(similarity, find_copies) + cls._git_upload_no_rev_calls()) @classmethod def _git_base_calls(cls, similarity, find_copies): if similarity is None: similarity = '50' similarity_call = ((['git', 'config', '--int', '--get', 'branch.master.git-cl-similarity'],), '') else: similarity_call = ((['git', 'config', '--int', 'branch.master.git-cl-similarity', similarity],), '') if find_copies is None: find_copies = True find_copies_call = ((['git', 'config', '--int', '--get', 'branch.master.git-find-copies'],), '') else: val = str(int(find_copies)) find_copies_call = ((['git', 'config', '--int', 'branch.master.git-find-copies', val],), '') if find_copies: stat_call = ((['git', 'diff', '--no-ext-diff', '--stat', '--find-copies-harder', '-l100000', '-C'+similarity, 'fake_ancestor_sha', 'HEAD'],), '+dat') else: stat_call = ((['git', 'diff', '--no-ext-diff', '--stat', '-M'+similarity, 'fake_ancestor_sha', 'HEAD'],), '+dat') return [ ((['git', 'config', 'rietveld.autoupdate'],), ''), ((['git', 'config', 'rietveld.server'],), 'codereview.example.com'), ((['git', 'symbolic-ref', 'HEAD'],), 'master'), similarity_call, ((['git', 'symbolic-ref', 'HEAD'],), 'master'), find_copies_call, ((['git', 'update-index', '--refresh', '-q'],), ''), ((['git', 'diff-index', '--name-status', 'HEAD'],), ''), ((['git', 'symbolic-ref', 'HEAD'],), 'master'), ((['git', 'config', 'branch.master.merge'],), 'master'), ((['git', 'config', 'branch.master.remote'],), 'origin'), ((['get_or_create_merge_base', 'master', 'master'],), 'fake_ancestor_sha'), ] + cls._git_sanity_checks('fake_ancestor_sha', 'master') + [ ((['git', 'rev-parse', '--show-cdup'],), ''), ((['git', 'rev-parse', 'HEAD'],), '12345'), ((['git', 'diff', '--name-status', '--no-renames', '-r', 'fake_ancestor_sha...', '.'],), 'M\t.gitignore\n'), ((['git', 'config', 'branch.master.rietveldissue'],), ''), ((['git', 'config', 'branch.master.rietveldpatchset'],), ''), ((['git', 'log', '--pretty=format:%s%n%n%b', 'fake_ancestor_sha...'],), 'foo'), ((['git', 'config', 'user.email'],), 'me@example.com'), stat_call, ((['git', 'config', 'gerrit.host'],), ''), ((['git', 'log', '--pretty=format:%s\n\n%b', 'fake_ancestor_sha..HEAD'],), 'desc\n'), ((['git', 'config', 'rietveld.bug-prefix'],), ''), ] @classmethod def _git_upload_no_rev_calls(cls): return [ ((['git', 'config', 'core.editor'],), ''), ] @classmethod def _git_upload_calls(cls, private): if private: cc_call = [] private_call = [] else: cc_call = [((['git', 'config', 'rietveld.cc'],), '')] private_call = [ ((['git', 'config', 'rietveld.private'],), '')] return [ ((['git', 'config', 'core.editor'],), ''), ] + cc_call + private_call + [ ((['git', 'config', 'branch.master.base-url'],), ''), ((['git', 'config', '--local', '--get-regexp', '^svn-remote\\.'],), (('', None), 0)), ((['git', 'rev-parse', '--show-cdup'],), ''), ((['git', 'svn', 'info'],), ''), ((['git', 'config', 'branch.master.rietveldissue', '1'],), ''), ((['git', 'config', 'branch.master.rietveldserver', 'https://codereview.example.com'],), ''), ((['git', 'config', 'branch.master.rietveldpatchset', '2'],), ''), ((['git', 'rev-parse', 
'HEAD'],), 'hash'), ((['git', 'symbolic-ref', 'HEAD'],), 'hash'), ((['git', 'config', 'branch.hash.last-upload-hash', 'hash'],), ''), ] @staticmethod def _git_sanity_checks(diff_base, working_branch): fake_ancestor = 'fake_ancestor' fake_cl = 'fake_cl_for_patch' return [ # Calls to verify branch point is ancestor ((['git', 'rev-parse', '--verify', diff_base],), fake_ancestor), ((['git', 'merge-base', fake_ancestor, 'HEAD'],), fake_ancestor), ((['git', 'rev-list', '^' + fake_ancestor, 'HEAD'],), fake_cl), # Mock a config miss (error code 1) ((['git', 'config', 'gitcl.remotebranch'],), (('', None), 1)), # Call to GetRemoteBranch() ((['git', 'config', 'branch.%s.merge' % working_branch],), 'refs/heads/master'), ((['git', 'config', 'branch.%s.remote' % working_branch],), 'origin'), ((['git', 'rev-list', '^' + fake_ancestor, 'refs/remotes/origin/master'],), ''), ] @classmethod def _dcommit_calls_1(cls): return [ ((['git', 'config', '--local', '--get-regexp', '^svn-remote\\.'],), ((('svn-remote.svn.url svn://svn.chromium.org/chrome\n' 'svn-remote.svn.fetch trunk/src:refs/remotes/origin/master'), None), 0)), ((['git', 'config', 'rietveld.autoupdate'],), ''), ((['git', 'config', 'rietveld.server'],), 'codereview.example.com'), ((['git', 'symbolic-ref', 'HEAD'],), 'refs/heads/working'), ((['git', 'config', '--int', '--get', 'branch.working.git-cl-similarity'],), ''), ((['git', 'symbolic-ref', 'HEAD'],), 'refs/heads/working'), ((['git', 'config', '--int', '--get', 'branch.working.git-find-copies'],), ''), ((['git', 'symbolic-ref', 'HEAD'],), 'refs/heads/working'), ((['git', 'config', 'branch.working.merge'],), 'refs/heads/master'), ((['git', 'config', 'branch.working.remote'],), 'origin'), ((['git', 'config', 'branch.working.merge'],), 'refs/heads/master'), ((['git', 'config', 'branch.working.remote'],), 'origin'), ((['git', 'rev-list', '--merges', '--grep=^SVN changes up to revision [0-9]*$', 'refs/remotes/origin/master^!'],), ''), ((['git', 'update-index', '--refresh', '-q'],), ''), ((['git', 'diff-index', '--name-status', 'HEAD'],), ''), ((['git', 'rev-list', '^refs/heads/working', 'refs/remotes/origin/master'],), ''), ((['git', 'log', '--grep=^git-svn-id:', '-1', '--pretty=format:%H'],), '3fc18b62c4966193eb435baabe2d18a3810ec82e'), ((['git', 'rev-list', '^3fc18b62c4966193eb435baabe2d18a3810ec82e', 'refs/remotes/origin/master'],), ''), ((['git', 'merge-base', 'refs/remotes/origin/master', 'HEAD'],), 'fake_ancestor_sha'), ] @classmethod def _dcommit_calls_normal(cls): return [ ((['git', 'rev-parse', '--show-cdup'],), ''), ((['git', 'rev-parse', 'HEAD'],), '00ff397798ea57439712ed7e04ab96e13969ef40'), ((['git', 'diff', '--name-status', '--no-renames', '-r', 'fake_ancestor_sha...', '.'],), 'M\tPRESUBMIT.py'), ((['git', 'config', 'branch.working.rietveldissue'],), '12345'), ((['git', 'config', 'branch.working.rietveldpatchset'],), '31137'), ((['git', 'config', 'branch.working.rietveldserver'],), 'codereview.example.com'), ((['git', 'config', 'user.email'],), 'author@example.com'), ((['git', 'config', 'rietveld.tree-status-url'],), ''), ] @classmethod def _dcommit_calls_bypassed(cls): return [ ((['git', 'config', 'branch.working.rietveldissue'],), '12345'), ((['git', 'config', 'branch.working.rietveldserver'],), 'codereview.example.com'), ((['git', 'config', 'rietveld.tree-status-url'],), ''), (('GitClHooksBypassedCommit', 'Issue https://codereview.example.com/12345 bypassed hook when ' 'committing (tree status was "unset")'), None), ] @classmethod def _dcommit_calls_3(cls): return [ ((['git', 'diff', 
'--no-ext-diff', '--stat', '--find-copies-harder', '-l100000',
         '-C50', 'fake_ancestor_sha', 'refs/heads/working'],),
       (' PRESUBMIT.py | 2 +-\n'
        ' 1 files changed, 1 insertions(+), 1 deletions(-)\n')),
      (('About to commit; enter to confirm.',), None),
      ((['git', 'show-ref', '--quiet', '--verify',
         'refs/heads/git-cl-commit'],),
       (('', None), 0)),
      ((['git', 'branch', '-D', 'git-cl-commit'],), ''),
      ((['git', 'show-ref', '--quiet', '--verify',
         'refs/heads/git-cl-cherry-pick'],), ''),
      ((['git', 'rev-parse', '--show-cdup'],), '\n'),
      ((['git', 'checkout', '-q', '-b', 'git-cl-commit'],), ''),
      ((['git', 'reset', '--soft', 'fake_ancestor_sha'],), ''),
      ((['git', 'commit', '-m',
         'Issue: 12345\n\nR=john@chromium.org\n\n'
         'Review URL: https://codereview.example.com/12345'],), ''),
      ((['git', 'svn', 'dcommit', '-C50', '--no-rebase', '--rmdir'],),
       (('', None), 0)),
      ((['git', 'checkout', '-q', 'working'],), ''),
      ((['git', 'branch', '-D', 'git-cl-commit'],), ''),
  ]

  @staticmethod
  def _cmd_line(description, args, similarity, find_copies, private):
    """Returns the upload command line passed to upload.RealMain()."""
    return [
        'upload', '--assume_yes', '--server',
        'https://codereview.example.com',
        '--message', description
    ] + args + [
        '--cc', 'joe@example.com',
    ] + (['--private'] if private else []) + [
        '--git_similarity', similarity or '50'
    ] + (['--git_no_find_copies'] if find_copies == False else []) + [
        'fake_ancestor_sha', 'HEAD'
    ]

  def _run_reviewer_test(
      self, upload_args, expected_description, returned_description,
      final_description, reviewers, private=False):
    """Generic reviewer test framework."""
    try:
      similarity = upload_args[upload_args.index('--similarity')+1]
    except ValueError:
      similarity = None

    if '--find-copies' in upload_args:
      find_copies = True
    elif '--no-find-copies' in upload_args:
      find_copies = False
    else:
      find_copies = None

    private = '--private' in upload_args

    self.calls = self._upload_calls(similarity, find_copies, private)

    def RunEditor(desc, _, **kwargs):
      self.assertEquals(
          '# Enter a description of the change.\n'
          '# This will be displayed on the codereview site.\n'
          '# The first line will also be used as the subject of the review.\n'
          '#--------------------This line is 72 characters long'
          '--------------------\n' +
          expected_description,
          desc)
      return returned_description
    self.mock(git_cl.gclient_utils, 'RunEditor', RunEditor)

    def check_upload(args):
      cmd_line = self._cmd_line(final_description, reviewers, similarity,
                                find_copies, private)
      self.assertEquals(cmd_line, args)
      return 1, 2
    self.mock(git_cl.upload, 'RealMain', check_upload)

    git_cl.main(['upload'] + upload_args)

  def test_no_reviewer(self):
    self._run_reviewer_test(
        [],
        'desc\n\nBUG=',
        '# Blah blah comment.\ndesc\n\nBUG=',
        'desc\n\nBUG=',
        [])

  def test_keep_similarity(self):
    self._run_reviewer_test(
        ['--similarity', '70'],
        'desc\n\nBUG=',
        '# Blah blah comment.\ndesc\n\nBUG=',
        'desc\n\nBUG=',
        [])

  def test_keep_find_copies(self):
    self._run_reviewer_test(
        ['--no-find-copies'],
        'desc\n\nBUG=',
        '# Blah blah comment.\ndesc\n\nBUG=\n',
        'desc\n\nBUG=',
        [])

  def test_private(self):
    self._run_reviewer_test(
        ['--private'],
        'desc\n\nBUG=',
        '# Blah blah comment.\ndesc\n\nBUG=\n',
        'desc\n\nBUG=',
        [])

  def test_reviewers_cmd_line(self):
    # Reviewer is passed as-is
    description = 'desc\n\nR=foo@example.com\nBUG='
    self._run_reviewer_test(
        ['-r' 'foo@example.com'],
        description,
        '\n%s\n' % description,
        description,
        ['--reviewers=foo@example.com'])

  def test_reviewer_tbr_overridden(self):
    # Reviewer is overridden with TBR
    # Also verifies the regexp works without a
trailing LF description = 'Foo Bar\n\nTBR=reviewer@example.com' self._run_reviewer_test( ['-r' 'foo@example.com'], 'desc\n\nR=foo@example.com\nBUG=', description.strip('\n'), description, ['--reviewers=reviewer@example.com']) def test_reviewer_multiple(self): # Handles multiple R= or TBR= lines. description = ( 'Foo Bar\nTBR=reviewer@example.com\nBUG=\nR=another@example.com') self._run_reviewer_test( [], 'desc\n\nBUG=', description, description, ['--reviewers=another@example.com,reviewer@example.com']) def test_reviewer_send_mail(self): # --send-mail can be used without -r if R= is used description = 'Foo Bar\nR=reviewer@example.com' self._run_reviewer_test( ['--send-mail'], 'desc\n\nBUG=', description.strip('\n'), description, ['--reviewers=reviewer@example.com', '--send_mail']) def test_reviewer_send_mail_no_rev(self): # Fails without a reviewer. stdout = StringIO.StringIO() stderr = StringIO.StringIO() try: self.calls = self._upload_no_rev_calls(None, None) def RunEditor(desc, _, **kwargs): return desc self.mock(git_cl.gclient_utils, 'RunEditor', RunEditor) self.mock(sys, 'stdout', stdout) self.mock(sys, 'stderr', stderr) git_cl.main(['upload', '--send-mail']) self.fail() except SystemExit: self.assertEqual( 'Using 50% similarity for rename/copy detection. Override with ' '--similarity.\n', stdout.getvalue()) self.assertEqual( 'Must specify reviewers to send email.\n', stderr.getvalue()) def test_dcommit(self): self.calls = ( self._dcommit_calls_1() + self._git_sanity_checks('fake_ancestor_sha', 'working') + self._dcommit_calls_normal() + self._dcommit_calls_3()) git_cl.main(['dcommit']) def test_dcommit_bypass_hooks(self): self.calls = ( self._dcommit_calls_1() + self._dcommit_calls_bypassed() + self._dcommit_calls_3()) git_cl.main(['dcommit', '--bypass-hooks']) @classmethod def _gerrit_base_calls(cls): return [ ((['git', 'config', 'rietveld.autoupdate'],), ''), ((['git', 'config', 'rietveld.server'],), 'codereview.example.com'), ((['git', 'symbolic-ref', 'HEAD'],), 'master'), ((['git', 'config', '--int', '--get', 'branch.master.git-cl-similarity'],), ''), ((['git', 'symbolic-ref', 'HEAD'],), 'master'), ((['git', 'config', '--int', '--get', 'branch.master.git-find-copies'],), ''), ((['git', 'update-index', '--refresh', '-q'],), ''), ((['git', 'diff-index', '--name-status', 'HEAD'],), ''), ((['git', 'symbolic-ref', 'HEAD'],), 'master'), ((['git', 'config', 'branch.master.merge'],), 'master'), ((['git', 'config', 'branch.master.remote'],), 'origin'), ((['get_or_create_merge_base', 'master', 'master'],), 'fake_ancestor_sha'), ] + cls._git_sanity_checks('fake_ancestor_sha', 'master') + [ ((['git', 'rev-parse', '--show-cdup'],), ''), ((['git', 'rev-parse', 'HEAD'],), '12345'), ((['git', 'diff', '--name-status', '--no-renames', '-r', 'fake_ancestor_sha...', '.'],), 'M\t.gitignore\n'), ((['git', 'config', 'branch.master.rietveldissue'],), ''), ((['git', 'config', 'branch.master.rietveldpatchset'],), ''), ((['git', 'log', '--pretty=format:%s%n%n%b', 'fake_ancestor_sha...'],), 'foo'), ((['git', 'config', 'user.email'],), 'me@example.com'), ((['git', 'diff', '--no-ext-diff', '--stat', '--find-copies-harder', '-l100000', '-C50', 'fake_ancestor_sha', 'HEAD'],), '+dat'), ] @staticmethod def _gerrit_upload_calls(description, reviewers): calls = [ ((['git', 'config', 'gerrit.host'],), 'gerrit.example.com'), ((['git', 'log', '--pretty=format:%s\n\n%b', 'fake_ancestor_sha..HEAD'],), description) ] if git_cl.CHANGE_ID not in description: calls += [ ((['git', 'log', '--pretty=format:%s\n\n%b', 
'fake_ancestor_sha..HEAD'],), description), ((['git', 'commit', '--amend', '-m', description],), ''), ((['git', 'log', '--pretty=format:%s\n\n%b', 'fake_ancestor_sha..HEAD'],), description) ] calls += [ ((['git', 'rev-list', 'origin/master..'],), ''), ((['git', 'config', 'rietveld.cc'],), '') ] receive_pack = '--receive-pack=git receive-pack ' receive_pack += '--cc=joe@example.com' # from watch list if reviewers: receive_pack += ' ' receive_pack += ' '.join( '--reviewer=' + email for email in sorted(reviewers)) receive_pack += '' calls += [ ((['git', 'push', receive_pack, 'origin', 'HEAD:refs/for/master'],), '') ] return calls def _run_gerrit_upload_test( self, upload_args, description, reviewers): """Generic gerrit upload test framework.""" self.calls = self._gerrit_base_calls() self.calls += self._gerrit_upload_calls(description, reviewers) git_cl.main(['upload'] + upload_args) def test_gerrit_upload_without_change_id(self): self._run_gerrit_upload_test( [], 'desc\n\nBUG=\n', []) def test_gerrit_no_reviewer(self): self._run_gerrit_upload_test( [], 'desc\n\nBUG=\nChange-Id:123456789\n', []) def test_gerrit_reviewers_cmd_line(self): self._run_gerrit_upload_test( ['-r', 'foo@example.com'], 'desc\n\nBUG=\nChange-Id:123456789', ['foo@example.com']) def test_gerrit_reviewer_multiple(self): self._run_gerrit_upload_test( [], 'desc\nTBR=reviewer@example.com\nBUG=\nR=another@example.com\n' 'Change-Id:123456789\n', ['reviewer@example.com', 'another@example.com']) def test_config_gerrit_download_hook(self): self.mock(git_cl, 'FindCodereviewSettingsFile', CodereviewSettingsFileMock) def ParseCodereviewSettingsContent(content): keyvals = {} keyvals['CODE_REVIEW_SERVER'] = 'gerrit.chromium.org' keyvals['GERRIT_HOST'] = 'gerrit.chromium.org' keyvals['GERRIT_PORT'] = '29418' return keyvals self.mock(git_cl.gclient_utils, 'ParseCodereviewSettingsContent', ParseCodereviewSettingsContent) self.mock(git_cl.os, 'access', self._mocked_call) self.mock(git_cl.os, 'chmod', self._mocked_call) src_dir = os.path.join(os.path.sep, 'usr', 'local', 'src') def AbsPath(path): if not path.startswith(os.path.sep): return os.path.join(src_dir, path) return path self.mock(git_cl.os.path, 'abspath', AbsPath) commit_msg_path = os.path.join(src_dir, '.git', 'hooks', 'commit-msg') def Exists(path): if path == commit_msg_path: return False # others paths, such as /usr/share/locale/.... 
return True self.mock(git_cl.os.path, 'exists', Exists) self.mock(git_cl, 'urlretrieve', self._mocked_call) self.mock(git_cl, 'hasSheBang', self._mocked_call) self.calls = [ ((['git', 'config', 'rietveld.autoupdate'],), ''), ((['git', 'config', 'rietveld.server', 'gerrit.chromium.org'],), ''), ((['git', 'config', '--unset-all', 'rietveld.cc'],), ''), ((['git', 'config', '--unset-all', 'rietveld.private'],), ''), ((['git', 'config', '--unset-all', 'rietveld.tree-status-url'],), ''), ((['git', 'config', '--unset-all', 'rietveld.viewvc-url'],), ''), ((['git', 'config', '--unset-all', 'rietveld.bug-prefix'],), ''), ((['git', 'config', '--unset-all', 'rietveld.cpplint-regex'],), ''), ((['git', 'config', '--unset-all', 'rietveld.cpplint-ignore-regex'],), ''), ((['git', 'config', 'gerrit.host', 'gerrit.chromium.org'],), ''), # DownloadHooks(False) ((['git', 'config', 'gerrit.host'],), 'gerrit.chromium.org'), ((['git', 'rev-parse', '--show-cdup'],), ''), ((commit_msg_path, os.X_OK,), False), (('https://gerrit-review.googlesource.com/tools/hooks/commit-msg', commit_msg_path,), ''), ((commit_msg_path,), True), ((commit_msg_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR,), ''), # GetCodereviewSettingsInteractively ((['git', 'config', 'rietveld.server'],), 'gerrit.chromium.org'), (('Rietveld server (host[:port]) [https://gerrit.chromium.org]:',), ''), ((['git', 'config', 'rietveld.cc'],), ''), (('CC list:',), ''), ((['git', 'config', 'rietveld.private'],), ''), (('Private flag (rietveld only):',), ''), ((['git', 'config', 'rietveld.tree-status-url'],), ''), (('Tree status URL:',), ''), ((['git', 'config', 'rietveld.viewvc-url'],), ''), (('ViewVC URL:',), ''), # DownloadHooks(True) ((['git', 'config', 'rietveld.bug-prefix'],), ''), (('Bug Prefix:',), ''), ((commit_msg_path, os.X_OK,), True), ] git_cl.main(['config']) def test_update_reviewers(self): data = [ ('foo', [], 'foo'), ('foo\nR=xx', [], 'foo\nR=xx'), ('foo\nTBR=xx', [], 'foo\nTBR=xx'), ('foo', ['a@c'], 'foo\n\nR=a@c'), ('foo\nR=xx', ['a@c'], 'foo\n\nR=a@c, xx'), ('foo\nTBR=xx', ['a@c'], 'foo\n\nR=a@c\nTBR=xx'), ('foo\nTBR=xx\nR=yy', ['a@c'], 'foo\n\nR=a@c, yy\nTBR=xx'), ('foo\nBUG=', ['a@c'], 'foo\nBUG=\nR=a@c'), ('foo\nR=xx\nTBR=yy\nR=bar', ['a@c'], 'foo\n\nR=a@c, xx, bar\nTBR=yy'), ('foo', ['a@c', 'b@c'], 'foo\n\nR=a@c, b@c'), ('foo\nBar\n\nR=\nBUG=', ['c@c'], 'foo\nBar\n\nR=c@c\nBUG='), ('foo\nBar\n\nR=\nBUG=\nR=', ['c@c'], 'foo\nBar\n\nR=c@c\nBUG='), # Same as the line before, but full of whitespaces. ( 'foo\nBar\n\n R = \n BUG = \n R = ', ['c@c'], 'foo\nBar\n\nR=c@c\n BUG =', ), # Whitespaces aren't interpreted as new lines. ('foo BUG=allo R=joe ', ['c@c'], 'foo BUG=allo R=joe\n\nR=c@c'), ] expected = [i[2] for i in data] actual = [] for orig, reviewers, _expected in data: obj = git_cl.ChangeDescription(orig) obj.update_reviewers(reviewers) actual.append(obj.description) self.assertEqual(expected, actual) if __name__ == '__main__': git_cl.logging.basicConfig( level=git_cl.logging.DEBUG if '-v' in sys.argv else git_cl.logging.ERROR) unittest.main()
cybertk/depot_tools
tests/git_cl_test.py
Python
bsd-3-clause
26,857
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. import json import requests from odoo import api, fields, models from odoo.exceptions import AccessDenied, UserError from odoo.addons.auth_signup.models.res_users import SignupError from odoo.addons import base base.models.res_users.USER_PRIVATE_FIELDS.append('oauth_access_token') class ResUsers(models.Model): _inherit = 'res.users' oauth_provider_id = fields.Many2one('auth.oauth.provider', string='OAuth Provider') oauth_uid = fields.Char(string='OAuth User ID', help="Oauth Provider user_id", copy=False) oauth_access_token = fields.Char(string='OAuth Access Token', readonly=True, copy=False) _sql_constraints = [ ('uniq_users_oauth_provider_oauth_uid', 'unique(oauth_provider_id, oauth_uid)', 'OAuth UID must be unique per provider'), ] @api.model def _auth_oauth_rpc(self, endpoint, access_token): return requests.get(endpoint, params={'access_token': access_token}).json() @api.model def _auth_oauth_validate(self, provider, access_token): """ return the validation data corresponding to the access token """ oauth_provider = self.env['auth.oauth.provider'].browse(provider) validation = self._auth_oauth_rpc(oauth_provider.validation_endpoint, access_token) if validation.get("error"): raise Exception(validation['error']) if oauth_provider.data_endpoint: data = self._auth_oauth_rpc(oauth_provider.data_endpoint, access_token) validation.update(data) return validation @api.model def _generate_signup_values(self, provider, validation, params): oauth_uid = validation['user_id'] email = validation.get('email', 'provider_%s_user_%s' % (provider, oauth_uid)) name = validation.get('name', email) return { 'name': name, 'login': email, 'email': email, 'oauth_provider_id': provider, 'oauth_uid': oauth_uid, 'oauth_access_token': params['access_token'], 'active': True, } @api.model def _auth_oauth_signin(self, provider, validation, params): """ retrieve and sign in the user corresponding to provider and validated access token :param provider: oauth provider id (int) :param validation: result of validation of access token (dict) :param params: oauth parameters (dict) :return: user login (str) :raise: AccessDenied if signin failed This method can be overridden to add alternative signin methods. 
""" oauth_uid = validation['user_id'] try: oauth_user = self.search([("oauth_uid", "=", oauth_uid), ('oauth_provider_id', '=', provider)]) if not oauth_user: raise AccessDenied() assert len(oauth_user) == 1 oauth_user.write({'oauth_access_token': params['access_token']}) return oauth_user.login except AccessDenied as access_denied_exception: if self.env.context.get('no_user_creation'): return None state = json.loads(params['state']) token = state.get('t') values = self._generate_signup_values(provider, validation, params) try: _, login, _ = self.signup(values, token) return login except (SignupError, UserError): raise access_denied_exception @api.model def auth_oauth(self, provider, params): # Advice by Google (to avoid Confused Deputy Problem) # if validation.audience != OUR_CLIENT_ID: # abort() # else: # continue with the process access_token = params.get('access_token') validation = self._auth_oauth_validate(provider, access_token) # required check if not validation.get('user_id'): # Workaround: facebook does not send 'user_id' in Open Graph Api if validation.get('id'): validation['user_id'] = validation['id'] else: raise AccessDenied() # retrieve and sign in user login = self._auth_oauth_signin(provider, validation, params) if not login: raise AccessDenied() # return user credentials return (self.env.cr.dbname, login, access_token) def _check_credentials(self, password, env): try: return super(ResUsers, self)._check_credentials(password, env) except AccessDenied: passwd_allowed = env['interactive'] or not self.env.user._rpc_api_keys_only() if passwd_allowed and self.env.user.active: res = self.sudo().search([('id', '=', self.env.uid), ('oauth_access_token', '=', password)]) if res: return raise def _get_session_token_fields(self): return super(ResUsers, self)._get_session_token_fields() | {'oauth_access_token'}
jeremiahyan/odoo
addons/auth_oauth/models/res_users.py
Python
gpl-3.0
5,112
#!/usr/bin/env python2
import getpass
import telnetlib

# Credentials for the telnet connection
user = raw_input("Enter your telnet account: ")
password = getpass.getpass()

# switches.txt holds one switch IP address per line
with open('switches.txt') as f:
    for line in f:
        host = line.strip()  # drop the trailing newline before connecting
        if not host:
            continue
        print "[+] Telnet to host: " + host
        # Log in over telnet
        tn = telnetlib.Telnet(host)
        tn.read_until("Username: ")
        tn.write(user + "\n")
        if password:
            tn.read_until("Password: ")
            tn.write(password + "\n")
        # Cisco configuration: create VLANs 2-14
        tn.write("conf t\n")
        for n in range(2, 15):
            tn.write("vlan " + str(n) + "\n")
            tn.write("name pyVlan_" + str(n) + "\n")
        tn.write("end\n")
        tn.write("exit\n")
        print tn.read_all()
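# Hedged sketch (an assumption, not in the original script): if one switch is
# unreachable, telnetlib.Telnet raises socket.error and the whole run aborts.
# Wrapping the per-host work in try/except keeps the loop going:
#
#     import socket
#     try:
#         tn = telnetlib.Telnet(host, timeout=10)
#         ...  # same login and VLAN commands as above
#     except socket.error as exc:
#         print "[-] Could not connect to " + host + ": " + str(exc)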
exv1p3r/PyScripts
pyNet/pyMultiSW2.py
Python
gpl-3.0
777
#! /usr/bin/env python """Unit test suite for smartcard python framework over pcsc. __author__ = "http://www.gemalto.com" Copyright 2001-2012 gemalto Author: Jean-Daniel Aussel, mailto:jean-daniel.aussel@gemalto.com This file is part of pyscard. pyscard is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. pyscard is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with pyscard; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA """ import sys import unittest # so that we can locate configcheck sys.path += ['..'] import configcheck def suite(): modules_to_test = ( 'testcase_pcscreadergroups', ) testsuite_framework = unittest.TestSuite() for module in map(__import__, modules_to_test): testsuite_framework.addTest(unittest.findTestCases(module)) return testsuite_framework if __name__ == '__main__': configcheck.checklocalconfig() unittest.main(defaultTest='suite')
moreati/pyscard
smartcard/test/frameworkpcsc/testsuite_frameworkpcsc.py
Python
lgpl-2.1
1,428
import datetime

from django import http
from django.conf import settings
from django.forms.models import modelform_factory
from django.shortcuts import get_object_or_404
from django.views.generic.list_detail import object_list
from django.views.generic.simple import direct_to_template
from django.core.urlresolvers import reverse

from simple_comments import forms as comment_forms

NOTIFICATION_LABEL = 'simple_comments_comment'


class CommentConfiguration(object):
    """A set of basic configuration options for handling comments.

    Subclass this class to create your own custom behaviour.

    There are three builtin levels of spam prevention: ``use_akismet``,
    ``use_control_question`` and ``use_honeypot`` are all boolean
    attributes that allow enabling of spam prevention. Override
    ``get_spam_prevention_forms`` in a subclass to add custom spam
    prevention mechanisms.

    ``user_comments`` determines whether a user must be registered in
    order to post comments. It will also remove the need for a poster to
    fill in otherwise mandatory fields such as name and email.

    ``user_can_delete`` determines if users should be able to delete
    their own comments or not.

    ``autoclose_after`` determines how many days should pass after the
    target was created before comments should be closed.
    ``autoclose_after_field_name`` is the name of a date/datetime field
    on the target model that specifies when the target was created.

    ``prevent_duplicates`` dictates whether some measures should be
    taken against duplicate comments.

    ``allow_comments_field_name`` is a boolean field on the target model
    that, when evaluating to ``False``, prevents comments from being
    posted.

    ``send_notifications`` dictates whether notifications should be sent
    or not.
    """
    template_object_name = 'comment'
    preview_template_name = 'simple_comments/comment_preview.html'
    form_template_name = 'simple_comments/comment_form.html'
    list_template_name = 'simple_comments/comment_list.html'
    deleted_template_name = 'simple_comments/comment_deleted.html'
    posted_template_name = 'simple_comments/comment_posted.html'
    use_akismet = False
    use_control_question = False
    use_honeypot = False
    user_comments = False
    user_can_delete = False
    autoclose_after = None
    autoclose_after_field_name = None
    prevent_duplicates = True
    allow_comments_field_name = None
    send_notifications = False
    # require_moderation = False
    # confirm_delete = True
    # comment_markup
    order_by = 'pub_date'
    paginate_by = 25

    def __init__(self, configuration_key, model):
        self.configuration_key = configuration_key
        self.model = model

    def get_exclude(self):
        """Return a list of fields to exclude when generating a form
        using ``get_form()``. Defaults to the basic fields of the
        ``BaseComment`` we want to exclude.

        Subclasses may override this method to alter the list of fields
        to exclude, albeit it's probably easier to just set an
        ``exclude`` attribute.
        """
        exclude = ['user', 'user_username', 'pub_date', 'ip_address',
                   'target']
        if self.user_comments:
            exclude = exclude + ['author_name', 'author_email',
                                 'author_website']
        return exclude
    exclude = property(fget=lambda self: self.get_exclude())

    def days_since_target_was_published(self, target):
        """Return the number of days that have passed since ``target``
        was published.
""" now = datetime.datetime.now() published = getattr(target, self.autoclose_after_field_name) diff = datetime.date(now.year, now.month, now.day) - \ datetime.date(published.year, published.month, published.day) return diff.days def allow_comments(self, target): """Return a boolean dictating whether comments are allowed for ``target`` or not. """ if self.allow_comments_field_name is not None and \ not getattr(target, self.allow_comments_field_name): return False if self.autoclose_after_field_name is not None and \ self.autoclose_after is not None: days_since = self.days_since_target_was_published(target) return days_since < self.autoclose_after return True def get_target_owner(self, target): """Return the owner (``User`` instance) of target.""" return None def get_duplicate(self, target, comment): """Try to determine if a duplicate of `comment` exists. If entries posted by the same author, with the same content, exist for the same day the latest "duplicate" record is returned. Otherwise return ``None``. This method should be overridden if a custom model (that requires custom checks) is used. """ filter_kwargs = { 'user': comment.user, 'author_name': comment.author_name, 'author_email': comment.author_email, 'author_website': comment.author_website, 'target': target, } queryset = comment._default_manager.filter(**filter_kwargs) queryset = queryset.order_by('-pub_date') if queryset.count(): latest = queryset[0] if latest.pub_date.date() == comment.pub_date.date() and \ latest.body == comment.body: return latest return None def get_post_save_redirect_url(self, target, comment): """Return a URL to redirect to after a successful comment save.""" return reverse('simple_comments_comment_posted', args=[self.configuration_key, target.pk, comment.pk]) def get_post_delete_redirect_url(self, target): """Return a URL to redirect to after a successful comment delete.""" return reverse('simple_comments_comment_deleted', args=[self.configuration_key, target.pk]) def get_form(self): """Return a form-class to use when creating comments. Subclasses can override this method to return a custom form. """ return modelform_factory(self.model, fields=None, exclude=self.exclude) def get_spam_prevention_forms(self): """Return a list containing spam prevention forms.""" forms = [] if self.use_akismet: forms.append(comment_forms.AkismetForm) if self.use_control_question: forms.append(comment_forms.EarTriviaForm) if self.use_honeypot: forms.append(comment_forms.HoneypotForm) return forms def has_permission_to_delete(self, comment, user, request=None): """Return a boolean dictating whether a user has permission to delete a comment or not. """ if user is None or user.is_anonymous(): return False target_owner = self.get_target_owner(comment.target) if self.user_can_delete and \ (comment.user == user or target_owner == user): return True if request is not None: opts = comment.target._meta perm = '%s.%s' % (opts.app_label, opts.get_delete_permission()) if request.user.has_perm(perm): return True return False def get_notification_users(self, target): """Return an iterable of ``User`` instances that should be notified when a comment is made on ``target``. 
""" return [self.get_target_owner(target)] def dispatch_notifications(self, comment): users = self.get_notification_users(comment.target) if not self.send_notifications or \ "notification" not in settings.INSTALLED_APPS or not users: return False from notification import models as notification context = { 'comment': comment, 'verbose_name': comment.target._meta.verbose_name, } notification.send(users, NOTIFICATION_LABEL, context) return True # Views def create_comment(self, request, target_id, extra_context=None): target = get_object_or_404(self.model.get_target_model(), pk=target_id) if not self.allow_comments(target) or \ (self.user_comments and not request.user.is_authenticated()): return http.HttpResponseForbidden() extra_context = extra_context or {} is_preview = request.POST.get('submit', '').lower() == 'preview' or \ request.POST.get('preview', None) is not None extra_context.update({ 'is_preview': is_preview }) data = request.POST or None form = self.get_form()(data=data) spam_prevention_forms = [f(request=request, data=data) for f in \ self.get_spam_prevention_forms()] is_valid = form.is_valid() and \ all([f.is_valid() for f in spam_prevention_forms]) extra_context.update({ 'form': form, 'spam_prevention_forms': spam_prevention_forms, 'target': target, 'configuration': self, }) if not is_valid or request.method == 'GET': return direct_to_template(request, template=self.form_template_name, extra_context=extra_context) # Do note that we're not actually persisting the instance here, we're # calling save because we need an instance when we render the preview # template. comment = form.save(commit=False) if self.user_comments: comment.user = request.user comment.denormalize_user_instance() comment.target = target comment.ip_address = request.META.get("REMOTE_ADDR", None) extra_context = extra_context or {} extra_context.update({ self.template_object_name: comment }) if is_preview: return direct_to_template(request, template=self.preview_template_name, extra_context=extra_context) # Try to prevent accidental duplicate postings by finding a *very* # similar comment and use that instead of saving a new one. 
        duplicate = self.get_duplicate(target, comment)
        if duplicate is not None:
            comment = duplicate
        else:
            comment.save()
            self.dispatch_notifications(comment)

        post_save_redirect_url = self.get_post_save_redirect_url(target,
                                                                 comment)
        return http.HttpResponseRedirect(post_save_redirect_url)

    def delete_comment(self, request, target_id, comment_id):
        target = get_object_or_404(self.model.get_target_model(),
                                   pk=target_id)
        comment = get_object_or_404(self.model, pk=comment_id)
        if not self.has_permission_to_delete(comment, request.user, request):
            return http.HttpResponseForbidden()
        comment.delete()
        post_delete_redirect_url = \
            self.get_post_delete_redirect_url(target)
        return http.HttpResponseRedirect(post_delete_redirect_url)

    def comment_list(self, request, target_id=None, extra_context=None):
        queryset = self.model._default_manager.all().select_related()
        queryset = queryset.order_by(self.order_by)
        if target_id is not None:
            queryset = queryset.filter(target=target_id)
        extra_context = extra_context or {}
        extra_context.update({
            'target_id': target_id,
            'configuration': self,
        })
        return object_list(request, queryset=queryset,
                           paginate_by=self.paginate_by,
                           template_object_name=self.template_object_name,
                           template_name=self.list_template_name,
                           extra_context=extra_context)

    def comment_posted(self, request, target_id, comment_id,
                       extra_context=None):
        target = get_object_or_404(self.model.get_target_model(),
                                   pk=target_id)
        comment = get_object_or_404(self.model, pk=comment_id)
        extra_context = extra_context or {}
        return direct_to_template(request,
                                  template=self.posted_template_name,
                                  extra_context=extra_context)

    def comment_deleted(self, request, target_id, extra_context=None):
        target = get_object_or_404(self.model.get_target_model(),
                                   pk=target_id)
        extra_context = extra_context or {}
        return direct_to_template(request,
                                  template=self.deleted_template_name,
                                  extra_context=extra_context)


class CommentConfigurationAlreadyRegistered(Exception):
    pass


class CommentConfigurationNotRegistered(Exception):
    pass


class CommentConfigurations(object):
    """Register comment models and configurations."""
    __shared_state = {
        'configurations': {},
    }

    def __init__(self):
        self.__dict__ = self.__shared_state

    def register(self, configuration_key, comment_model,
                 configuration_class=CommentConfiguration):
        """Register ``comment_model`` and ``configuration_class`` against
        ``configuration_key``. If configuration class is not given the
        default ``CommentConfiguration`` will be used.
        """
        try:
            self.configurations[configuration_key]
            raise CommentConfigurationAlreadyRegistered
        except KeyError:
            configuration = configuration_class(configuration_key,
                                                comment_model)
            self.configurations[configuration_key] = configuration

    def unregister(self, configuration_key):
        """Unregister model and configuration matching
        ``configuration_key``."""
        try:
            del(self.configurations[configuration_key])
        except KeyError:
            raise CommentConfigurationNotRegistered

    def get_configuration(self, configuration_key):
        """Return the comment model and configuration associated with
        ``configuration_key``.
        """
        try:
            return self.configurations[configuration_key]
        except KeyError:
            raise CommentConfigurationNotRegistered

    def all_configurations(self):
        return self.configurations.items()


configurations = CommentConfigurations()
all_configurations = configurations.all_configurations
register = configurations.register
unregister = configurations.unregister
get_configuration = configurations.get_configuration
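# Hedged usage sketch (the "blog" key and BlogComment model are hypothetical,
# not defined in this app): a project registers its comment model once,
# optionally with a customized configuration subclass, and later resolves
# behaviour through the configuration key:
#
#     from simple_comments import comments
#
#     class BlogCommentConfiguration(comments.CommentConfiguration):
#         use_honeypot = True
#         user_can_delete = True
#         autoclose_after = 30
#         autoclose_after_field_name = 'pub_date'
#
#     comments.register('blog', BlogComment, BlogCommentConfiguration)
#     config = comments.get_configuration('blog')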
strange/django-simple-comments
simple_comments/comments.py
Python
bsd-3-clause
15,043
''' Plot angular coverage This script (`ara-coverage`) generates angular histogram tabulating the number of projections within a discrete area on a spherical surface. This angular histogram can be displayed in a number of ways: - 2D map projection (default) - 3D scatter plot - Cylinders surrounding a volume in Chimera The number of projections is represented by both the color and size of circle/cylinder in each representation. An 'x' is used to denote a view with no projections in the 2D project plot. Examples ======== .. sourcecode :: sh $ ara-coverage relion_data.star -o plot.png $ ara-coverage relion_data.star -o plot.bild --chimera $ ara-coverage relion_data.star -o plot3d.png --projection 3d Critical Options ================ .. program:: ara-coverage .. option:: -i <FILENAME1,FILENAME2>, --input-files <FILENAME1,FILENAME2>, FILENAME1 FILENAME2 List of input filenames If you use the parameters `-i` or `--inputfiles` the filenames may be comma or space separated on the command line; they must be comma seperated in a configuration file. Note, these flags are optional for input files; the filenames must be separated by spaces. For a very large number of files (>5000) use `-i "filename*"` .. option:: -o <FILENAME>, --output <FILENAME> Output template for the enumerated filename (e.g. mic_0000.mrc) .. option:: -s <FILENAME>, --selection-file <FILENAME> Selection file for projections in alignment file Plot Options ============ .. program:: ara-coverage .. option:: -p, --projection <CHOICE> Map projection type. See below for more details on available map projections. .. option:: -d, --dpi <int> Resolution of the image in dots per inch .. option:: --count-mode <Shape|Color|Both> Mode to measure the number of projections per view (Currently only works for map projection output) .. option:: --hide-zero-marker <BOOL> Hide the zero markers (Currently does not work for Chimera BILD output) .. option:: --color-map <CHOICE> Set the color map. See below for more details on available color maps. Histogram Options ================= .. option:: --view-resolution <INT> Group views into a coarse grid: (2) 15 deg, (3) 7.5 deg ... .. option:: --disable-mirror <BOOL> Disable mirroring over the equator for counting Chimera Options =============== .. option:: --chimera <BOOL> Write out Chimera bild file .. option:: --particle-radius <FLOAT> Radius from center for ball projections .. option:: --particle-center <FLOAT> Offset from center for ball projections (zero means use radius) Projection Options ================== .. option:: --area-mult <BOOL> Circle area multiplier .. option:: --alpha <FLOAT> Transparency of the marker (1.0 = solid, 0.0 = no color) .. option:: --label-view <LIST> List of views to label with number and Euler Angles (theta,phi) .. option:: --use-scale <BOOL> Display scale and color instead of color bar Layout Options ============== .. option:: --lon-zero <FLOAT> Longitude for axis zero (empty for default values determined by projection) .. option:: --lat-zero <FLOAT> Latitude for axis zero (empty for default values determined by projection) .. option:: --ll-lat <FLOAT> Latitude of lower left hand corner of the desired map domain (degrees) .. option:: --ll-lon <FLOAT> Longitude of lower left hand corner of the desired map domain (degrees) .. option:: -ur-lat <FLOAT> Latitude of upper right hand corner of the desired map domain (degrees) .. option:: --ur-lon <FLOAT> Longitude of upper right hand corner of the desired map domain (degrees) .. 
option:: --boundinglat <FLOAT>

    Bounding latitude for npstere,spstere,nplaea,splaea,npaeqd,spaeqd

.. option:: --proj-width <INT>

    Width of desired map domain in projection coordinates (meters)

.. option:: --proj-height <INT>

    Height of desired map domain in projection coordinates (meters)

Other Options
=============

This is not a complete list of options available to this script; for additional options see:

    #. :ref:`Options shared by all scripts ... <shared-options>`

Additional Option Information
=============================

Supported color maps
--------------------

Here is a limited selection of the available color maps:

.. image:: http://matplotlib.org/_images/colormaps_reference_00.png

.. note::

    A full description of the color maps can be found at:
    http://matplotlib.org/examples/color/colormaps_reference.html

Supported Projection Plots
--------------------------

The following are supported projections for the :option:`-p` option.

======= =================================
Option  Description
======= =================================
aeqd    Azimuthal Equidistant
poly    Polyconic
gnom    Gnomonic
moll    Mollweide
tmerc   Transverse Mercator
nplaea  North-Polar Lambert Azimuthal
gall    Gall Stereographic Cylindrical
mill    Miller Cylindrical
merc    Mercator
stere   Stereographic
npstere North-Polar Stereographic
hammer  Hammer
geos    Geostationary
nsper   Near-Sided Perspective
vandg   van der Grinten
laea    Lambert Azimuthal Equal Area
mbtfpq  McBryde-Thomas Flat-Polar Quartic
sinu    Sinusoidal
spstere South-Polar Stereographic
lcc     Lambert Conformal
npaeqd  North-Polar Azimuthal Equidistant
eqdc    Equidistant Conic
cyl     Cylindrical Equidistant
omerc   Oblique Mercator
aea     Albers Equal Area
spaeqd  South-Polar Azimuthal Equidistant
ortho   Orthographic
cass    Cassini-Soldner
splaea  South-Polar Lambert Azimuthal
robin   Robinson
======= =================================

.. note::

    For more information concerning the map projections, see
    http://matplotlib.github.com/basemap/users/mapsetup.html

.. Created on Aug 27, 2013
..
codeauthor:: Robert Langlois <rl2528@columbia.edu>
'''
from ..core.util.matplotlib_nogui import pylab
from ..core.app import program
from ..core.metadata import format
from ..core.metadata import format_utility
from ..core.orient import healpix
from ..core.orient import spider_transforms
from mpl_toolkits import basemap
from mpl_toolkits import mplot3d
import matplotlib.cm as cm
import matplotlib.cm
import matplotlib.lines
import matplotlib.font_manager
import scipy.io
import logging
import numpy
import os

_logger = logging.getLogger(__name__)
_logger.setLevel(logging.DEBUG)

def batch(files, output, dpi, chimera=False, **extra):
    ''' Generate an angular histogram for the given alignment files

    :Parameters:

    files : list
            List of input alignment files
    output : str
             Output filename for angular histogram
    dpi : float
          Dots per inch for plots
    chimera : bool
              Output in chimera bild format
    '''
    outputn = output
    mapargs = projection_args(**extra) if extra['projection'].lower() != '3d' else None
    if chimera or mapargs is None: del extra['use_mirror']
    i = 0
    for filename in files:
        if len(files) > 1:
            outputn = format_utility.new_filename(output, suffix=os.path.splitext(os.path.basename(filename))[0], ext='.png')
        angs = read_angles(filename, **extra)
        angs, cnt = angular_histogram(angs, **extra)
        if chimera:
            chimera_bild(angs, cnt, outputn, **extra)
        elif mapargs is None:
            fig = pylab.figure(i, dpi=dpi)
            scatterEuler3d(fig, angs, cnt, **extra)
            fig.savefig(outputn, dpi=dpi)
        else:
            _logger.info("%s has %d missing views"%(os.path.basename(filename), numpy.sum(cnt < 1)))
            fig = pylab.figure(i, dpi=dpi)
            fig.add_axes([0.05,0.05,0.9,0.9])
            plot_angles(angs, cnt, mapargs, **extra)
            fig.savefig(outputn, dpi=dpi)
        i += 1  # use a fresh figure number for each input file
    _logger.info("Completed")

def scatterEuler3d(fig, angs, cnt, color_map='cool', hide_zero_marker=False, **extra):
    ''' Plot the angular histogram using a 3D scatter plot

    :Parameters:

    fig : Figure
          Matplotlib figure handle
    angs : array
           Array of view angles
    cnt : array
          Histogram for each view angle
    color_map : str
                Name of color map
    hide_zero_marker : bool
                       If true, hide the zero projection count marker
    extra : dict
            Unused keyword arguments
    '''
    cmap = getattr(cm, color_map)
    cnt = cnt.astype(numpy.float)
    nhist = cnt.copy()
    if nhist.min() != nhist.max():
        nhist -= nhist.min()
        nhist /= nhist.max()
    ax = mplot3d.Axes3D(fig)
    data = numpy.zeros((len(angs), 3))
    for i in xrange(len(angs)):
        data[i, :] = spider_transforms.euler_to_vector(*angs[i, :])
    nonzero = numpy.nonzero(cnt)
    ax.scatter3D(data[nonzero, 0].ravel(), data[nonzero, 1].ravel(), data[nonzero, 2].ravel(), c=nhist, cmap=cmap)
    if not hide_zero_marker:
        nonzero = numpy.nonzero(cnt==0)
        if len(nonzero) > 0:
            ax.scatter3D(data[nonzero, 0].ravel(), data[nonzero, 1].ravel(), data[nonzero, 2].ravel(), color=cm.gray(0.5), marker='x') # @UndefinedVariable

def chimera_bild(angs, cnt, output, particle_diameter=60.0, particle_center=0.0, radius_frac=1.0, width_frac=0.5, color_map='cool', view_resolution=3, **extra):
    '''Write out angular histogram as a Chimera BILD file

    :Parameters:

    angs : array
           Array of view angles
    cnt : array
          Histogram for each view angle
    output : str
             Output filename
    particle_diameter : float
                        Diameter of particle in angstroms
    particle_center : float
                      Center of particle in angstroms
    radius_frac : float
                  Radius scaling factor
    width_frac : float
                 Cylinder width scaling factor
    color_map : str
                Name of color map
    view_resolution : int
                      HealPix resolution
    extra : dict
            Unused keyword arguments
    '''
    #double offset = ori_size * pixel_size / 2.;
    cmap = getattr(cm,
color_map) output = os.path.splitext(output)[0]+'.bild' fout = open(output, 'w') maxcnt = cnt.max() particle_radius = particle_diameter/2.0 width = width_frac * numpy.pi*particle_radius/healpix.sampling(view_resolution) try: for i in xrange(len(angs)): val = cnt[i]/float(maxcnt) r, g, b = cmap(val)[:3] fout.write('.color %f %f %f\n'%(r, g, b)) v1,v2,v3 = spider_transforms.euler_to_vector(*angs[i, :]) length = particle_radius + radius_frac * particle_radius * val; diff = particle_radius-length if abs(diff*v1) < 0.01 and abs(diff*v2) < 0.01 and abs(diff*v3) < 0.01: continue fout.write('.cylinder %f %f %f %f %f %f %d\n'%(particle_radius*v1+particle_center, particle_radius*v2+particle_center, particle_radius*v3+particle_center, length*v1+particle_center, length*v2+particle_center, length*v3+particle_center, width)) finally: fout.close() def plot_angles(angs, hist, mapargs, color_map='cool', area_mult=1.0, alpha=0.9, hide_zero_marker=False, use_scale=False, label_view=[], **extra): ''' Plot the angular histogram using a map projection from basemap .. note:: Basemap uses longitude latitude conventions, but the given angles are in colatitude, longitude convention. :Parameters: angs : array Array of view angles cnt : array Histogram for each view angle mapargs : dict Arguments specific to a map projection in basemap color_map : str Name of color map area_mult : float Scaling factor for size display alpha : float Transparency factor hide_zero_marker : bool If true, hide the zero projection count marker use_scale : bool If true, then display scale for size and color label_view : list Label each view with text extra : dict Unused keyword arguments ''' cmap = getattr(cm, color_map) m = basemap.Basemap(**mapargs) # Y -> latitude # Z -> longitude #longitude, latitude = 90-colatitude x, y = m(angs[:, 1], 90.0-angs[:, 0]) sel = hist < 1 hist = hist.astype(numpy.float) s = numpy.sqrt(hist)*area_mult nhist = hist.copy() nhist-=nhist.min() nhist/=nhist.max() m.drawparallels(numpy.arange(-90.,120.,30.)) m.drawmeridians(numpy.arange(0.,420.,60.)) im = m.scatter(x, y, s=s, marker="o", c=cmap(nhist), alpha=alpha, edgecolors='none') font_tiny=matplotlib.font_manager.FontProperties() font_tiny.set_size('xx-small') if len(label_view) > 0: for i in label_view: if i > len(angs): _logger.warn("Cannot label view: %d when there are only %d views (skipping)"%(i, len(angs))) continue ytext = -15 if i%2==0 else 15 pylab.annotate('%d: %.1f,%.1f'%(i+1, angs[i, 0], angs[i, 1]), xy=(x[i],y[i]), xycoords='data', xytext=(-15, ytext), textcoords='offset points', arrowprops=dict(arrowstyle="->"), fontproperties =font_tiny ) if numpy.sum(sel) > 0 and not hide_zero_marker: im = m.scatter(x[sel], y[sel], numpy.max(s), marker="x", c=cm.gray(0.5))#@UndefinedVariable if not use_scale: im = matplotlib.cm.ScalarMappable(cmap=cmap, norm=matplotlib.colors.Normalize()) im.set_array(hist) m.colorbar(im, "right", size="3%", pad='1%') else: fontP=matplotlib.font_manager.FontProperties() fontP.set_size('small') inc = len(hist)/10 lines = [] labels = [] idx = numpy.argsort(hist)[::-1] for j in xrange(0, len(hist), inc): i=idx[j] labels.append("%s"%hist[i]) lines.append(matplotlib.lines.Line2D(range(1), range(1), color=cmap(nhist[i]), marker='o', markersize=s[i]/2, linestyle='none', markeredgecolor='white')) if numpy.sum(sel) > 0 and not hide_zero_marker: i=idx[len(idx)-1] labels.append("%s"%hist[i]) lines.append(matplotlib.lines.Line2D(range(1), range(1), color=cm.gray(0.5), marker='x', markersize=numpy.max(s)/5, linestyle='none')) 
                 #@UndefinedVariable
    pylab.legend(tuple(lines), tuple(labels), numpoints=1, frameon=False, loc='center left', bbox_to_anchor=(1, 0.5), prop=fontP)
    #l.get_lines()[0]._legmarker.set_ms(numpy.max(s))

def projection_args(projection, lat_zero, lon_zero, ll_lon, ll_lat, ur_lon, ur_lat, proj_width=0, proj_height=0, boundinglat=0, **extra):
    ''' Get default values for various projection types
    
    :Parameters:
    
    projection : str
                 Name of the map projection mode
    lat_zero : float
               Latitude for axis zero
    lon_zero : float
               Longitude for axis zero
    ll_lon : float
             Longitude of lower left hand corner of the desired map domain (degrees)
    ll_lat : float
             Latitude of lower left hand corner of the desired map domain (degrees)
    ur_lon : float
             Longitude of upper right hand corner of the desired map domain (degrees)
    ur_lat : float
             Latitude of upper right hand corner of the desired map domain (degrees)
    proj_width : int
                 Width of desired map domain in projection coordinates (meters)
    proj_height : int
                  Height of desired map domain in projection coordinates (meters)
    boundinglat : int
                  Bounding latitude for npstere,spstere,nplaea,splaea,npaeqd,spaeqd
    extra : dict
            Unused keyword arguments
    
    :Returns:
    
    args : dict
           Keyword dictionary for each parameter value pair
    '''
    
    if projection == 'hammer':
        lat_zero = -90.0 if not lat_zero else float(lat_zero)
        lon_zero = 90.0 if not lon_zero else float(lon_zero)
        boundinglat = 0 if not boundinglat else float(boundinglat)
    elif projection == 'ortho':
        lat_zero = 90.0 if not lat_zero else float(lat_zero)
        lon_zero = 0.0 if not lon_zero else float(lon_zero)
        boundinglat = 0 if not boundinglat else float(boundinglat)
    elif projection == 'npstere':
        lat_zero = 90.0 if not lat_zero else float(lat_zero)
        lon_zero = 1.0 if not lon_zero else float(lon_zero)
        boundinglat = 0.000000001 if not boundinglat else float(boundinglat)
    else:
        if not lat_zero or not lon_zero:
            _logger.warning("No default for %s projection - setting to zeros"%projection)
        lat_zero = 0.0 if not lat_zero else float(lat_zero)
        lon_zero = 0.0 if not lon_zero else float(lon_zero)
    _logger.info("Map Projection: %s"%projection)
    _logger.info("Longitude 0: %f - Latitude 0: %f - Bounding Lat: %f"%(lon_zero, lat_zero, boundinglat))
    param = dict(projection=projection, lat_0=lat_zero, lon_0=lon_zero,
                 llcrnrlon=ll_lon, llcrnrlat=ll_lat, urcrnrlon=ur_lon, urcrnrlat=ur_lat, celestial=False)
    if proj_width > 0: param['width'] = proj_width
    if proj_height > 0: param['height'] = proj_height
    if boundinglat > 0: param['boundinglat'] = boundinglat
    return param

def angular_histogram(angs, view_resolution=3, disable_mirror=False, use_mirror=True, **extra):
    ''' Discretize the angles using healpix and tabulate an angular histogram
    
    :Parameters:
    
    angs : array
           Array of angles (theta, phi) in degrees
    view_resolution : int
                      Healpix resolution where (2) 15 deg, (3) 7.5 deg ...
    disable_mirror : bool
                     Use the full sphere for counting, not half sphere
    use_mirror : bool
                 Display views on both hemispheres
    extra : dict
            Unused keyword arguments
    
    :Returns:
    
    angs : array
           Discretized angles
    count : array
            Number of projections for each angle
    '''
    
    if view_resolution == 0: return angs, numpy.ones(len(angs))
    total = healpix.res2npix(view_resolution, not disable_mirror)
    _logger.info("Healpix order %d gives %d views"%(view_resolution, total))
    total = healpix.res2npix(view_resolution)
    pix = healpix.ang2pix(view_resolution, numpy.deg2rad(angs))#, half=not disable_mirror)
    count1 = numpy.bincount(pix)
    count = numpy.zeros(total, dtype=numpy.int)
    sel = numpy.nonzero(count1)[0]
    count[sel] = count1[sel]
    pix = numpy.arange(total, dtype=numpy.int)
    if not disable_mirror:
        mpix = healpix.pix2mirror(view_resolution, pix)
        for i in xrange(len(pix)):
            if i == mpix[i]: continue
            count[i] += count[mpix[i]]
            count[mpix[i]] = count[i]
    if not use_mirror:
        total = healpix.res2npix(view_resolution, True, True)
        pix = pix[:total]
        count = count[:total]
    #pix = numpy.nonzero(count)[0]
    #count = count[pix].copy().squeeze()
    angs = numpy.rad2deg(healpix.pix2ang(view_resolution, pix))
    return angs, count

def read_angles(filename, header=None, selection_file="", **extra):
    ''' Read in an alignment file and apply optional selection file
    
    :Parameters:
    
    filename : str
               Filename for alignment file
    header : int
             Header for alignment file
    selection_file : str
                     Selection file for projections in alignment file
    extra : dict
            Unused keyword arguments
    
    :Returns:
    
    angles : array
             Array of angles for each projection
    '''
    
    selection_file, sheader = format_utility.parse_header(selection_file)
    select = None
    if selection_file != "":
        if os.path.splitext(selection_file)[1] == '.mat':
            select = scipy.io.loadmat(selection_file)
            select = select[sheader[0]]
        else:
            select, sheader = format.read(selection_file, ndarray=True)
            select = select[:, sheader.index('id')]
    if format.get_format(filename) == format.star:
        align = format.read(filename, numeric=True)
        if select is None:
            select = xrange(len(align))
            angles = numpy.zeros((len(align), 2))
        else:
            select -= 1
            angles = numpy.zeros((len(select), 2))
        for j, i in enumerate(select):
            angles[j] = (align[i].rlnAngleTilt, align[i].rlnAngleRot)
    else:
        if select is not None:
            align = format.read_alignment(filename, header=header, map_ids='id')
            angles = numpy.zeros((len(select), 2))
            for j, i in enumerate(select):
                angles[j] = (align[i].theta, align[i].phi)
        else:
            align = format.read_alignment(filename, header=header)
            angles = numpy.zeros((len(align), 2))
            for i in xrange(len(align)):
                angles[i] = (align[i].theta, align[i].phi)
    return angles

def setup_options(parser, pgroup=None, main_option=False):
    # Collection of options necessary to use functions in this script
    
    from ..core.app.settings import OptionGroup
    group = OptionGroup(parser, "Plot", "Options to control creation of the coverage plot", id=__name__)
    group.add_option("-p", projection="npstere", help="Map projection type")
    group.add_option("-d", dpi=300, help="Resolution of the image in dots per inch")
    group.add_option("", count_mode=('Shape', 'Color', 'Both'), help="Mode to measure the number of projections per view", default=2)
    group.add_option("", hide_zero_marker=False, help="Hide the zero markers")
    group.add_option("", color_map='cool', help="Set the color map")
    pgroup.add_option_group(group)
    
    group = OptionGroup(parser, "Histogram", "Options to control angular histogram")
    group.add_option("-r", view_resolution=3, help="Group views into a coarse grid: (2) 15 deg, (3) 7.5 deg ...")
    group.add_option("", disable_mirror=False, help="Disable mirroring over the equator for counting")
    pgroup.add_option_group(group)
    
    group = OptionGroup(parser, "Chimera", "Options to control chimera bild output")
    group.add_option("", chimera=False, help="Write out Chimera bild file")
    group.add_option("", particle_diameter=320.0, help="Radius from center for ball projections")
    group.add_option("", particle_center=0.0, help="Offset from center for ball projections")
    pgroup.add_option_group(group)
    
    group = OptionGroup(parser, "Projection", "Options to control map projection output")
    group.add_option("-a", area_mult=1.0, help="Circle area multiplier")
    group.add_option("", alpha=0.9, help="Transparency of the marker (1.0 = solid, 0.0 = no color)")
    group.add_option("", label_view=[], help="List of views to label with number and Euler Angles (theta,phi)")
    group.add_option("", use_scale=False, help="Display scale and color instead of color bar")
    group.add_option("", use_mirror=False, help="Display projections on both hemispheres")
    pgroup.add_option_group(group)
    
    group = OptionGroup(parser, "Layout", "Options to control the projection layout")
    group.add_option("", lon_zero="", help="Longitude for axis zero (empty for default values determined by projection)")
    group.add_option("", lat_zero="", help="Latitude for axis zero (empty for default values determined by projection)")
    group.add_option("", ll_lat=-90.0, help="Latitude of lower left hand corner of the desired map domain (degrees)")
    group.add_option("", ll_lon=-180.0, help="Longitude of lower left hand corner of the desired map domain (degrees)")
    group.add_option("", ur_lat=90.0, help="Latitude of upper right hand corner of the desired map domain (degrees)")
    group.add_option("", ur_lon=180.0, help="Longitude of upper right hand corner of the desired map domain (degrees)")
    group.add_option("", boundinglat="", help="Bounding latitude for npstere,spstere,nplaea,splaea,npaeqd,spaeqd")
    group.add_option("", proj_width=0, help="Width of desired map domain in projection coordinates (meters)")
    group.add_option("", proj_height=0, help="Height of desired map domain in projection coordinates (meters)")
    pgroup.add_option_group(group)
    
    if main_option:
        pgroup.add_option("-i", input_files=[], help="List of filenames for the input stacks or selection file", required_file=True, gui=dict(filetype="file-list"))
        pgroup.add_option("-o", output="", help="Output filename for the relion selection file", gui=dict(filetype="save"), required_file=True)
        pgroup.add_option("-s", selection_file="", help="Selection file", gui=dict(filetype="open"))
        parser.change_default(log_level=3)

def check_options(options, main_option=False):
    #Check if the option values are valid
    
    from ..core.app.settings import OptionValueError
    if options.view_resolution < 1:
        raise OptionValueError, "--view-resolution must have a value greater than 0, found: %d"%options.view_resolution
    if options.chimera:
        if options.particle_diameter == 0:
            raise OptionValueError, "--particle-diameter must be greater than 0"
        if options.particle_center == 0:
            options.particle_center = options.particle_diameter/2.0
    if options.boundinglat:
        try: float(options.boundinglat)
        except: raise OptionValueError, "--boundinglat must be a floating point number"
    if options.lon_zero:
        try: float(options.lon_zero)
        except: raise OptionValueError, "--lon-zero must be a floating point number"
    if options.lat_zero:
        try: float(options.lat_zero)
        except: raise OptionValueError, "--lat-zero must be a floating point number"
    maps = [m for m in matplotlib.cm.datad if not m.endswith("_r")]
    if options.color_map not in set(maps):
        raise OptionValueError, "%s is not a supported color map for --color-map, supported colormaps are\n%s"%(options.color_map, "\n".join(maps))
    if options.projection not in set(basemap._projnames.keys()) and options.projection != '3d':
        raise OptionValueError, "%s is not a supported projection for --projection, supported projections are\n %s"%(options.projection, basemap.supported_projections)

'''
projection_params = {'cyl'    : 'corners only (no width/height)',
                     'merc'   : 'corners plus lat_ts (no width/height)',
                     'tmerc'  : 'lon_0,lat_0',
                     'omerc'  : 'lon_0,lat_0,lat_1,lat_2,lon_1,lon_2,no_rot',
                     'mill'   : 'corners only (no width/height)',
                     'gall'   : 'corners only (no width/height)',
                     'lcc'    : 'lon_0,lat_0,lat_1,lat_2',
                     'laea'   : 'lon_0,lat_0',
                     'nplaea' : 'bounding_lat,lon_0,lat_0,no corners or width/height',
                     'splaea' : 'bounding_lat,lon_0,lat_0,no corners or width/height',
                     'eqdc'   : 'lon_0,lat_0,lat_1,lat_2',
                     'aeqd'   : 'lon_0,lat_0',
                     'npaeqd' : 'bounding_lat,lon_0,lat_0,no corners or width/height',
                     'spaeqd' : 'bounding_lat,lon_0,lat_0,no corners or width/height',
                     'aea'    : 'lon_0,lat_0,lat_1',
                     'stere'  : 'lon_0,lat_0,lat_ts',
                     'npstere': 'bounding_lat,lon_0,lat_0,no corners or width/height',
                     'spstere': 'bounding_lat,lon_0,lat_0,no corners or width/height',
                     'cass'   : 'lon_0,lat_0',
                     'poly'   : 'lon_0,lat_0',
                     'ortho'  : 'lon_0,lat_0,llcrnrx,llcrnry,urcrnrx,urcrnry,no width/height',
                     'geos'   : 'lon_0,satellite_height,llcrnrx,llcrnry,urcrnrx,urcrnry,no width/height',
                     'nsper'  : 'lon_0,satellite_height,llcrnrx,llcrnry,urcrnrx,urcrnry,no width/height',
                     'sinu'   : 'lon_0,lat_0,no corners or width/height',
                     'moll'   : 'lon_0,lat_0,no corners or width/height',
                     'hammer' : 'lon_0,lat_0,no corners or width/height',
                     'robin'  : 'lon_0,lat_0,no corners or width/height',
                     'vandg'  : 'lon_0,lat_0,no corners or width/height',
                     'mbtfpq' : 'lon_0,lat_0,no corners or width/height',
                     'gnom'   : 'lon_0,lat_0',
                     }
'''

def main():
    #Main entry point for this script
    
    program.run_hybrid_program(__name__,
        description = '''Plot angular coverage on a map projection
                        
                         Example: Map a relion star file
                         
                         $ ara-coverage data.star -o plot.png
                         
                         Example: Chimera Bild file
                         
                         $ ara-coverage data.star -o plot.bild --chimera
                         
                         Example: 3D Scatter
                         
                         $ ara-coverage data.star -o plot.png --projection 3d
                      ''',
        supports_MPI = False,
        use_version = False,
    )

def dependents(): return []

if __name__ == "__main__": main()
ezralanglois/arachnid
arachnid/util/coverage.py
Python
gpl-2.0
30,334
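
# A minimal usage sketch for the coverage helpers above, assuming the module is
# importable as arachnid.util.coverage and that its healpix dependency is
# available; the angle array here is made up for illustration.
#
#   import numpy
#   from arachnid.util.coverage import projection_args, angular_histogram
#
#   angs = numpy.random.uniform(0.0, 180.0, size=(100, 2))     # fake (theta, phi) pairs in degrees
#   angs_d, count = angular_histogram(angs, view_resolution=3) # bin views on a healpix grid
#   param = projection_args('npstere', "", "", -180.0, -90.0, 180.0, 90.0)
#   # `param` can then be passed straight to mpl_toolkits.basemap.Basemap(**param).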
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################

"""Functions for evaluating results on Cityscapes."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import cv2
import logging
import os
import uuid

import pycocotools.mask as mask_util

from detectron.core.config import cfg
from detectron.datasets.dataset_catalog import get_raw_dir

logger = logging.getLogger(__name__)


def evaluate_masks(
    json_dataset,
    all_boxes,
    all_segms,
    output_dir,
    use_salt=True,
    cleanup=False
):
    if cfg.CLUSTER.ON_CLUSTER:
        # On the cluster avoid saving these files in the job directory
        output_dir = '/tmp'
    res_file = os.path.join(
        output_dir, 'segmentations_' + json_dataset.name + '_results')
    if use_salt:
        res_file += '_{}'.format(str(uuid.uuid4()))
    res_file += '.json'

    results_dir = os.path.join(output_dir, 'results')
    if not os.path.exists(results_dir):
        os.mkdir(results_dir)

    os.environ['CITYSCAPES_DATASET'] = get_raw_dir(json_dataset.name)
    os.environ['CITYSCAPES_RESULTS'] = output_dir

    # Load the Cityscapes eval script *after* setting the required env vars,
    # since the script reads their values into global variables (at load time).
    import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling \
        as cityscapes_eval

    roidb = json_dataset.get_roidb()
    for i, entry in enumerate(roidb):
        im_name = entry['image']

        basename = os.path.splitext(os.path.basename(im_name))[0]
        txtname = os.path.join(output_dir, basename + 'pred.txt')
        with open(txtname, 'w') as fid_txt:
            if i % 10 == 0:
                logger.info('i: {}: {}'.format(i, basename))
            for j in range(1, len(all_segms)):
                clss = json_dataset.classes[j]
                clss_id = cityscapes_eval.name2label[clss].id
                segms = all_segms[j][i]
                boxes = all_boxes[j][i]
                if segms == []:
                    continue
                masks = mask_util.decode(segms)

                for k in range(boxes.shape[0]):
                    score = boxes[k, -1]
                    mask = masks[:, :, k]
                    pngname = os.path.join(
                        'results',
                        basename + '_' + clss + '_{}.png'.format(k))
                    # write txt
                    fid_txt.write('{} {} {}\n'.format(pngname, clss_id, score))
                    # save mask
                    cv2.imwrite(os.path.join(output_dir, pngname), mask * 255)
    logger.info('Evaluating...')
    cityscapes_eval.main([])
    return None
facebookresearch/Detectron
detectron/datasets/cityscapes_json_dataset_evaluator.py
Python
apache-2.0
3,355
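
# A small, self-contained illustration of the mask decoding step used above:
# pycocotools run-length masks decode to HxWxN uint8 arrays, one channel per
# instance, which evaluate_masks then writes out one PNG at a time. The toy
# mask here is made up.
import numpy as np
import pycocotools.mask as mask_util

rle = mask_util.encode(np.asfortranarray(np.eye(4, dtype=np.uint8)))  # toy 4x4 mask
decoded = mask_util.decode([rle])                                     # shape (4, 4, 1)
assert decoded[:, :, 0].sum() == 4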
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from ai.h2o.sparkling.ml.params.HasInputCols import HasInputCols
from ai.h2o.sparkling.ml.params.HasOutputCol import HasOutputCol


class H2ODimReductionExtraParams(HasInputCols, HasOutputCol):
    pass
h2oai/sparkling-water
py-scoring/src/ai/h2o/sparkling/ml/params/H2ODimReductionExtraParams.py
Python
apache-2.0
988
'''
The Pitt API, to access workable data of the University of Pittsburgh
Copyright (C) 2015 Ritwik Gupta

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
'''

import grequests
import requests
import json
import time

session = requests.session()

CODES = [
    'ADMJ','ADMPS','AFRCNA','AFROTC','ANTH','ARABIC','ARTSC','ASL','ASTRON','ATHLTR','BACC','BCHS','BECN','BFIN','BHRM','BIND',
    'BIOENG','BIOETH','BIOINF','BIOSC','BIOST','BMIS','BMKT','BOAH','BORG','BQOM','BSEO','BSPP','BUS','BUSACC','BUSADM','BUSBIS',
    'BUSECN','BUSENV','BUSERV','BUSFIN','BUSHRM','BUSMKT','BUSORG','BUSQOM','BUSSCM','BUSSPP','CDACCT','CDENT','CEE','CGS','CHE',
    'CHEM','CHIN','CLASS','CLRES','CLST','CMMUSIC','CMPBIO','COE','COEA','COEE','COMMRC','CS','CSD','DENHYG','DENT','DIASCI','DSANE',
    'EAS','ECE','ECON','EDUC','ELI','EM','ENDOD','ENGCMP','ENGFLM','ENGLIT','ENGR','ENGSCI','ENGWRT','ENRES','EOH','EPIDEM','FACDEV',
    'FILMG','FILMST','FP','FR','FTADMA','FTDA','FTDB','FTDC','FTDR','GEOL','GER','GERON','GREEK','GREEKM','GSWS','HAA','HIM','HINDI',
    'HIST','HONORS','HPA','HPM','HPS','HRS','HUGEN','IDM','IE','IL','IMB','INFSCI','INTBP','IRISH','ISB','ISSP','ITAL','JPNSE','JS',
    'KOREAN','LATIN','LAW','LCTL','LDRSHP','LEGLST','LING','LIS','LSAP','MATH','ME','MED','MEDEDU','MEMS','MILS','MOLBPH','MSCBIO',
    'MSCBMP','MSCMP','MSE','MSIMM','MSMBPH','MSMGDB','MSMPHL','MSMVM','MSNBIO','MUSIC','NEURO','NPHS','NROSCI','NUR','NURCNS','NURNM',
    'NURNP','NURSAN','NURSP','NUTR','ODO','OLLI','ORBIOL','ORSUR','OT','PAS','PEDC','PEDENT','PERIO','PERS','PETE','PHARM','PHIL','PHYS',
    'PIA','POLISH','PORT','PROSTH','PS','PSY','PSYC','PSYED','PT','PUBHLT','PUBSRV','REHSCI','REL','RELGST','RESTD','RUSS','SA','SERCRO',
    'SLAV','SLOVAK','SOC','SOCWRK','SPAN','STAT','SWAHIL','SWBEH','SWCOSA','SWE','SWGEN','SWINT','SWRES','SWWEL','TELCOM','THEA','TURKSH',
    'UKRAIN','URBNST','VIET']


def get_books_data(courses_info):
    """Returns list of dictionaries of book information."""
    request_objs = []
    course_names = []  # need to save these
    instructors = []   # need to save these
    for i in range(len(courses_info)):
        book_info = courses_info[i]
        print(book_info)
        course_names.append(book_info['course_name'])
        instructors.append(book_info['instructor'])
        request_objs.append(grequests.get(get_department_url(book_info['department_code'], book_info['term']), timeout=10))
    responses = grequests.map(request_objs)  # parallel requests

    course_ids = []
    j = 0  # counter to get course_names and instructors
    for r in responses:
        json_data = r.json()
        sections = []
        course_id = ''
        for course_dict in (json_data):
            if course_dict['id'] == course_names[j]:
                sections = course_dict['sections']
                break
        for section in sections:
            if section['instructor'] == instructors[j]:
                course_id = section['id']
                break
        course_ids.append(course_id)
        j += 1

    book_url = 'http://pitt.verbacompare.com/comparison?id='
    if (len(course_ids) > 1):
        for course_id in course_ids:
            book_url += course_id + '%2C'  # format url for multiple classes
    else:
        book_url += course_ids[0]  # just one course

    book_data = session.get(book_url).text
    books_list = []
    try:
        start = book_data.find('Verba.Compare.Collections.Sections') + len('Verba.Compare.Collections.Sections') + 1
        end = book_data.find('}]}]);') + 4
        info = [json.loads(book_data[start:end])]
        for i in range(len(info[0])):
            for j in range(len(info[0][i]['books'])):
                book_dict = {}
                big_dict = info[0][i]['books'][j]
                book_dict['isbn'] = big_dict['isbn']
                book_dict['citation'] = big_dict['citation']
                book_dict['title'] = big_dict['title']
                book_dict['edition'] = big_dict['edition']
                book_dict['author'] = big_dict['author']
                books_list.append(book_dict)
    except ValueError as e:
        print('Error while decoding response, try again!')
        raise e

    return books_list  # return list of dicts of books


def get_department_url(department_code, term='2600'):  # 2600 --> spring 2017
    """Returns url for given department code."""
    department_number = CODES.index(department_code) + 22399
    if department_number > 22462:
        department_number += 2  # between codes DSANE and EAS 2 id numbers are skipped.
    if department_number > 22580:
        department_number += 1  # between codes PUBSRV and REHSCI 1 id number is skipped.
    url = 'http://pitt.verbacompare.com/compare/courses/' + '?id=' + str(department_number) + '&term_id=' + term
    return url
Rahi374/PittAPI
PittAPI/textbook.py
Python
gpl-2.0
5,449
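
# A quick sketch of the department-id arithmetic in get_department_url above:
# the ID is 22399 plus the index into CODES, with the two documented gaps added
# past DSANE/EAS and PUBSRV/REHSCI. (Assumes the package is importable as
# PittAPI.textbook; the actual verbacompare IDs are whatever the service assigns.)
from PittAPI.textbook import get_department_url

print(get_department_url('ADMJ'))
# -> http://pitt.verbacompare.com/compare/courses/?id=22399&term_id=2600
print(get_department_url('VIET', term='2600'))  # last code, so both gaps are applied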
from django import forms
from django.contrib.admin.widgets import AdminSplitDateTime

import object_tools


class TestForm(forms.Form):
    pass


class TestMediaForm(forms.Form):
    media_field = forms.fields.DateTimeField(
        widget=AdminSplitDateTime,
    )


class TestTool(object_tools.ObjectTool):
    name = 'test_tool'
    label = 'Test Tool'
    form_class = TestForm

    def view(self, request, extra_context=None):
        pass


class TestMediaTool(object_tools.ObjectTool):
    name = 'test_media_tool'
    label = ''
    form_class = TestMediaForm

    def view(self):
        pass


class TestInvalidTool(object_tools.ObjectTool):
    pass


try:
    from django.apps import config
except ImportError:
    config = None

if config:
    def ready(cls):
        object_tools.tools.register(TestTool)
        object_tools.tools.register(TestMediaTool)
    object_tools.apps.ObjectToolsAppConfig.ready = ready
else:
    object_tools.tools.register(TestTool)
    object_tools.tools.register(TestMediaTool)
sky-chen/django-object-tools
object_tools/tests/tools.py
Python
bsd-3-clause
1,025
# Copyright 2013 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

from tests import TestCase

from quodlibet.util.tags import sortkey, readable


class TTags(TestCase):

    def test_basic(self):
        t = ["album", "title", "artist", "part", "musicbrainz_trackid"]
        t.sort(key=sortkey)
        expected = ["title", "artist", "album", "part", "musicbrainz_trackid"]
        self.failUnlessEqual(t, expected)

    def test_readable(self):
        self.assertEqual(readable("artistsort"), "artist (sort)")
        self.assertEqual(readable("~people:roles"), "people (roles)")
        self.assertEqual(readable("~peoplesort:roles"), "people (sort, roles)")
        self.assertEqual(readable("artist", plural=True), "artists")
        self.assertEqual(readable("artistsort", plural=True), "artists (sort)")
        self.assertEqual(readable("~"), "Invalid tag")
quodlibet/quodlibet
tests/test_util_tags.py
Python
gpl-2.0
1,086
from translator.utils import (
    HTTPException, language_options, language_options_html, parse_javascript,
    register_filters)


def test_http_exception():
    try:
        raise HTTPException('An HTTP exception', 500)
        assert False
    except HTTPException as e:
        assert 'An HTTP exception' == e.message
        assert 500 == e.status_code


def test_language_options():
    options = language_options()
    for lang, name in options:
        assert isinstance(lang, str)
        assert isinstance(name, str)


def test_language_options_html():
    options = language_options_html()
    for option in options.split('\n'):
        assert option.startswith('<option')


def test_parse_javascript():
    assert parse_javascript('[,1,,2,,,3]') == \
        [None, 1, None, 2, None, None, 3]


def test_register_filters(testapp):
    register_filters(testapp.application)
suminb/translator
tests/test_utils.py
Python
gpl-3.0
886
# -*- coding: utf-8 -*-
# emma
#
# Copyright (C) 2006 Florian Schmidt (flo@fastflo.de)
#               2014 Nickolay Karnaukhov (mr.electronick@gmail.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

import os
import sys

version = "0.7"

new_instance = None
our_module = None

re_src_after_order_end = "(?:limit.*|procedure.*|for update.*|lock in share mode.*|[ \r\n\t]*$)"
re_src_after_order = "(?:[ \r\n\t]" + re_src_after_order_end + ")"
re_src_query_order = "(?is)(.*order[ \r\n\t]+by[ \r\n\t]+)(.*?)([ \r\n\t]*" + \
                     re_src_after_order_end + ")"

emmalib_file = os.path.abspath(__file__)
emma_path = os.path.dirname(emmalib_file)

if os.path.isdir("emmalib"):
    # svn dev env
    emma_share_path = "emmalib"
    icons_path = "icons"
    glade_path = emma_share_path
else:
    emma_share_path = os.path.join(sys.prefix, "share/emma/")
    icons_path = os.path.join(emma_share_path, "icons")
    glade_path = os.path.join(emma_share_path, "glade")

last_update = 0
fastflo/emma
emmalib/Constants.py
Python
gpl-2.0
1,656
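
# A small demonstration of the ORDER BY regex defined above, assuming the
# module is importable as emmalib.Constants; the query string is made up.
# Group 2 captures the ORDER BY terms, group 3 the trailing LIMIT/lock clause.
import re
from emmalib import Constants

m = re.match(Constants.re_src_query_order, "select * from t order by name desc limit 5")
assert m and m.group(2) == "name desc"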
from xmodule.modulestore.tests.django_utils import xml_store_config, mongo_store_config, draft_mongo_store_config
from django.conf import settings

TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT

TEST_DATA_XML_MODULESTORE = xml_store_config(TEST_DATA_DIR)
TEST_DATA_MONGO_MODULESTORE = mongo_store_config(TEST_DATA_DIR)
TEST_DATA_DRAFT_MONGO_MODULESTORE = draft_mongo_store_config(TEST_DATA_DIR)
IITBinterns13/edx-platform-dev
lms/djangoapps/courseware/tests/modulestore_config.py
Python
agpl-3.0
396
# -*- coding: utf-8 -*-
#
# Copyright © 2014 René Samselnig
#
# This file is part of Database Navigator.
#
# Database Navigator is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Database Navigator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Database Navigator.  If not, see <http://www.gnu.org/licenses/>.
#

__drivers__ = []

KIND_VALUE = 'value'
KIND_FOREIGN_KEY = 'foreign-key'
KIND_FOREIGN_VALUE = 'foreign-value'

IMAGE_VALUE = 'images/value.png'
IMAGE_FOREIGN_KEY = 'images/foreign-key.png'
IMAGE_FOREIGN_VALUE = 'images/foreign-value.png'

OPTION_URI_SINGLE_ROW_FORMAT = u'%s%s/?%s'
OPTION_URI_MULTIPLE_ROWS_FORMAT = u'%s%s?%s'

OPERATORS = {
    '=': lambda c, v: c.__eq__(v),
    '!=': lambda c, v: c.__ne__(v),
    '~': lambda c, v: c.like(v),
    '*': lambda c, v: c.like(v),
    '>': lambda c, v: c.__gt__(v),
    '>=': lambda c, v: c.__ge__(v),
    '<=': lambda c, v: c.__le__(v),
    '<': lambda c, v: c.__lt__(v),
    'in': lambda c, v: c.in_(v),
    ':': lambda c, v: c.in_(v)
}
resamsel/dbmanagr
src/dbmanagr/__init__.py
Python
gpl-3.0
1,450
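
# A short sketch of how the OPERATORS table above is typically applied: each
# entry turns a SQLAlchemy column plus a raw value into a filter expression.
# (Assumes SQLAlchemy is installed; the table and column here are made up.)
from sqlalchemy import Column, Integer, MetaData, Table
from dbmanagr import OPERATORS

users = Table('users', MetaData(), Column('id', Integer))
expr = OPERATORS['>='](users.c.id, 42)  # equivalent to users.c.id >= 42
print(str(expr))                        # "users.id >= :id_1"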
import numpy as np
import tensorflow as tf


def weight_variable(shape):
    """ Weight variables for connections. Returns a tensorflow variable. """
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    """ Bias variable for nodes. Returns a tensorflow variable."""
    initial = tf.random_uniform(shape=shape, minval=0.05, maxval=0.15)
    return tf.Variable(initial)


def fc_layer(input, shape):
    """ Implements a fully connected layer given shape. """
    weights = weight_variable(shape)
    bias = bias_variable([shape[1]])
    result = tf.matmul(input, weights) + bias
    return result


def conv_layer(input, filter_shape):
    """ Returns a Tensorflow object. Implements a convolutional layer
    with relu activation function and bias. """
    conv_weights = weight_variable(filter_shape)
    conv = tf.nn.conv2d(input, conv_weights, strides=[1, 1, 1, 1], padding='SAME')
    conv_bias = bias_variable([filter_shape[3]])
    conv_with_bias = tf.nn.bias_add(conv, conv_bias)
    conv_with_activation = tf.nn.relu(conv_with_bias)
    return conv_with_activation


def pool_layer(conv_layer, type="max", padding='SAME'):
    """ Returns a Tensorflow object. Implements either a max-pool or
    average-pool depending on the parameters. """
    if (type == "max"):
        return tf.nn.max_pool(conv_layer, ksize=[1, 2, 1, 1], strides=[1, 2, 1, 1], padding=padding)
    elif (type == "avg"):
        return tf.nn.avg_pool(conv_layer, ksize=[1, 2, 1, 1], strides=[1, 2, 1, 1], padding=padding)
AlperenAydin/GenreRecognition
modified_src/cnn_functions.py
Python
mit
1,559
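
# A minimal usage sketch for the layer helpers above, written against the
# TensorFlow 1.x graph API the module assumes; the module name, input shape,
# and class count are illustrative assumptions.
import tensorflow as tf
from cnn_functions import conv_layer, pool_layer, fc_layer

x = tf.placeholder(tf.float32, [None, 128, 1, 1])  # e.g. a 128-step signal
conv1 = conv_layer(x, [5, 1, 1, 8])                # 5x1 convolution, 8 filters
pool1 = pool_layer(conv1, type="max")              # 2x1 pooling halves height: 128 -> 64
flat = tf.reshape(pool1, [-1, 64 * 1 * 8])
logits = fc_layer(flat, [64 * 1 * 8, 10])          # 10-way genre scores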
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='DevCentrySSHServer',
            fields=[
                ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),
                ('name', models.CharField(verbose_name='Server name', max_length=40, unique=True)),
                ('ip', models.IPAddressField(verbose_name='Server IP', unique=True)),
                ('uniq_key', models.UUIDField(verbose_name='Unique key')),
                ('access_token', models.CharField(verbose_name='Access token', max_length=128)),
            ],
            options={
                'verbose_name': 'DevCentry SSH Server',
                'verbose_name_plural': 'DevCentry SSH Servers',
            },
        ),
    ]
zaabjuda/devcentry
internal_api/migrations/0001_initial.py
Python
gpl-3.0
949
import imp
import os
import sys
import warnings

from os.path import dirname, exists, join, isdir

from distutils.dep_util import newer
from distutils.sysconfig import get_config_var

from numpy.distutils import log
from numpy.distutils.system_info import get_info

# MismatchCAPIWarning is referenced below when the release flag is set;
# assuming setup_common defines it alongside is_released.
from setup_common import is_released, MismatchCAPIWarning

# XXX: ugly, we use a class to avoid calling twice some expensive functions in
# config.h/numpyconfig.h. I don't see a better way because distutils force
# config.h generation inside an Extension class, and as such sharing
# configuration informations between extensions is not easy.
# Using a pickled-based memoize does not work because config_cmd is an instance
# method, which cPickle does not like.
try:
    import cPickle as _pik
except ImportError:
    import pickle as _pik
import copy

PYTHON_HAS_UNICODE_WIDE = True

def ndarray_include_dir():
    info = get_info('ndarray')
    path = info['include_dirs'][0]
    assert isdir(path)
    return path

def ndarray_lib_dir():
    info = get_info('ndarray')
    path = info['library_dirs'][0]
    assert isdir(path)
    return path

def pythonlib_dir():
    """return path where libpython* is."""
    if sys.platform == 'win32':
        return join(sys.prefix, "libs")
    else:
        return get_config_var('LIBDIR')

def is_npy_no_signal():
    """Return True if the NPY_NO_SIGNAL symbol must be defined in configuration
    header."""
    return sys.platform == 'win32'

def is_npy_no_smp():
    """Return True if the NPY_NO_SMP symbol must be defined in public header
    (when SMP support cannot be reliably enabled)."""
    # Python 2.3 causes a segfault when
    #  trying to re-acquire the thread-state
    #  which is done in error-handling
    #  ufunc code.  NPY_ALLOW_C_API and friends
    #  cause the segfault. So, we disable threading
    #  for now.
    if sys.version[:5] < '2.4.2':
        return True
    # Perhaps a fancier check is in order here.
    #  so that threads are only enabled if there
    #  are actually multiple CPUS? -- but
    #  threaded code can be nice even on a single
    #  CPU so that long-calculating code doesn't
    #  block.
    try:
        nosmp = os.environ['NPY_NOSMP']
        return True
    except KeyError:
        return False

def win32_checks(deflist):
    from numpy.distutils.misc_util import get_build_architecture
    a = get_build_architecture()

    # Distutils hack on AMD64 on windows
    print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %
          (a, os.name, sys.platform))
    if a == 'AMD64':
        deflist.append('DISTUTILS_USE_SDK')

def check_mathlib(config_cmd):
    # Testing the C math library
    mathlibs = []
    mathlibs_choices = [[], ['m'], ['cpml']]
    mathlib = os.environ.get('MATHLIB')
    if mathlib:
        mathlibs_choices.insert(0, mathlib.split(','))
    for libs in mathlibs_choices:
        if config_cmd.check_func("exp", libraries=libs, decl=True, call=True):
            mathlibs = libs
            break
    else:
        raise EnvironmentError("math library missing; rerun "
                               "setup.py after setting the "
                               "MATHLIB env variable")
    return mathlibs

def visibility_define(config):
    """Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty
    string)."""
    if config.check_compiler_gcc4():
        return '__attribute__((visibility("hidden")))'
    else:
        return ''

def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration, dot_join
    from numpy.distutils.system_info import get_info, default_lib_dirs

    config = Configuration('core', parent_package, top_path)
    local_dir = config.local_path
    codegen_dir = join(local_dir, 'code_generators')

    if is_released(config):
        warnings.simplefilter('error', MismatchCAPIWarning)

    generate_umath_py = join(codegen_dir, 'generate_umath.py')
    n = dot_join(config.name, 'generate_umath')
    generate_umath = imp.load_module('_'.join(n.split('.')),
                                     open(generate_umath_py, 'U'), generate_umath_py,
                                     ('.py', 'U', 1))

    header_dir = 'include/numpy'  # this is relative to config.path_in_package

    def generate_config_h(ext, build_dir):
        # update the module-level flag that is consulted when assembling
        # multiarray_src below; without this the assignments here would only
        # bind a local variable.
        global PYTHON_HAS_UNICODE_WIDE
        target = join(build_dir, header_dir, 'config.h')
        d = dirname(target)
        if not exists(d):
            os.makedirs(d)
        if newer(__file__, target):
            config_cmd = config.get_config_cmd()
            log.info('Generating %s', target)
            moredefs = []

            # Check math library and C99 math funcs availability
            mathlibs = check_mathlib(config_cmd)
            moredefs.append(('MATHLIB', ','.join(mathlibs)))

            # Signal check
            if is_npy_no_signal():
                moredefs.append('__NPY_PRIVATE_NO_SIGNAL')

            # Windows checks
            if sys.platform == 'win32' or os.name == 'nt':
                win32_checks(moredefs)

            # Inline check
            inline = config_cmd.check_inline()

            # Check whether we need our own wide character support
            if not config_cmd.check_decl('Py_UNICODE_WIDE', headers=['Python.h']):
                PYTHON_HAS_UNICODE_WIDE = True
            else:
                PYTHON_HAS_UNICODE_WIDE = False

            # Py3K check
            if sys.version_info[0] == 3:
                moredefs.append(('NPY_PY3K', 1))

            # Generate the config.h file from moredefs
            target_f = open(target, 'w')
            for d in moredefs:
                if isinstance(d, str):
                    target_f.write('#define %s\n' % (d))
                else:
                    target_f.write('#define %s %s\n' % (d[0], d[1]))

            # define inline to our keyword, or nothing
            target_f.write('#ifndef __cplusplus\n')
            if inline == 'inline':
                target_f.write('/* #undef inline */\n')
            else:
                target_f.write('#define inline %s\n' % inline)
            target_f.write('#endif\n')

            # add the guard to make sure config.h is never included directly,
            # but always through numpy_config.h
            target_f.write("""
#ifndef _NUMPY_CONFIG_H_
#error config.h should never be included directly, include numpy_config.h instead
#endif
""")

            target_f.close()
            print('File:', target)
            target_f = open(target)
            print(target_f.read())
            target_f.close()
            print('EOF')
        else:
            mathlibs = []
            target_f = open(target)
            for line in target_f.readlines():
                s = '#define MATHLIB'
                if line.startswith(s):
                    value = line[len(s):].strip()
                    if value:
                        mathlibs.extend(value.split(','))
            target_f.close()

        # Ugly: this can be called within a library and not an extension,
        # in which case there is no libraries attributes (and none is
        # needed).
        if hasattr(ext, 'libraries'):
            ext.libraries.extend(mathlibs)

        incl_dir = dirname(target)
        if incl_dir not in config.numpy_include_dirs:
            config.numpy_include_dirs.append(incl_dir)

        return target

    def generate_numpyconfig_h(ext, build_dir):
        """Depends on config.h: generate_config_h has to be called before !"""
        target = join(build_dir, header_dir, '_numpyconfig.h')
        d = dirname(target)
        if not exists(d):
            os.makedirs(d)
        if newer(__file__, target):
            config_cmd = config.get_config_cmd()
            log.info('Generating %s', target)
            moredefs = []

            if is_npy_no_signal():
                moredefs.append(('NPY_NO_SIGNAL', 1))

            if is_npy_no_smp():
                moredefs.append(('NPY_NO_SMP', 1))
            else:
                moredefs.append(('NPY_NO_SMP', 0))

            mathlibs = check_mathlib(config_cmd)

            # Check whether we can use inttypes (C99) formats
            if config_cmd.check_decl('PRIdPTR', headers=['inttypes.h']):
                moredefs.append(('NPY_USE_C99_FORMATS', 1))

            # visibility check
            hidden_visibility = visibility_define(config_cmd)
            moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))

            # Add the C API/ABI versions
            moredefs.append(('NUMPY_ABI_VERSION', '2.0.0'))
            moredefs.append(('NUMPY_API_VERSION', '2.0.0'))

            # Add moredefs to header
            target_f = open(target, 'w')
            for d in moredefs:
                if isinstance(d, str):
                    target_f.write('#define %s\n' % (d))
                else:
                    target_f.write('#define %s %s\n' % (d[0], d[1]))

            # Define __STDC_FORMAT_MACROS
            target_f.write("""
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif
""")
            target_f.close()

            # Dump the numpyconfig.h header to stdout
            print('File: %s' % target)
            target_f = open(target)
            print(target_f.read())
            target_f.close()
            print('EOF')
        config.add_data_files((header_dir, target))
        return target

    def generate_api_func(module_name):
        def generate_api(ext, build_dir):
            script = join(codegen_dir, module_name + '.py')
            sys.path.insert(0, codegen_dir)
            try:
                m = __import__(module_name)
                log.info('executing %s', script)
                h_file, c_file, doc_file = m.generate_api(
                    join(build_dir, header_dir))
            finally:
                del sys.path[0]
            config.add_data_files((header_dir, h_file),
                                  (header_dir, doc_file))
            return (h_file,)
        return generate_api

    generate_numpy_api = generate_api_func('generate_numpy_api')
    generate_ufunc_api = generate_api_func('generate_ufunc_api')

    config.add_include_dirs(join(local_dir, "src", "private"))
    config.add_include_dirs(join(local_dir, "src"))
    config.add_include_dirs(join(local_dir))
    config.add_include_dirs(ndarray_include_dir())

    # Multiarray version: this function is needed to build foo.c from foo.c.src
    # when foo.c is included in another file and as such not in the src
    # argument of build_ext command
    def generate_multiarray_templated_sources(ext, build_dir):
        from numpy.distutils.misc_util import get_cmd

        subpath = join('src', 'multiarray')
        sources = [join(local_dir, subpath, 'scalartypes.c.src'),
                   join(local_dir, subpath, 'arraytypes.c.src')]

        # numpy.distutils generate .c from .c.src in weird directories, we have
        # to add them there as they depend on the build_dir
        config.add_include_dirs(join(build_dir, subpath))
        cmd = get_cmd('build_src')
        cmd.ensure_finalized()
        cmd.template_sources(sources, ext)

    # umath version: this function is needed to build foo.c from foo.c.src
    # when foo.c is included in another file and as such not in the src
    # argument of build_ext command
    def generate_umath_templated_sources(ext, build_dir):
        from numpy.distutils.misc_util import get_cmd

        subpath = join('src', 'umath')
        sources = [join(local_dir, subpath, 'loops.c.src'),
                   join(local_dir, subpath, 'umathmodule.c.src')]

        # numpy.distutils generate .c from .c.src in weird directories, we have
        # to add them there as they depend on the build_dir
        config.add_include_dirs(join(build_dir, subpath))
        cmd = get_cmd('build_src')
        cmd.ensure_finalized()
        cmd.template_sources(sources, ext)

    def generate_umath_c(ext, build_dir):
        target = join(build_dir, header_dir, '__umath_generated.c')
        dir = dirname(target)
        if not exists(dir):
            os.makedirs(dir)
        script = generate_umath_py
        if newer(script, target):
            f = open(target, 'w')
            f.write(generate_umath.make_code(generate_umath.defdict,
                                             generate_umath.__file__))
            f.close()
        return []

    config.add_data_files('include/numpy/*.h')
    config.add_include_dirs(join('src', 'multiarray'))
    config.add_include_dirs(join('src', 'umath'))

    config.numpy_include_dirs.extend(config.paths('include'))

    deps = [join('include', 'numpy', '*object.h'),
            'include/numpy/fenv/fenv.c',
            'include/numpy/fenv/fenv.h',
            join(codegen_dir, 'genapi.py'),
            ]

    # Don't install fenv unless we need them.
    if sys.platform == 'cygwin':
        config.add_data_dir('include/numpy/fenv')

    config.add_extension('_sort',
                         sources=[join('src', '_sortmodule.c.src'),
                                  generate_config_h,
                                  generate_numpyconfig_h,
                                  generate_numpy_api,
                                  ],
                         library_dirs=[ndarray_lib_dir()],
                         libraries=['ndarray'],
                         )

    # npymath needs the config.h and numpyconfig.h files to be generated, but
    # build_clib cannot handle generate_config_h and generate_numpyconfig_h
    # (don't ask). Because clib are generated before extensions, we have to
    # explicitly add an extension which has generate_config_h and
    # generate_numpyconfig_h as sources *before* adding npymath.
    subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])

    multiarray_deps = [
        join('src', 'multiarray', 'arrayobject.h'),
        join('src', 'multiarray', 'arraytypes.h'),
        join('src', 'multiarray', 'buffer.h'),
        join('src', 'multiarray', 'calculation.h'),
        join('src', 'multiarray', 'common.h'),
        join('src', 'multiarray', 'conversion_utils.h'),
        join('src', 'multiarray', 'convert_datatype.h'),
        join('src', 'multiarray', 'ctors.h'),
        join('src', 'multiarray', 'descriptor.h'),
        join('src', 'multiarray', 'getset.h'),
        join('src', 'multiarray', 'hashdescr.h'),
        join('src', 'multiarray', 'iterators.h'),
        join('src', 'multiarray', 'mapping.h'),
        join('src', 'multiarray', 'methods.h'),
        join('src', 'multiarray', 'multiarraymodule.h'),
        join('src', 'multiarray', 'numpymemoryview.h'),
        join('src', 'multiarray', 'number.h'),
        join('src', 'multiarray', 'refcount.h'),
        join('src', 'multiarray', 'scalartypes.h'),
        join('src', 'multiarray', 'sequence.h'),
        join('src', 'multiarray', 'shape.h'),
        join('src', 'multiarray', 'ucsnarrow.h'),
        join('src', 'multiarray', 'usertypes.h'),
        ]

    multiarray_src = [
        join('src', 'multiarray', 'arrayobject.c'),
        join('src', 'multiarray', 'arraytypes.c.src'),
        join('src', 'multiarray', 'buffer.c'),
        join('src', 'multiarray', 'calculation.c'),
        join('src', 'multiarray', 'common.c'),
        join('src', 'multiarray', 'conversion_utils.c'),
        join('src', 'multiarray', 'convert.c'),
        join('src', 'multiarray', 'convert_datatype.c'),
        join('src', 'multiarray', 'ctors.c'),
        join('src', 'multiarray', 'datetime.c'),
        join('src', 'multiarray', 'descriptor.c'),
        join('src', 'multiarray', 'flagsobject.c'),
        join('src', 'multiarray', 'getset.c'),
        join('src', 'multiarray', 'hashdescr.c'),
        join('src', 'multiarray', 'item_selection.c'),
        join('src', 'multiarray', 'iterators.c'),
        join('src', 'multiarray', 'mapping.c'),
        join('src', 'multiarray', 'methods.c'),
        join('src', 'multiarray', 'multiarraymodule.c'),
        join('src', 'multiarray', 'number.c'),
        join('src', 'multiarray', 'numpymemoryview.c'),
        join('src', 'multiarray', 'refcount.c'),
        join('src', 'multiarray', 'scalarapi.c'),
        join('src', 'multiarray', 'scalartypes.c.src'),
        join('src', 'multiarray', 'sequence.c'),
        join('src', 'multiarray', 'shape.c'),
        join('src', 'multiarray', 'usertypes.c'),
        ]

    if PYTHON_HAS_UNICODE_WIDE:
        multiarray_src.append(join('src', 'multiarray', 'ucsnarrow.c'))

    umath_src = [join('src', 'umath', 'umathmodule.c.src'),
                 join('src', 'umath', 'loops.c.src'),
                 join('src', 'umath', 'ufunc_object.c')]

    umath_deps = [generate_umath_py,
                  join(codegen_dir, 'generate_ufunc_api.py')]

    config.add_extension('multiarray',
                         sources = multiarray_src +
                                   [generate_config_h,
                                    generate_numpyconfig_h,
                                    generate_numpy_api,
                                    join(codegen_dir, 'generate_numpy_api.py'),
                                    join('*.py')],
                         depends = deps + multiarray_deps,
                         library_dirs=[ndarray_lib_dir()],
                         libraries=['ndarray'],
                         )

    config.add_extension('umath',
                         sources = [generate_config_h,
                                    generate_numpyconfig_h,
                                    generate_umath_c,
                                    generate_ufunc_api,
                                    ] + umath_src,
                         depends = deps + umath_deps,
                         library_dirs=[ndarray_lib_dir()],
                         libraries=['ndarray'],
                         )

    config.add_extension('scalarmath',
                         sources=[join('src', 'scalarmathmodule.c.src'),
                                  generate_config_h,
                                  generate_numpyconfig_h,
                                  generate_numpy_api,
                                  generate_ufunc_api],
                         library_dirs=[ndarray_lib_dir()],
                         libraries=['ndarray'],
                         )

    # Configure blasdot
    blas_info = get_info('blas_opt', 0)
    #blas_info = {}
    def get_dotblas_sources(ext, build_dir):
        if blas_info:
            if ('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', []):
                # dotblas needs ATLAS, Fortran compiled blas will not be sufficient.
                return None
            return ext.depends[:1]
        return None  # no extension module will be built

    config.add_extension('_dotblas',
                         sources = [get_dotblas_sources],
                         depends=[join('blasdot', '_dotblas.c'),
                                  join('blasdot', 'cblas.h'),
                                  ],
                         include_dirs = ['blasdot'],
                         library_dirs=[ndarray_lib_dir()],
                         libraries=['ndarray'],
                         extra_info = blas_info
                         )

    config.add_extension('umath_tests',
                         sources = [join('src', 'umath', 'umath_tests.c.src')])

    config.add_extension('multiarray_tests',
                         sources = [join('src', 'multiarray', 'multiarray_tests.c.src')])

    config.add_data_dir('tests')
    config.add_data_dir('tests/data')

    config.make_svn_version_py()

    return config

if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(configuration=configuration)
numpy/core/setup.py
Python
bsd-3-clause
19,785
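
# The moredefs -> header translation above boils down to this small loop: a
# bare string becomes a flag-style define, a (name, value) pair a valued one.
# (Standalone illustration; these particular defines are made up for the demo.)
moredefs = ['__NPY_PRIVATE_NO_SIGNAL', ('MATHLIB', 'm'), ('NPY_PY3K', 1)]
for d in moredefs:
    if isinstance(d, str):
        print('#define %s' % d)     # -> "#define __NPY_PRIVATE_NO_SIGNAL"
    else:
        print('#define %s %s' % d)  # -> "#define MATHLIB m", "#define NPY_PY3K 1"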
import glob
import os
from subprocess import check_call


def setup(loader, variant=None):
    _, variant = loader.setup_project_env(None, variant)
    venv_type = loader.setup_virtualenv()

    config = loader.get_project_config()
    python_bin = loader.get_python_bin()

    odoo_dir = os.path.join(loader.config['work_dir'],
                            config.get('odoo_dir', 'lib/odoo'))

    # Is directory writable?
    if os.access(odoo_dir, os.W_OK):
        binargs = [python_bin, 'setup.py', 'install']
        if venv_type == 'python':
            binargs.append('--user')
        return check_call(binargs, cwd=odoo_dir)

    odoo_ver = config.get('odoo_version')
    if odoo_ver:
        # Check sdist of specific version.
        filename = os.path.join(odoo_dir, 'dist', 'odoo-%s.0.tar.gz' % odoo_ver)
        if not os.path.exists(filename):
            filename = None
    else:
        # Check any sdist.
        files = glob.glob(os.path.join(odoo_dir, 'dist', 'odoo-*.tar.gz'))
        if files:
            # Get latest version.
            files.sort(reverse=True)
            filename = files[0]
        else:
            filename = None

    if not filename:
        raise RuntimeError("Odoo directory is not writable for setup.py and "
                           "source dist tarball not found.")

    binargs = [python_bin, '-m', 'pip', 'install', filename]
    if venv_type == 'python':
        binargs.append('--user')
    return check_call(binargs)
dozymoe/fireh_runner
setup_modules/odoo.py
Python
mit
1,453
from Products.PageTemplates.ZopePageTemplate import ZopePageTemplate
from Acquisition import aq_base


def render_tal_expressions(context, id='default', html=None, content_type='text/html'):
    """
    @attention: Context must be in a public state for this to render!
                Even if the user has permission to view, it will not allow them.
    @param context: object context
    @param id: string name of this html snippet, doesn't really matter
    @param html: html with TAL
    @param content_type: type of content, 'text/plain'|'text/html'|etc...
    """
    pt = ZopePageTemplate(id=id, text=html, content_type=content_type)
    pt = aq_base(pt).__of__(context)  # set context of template
    return pt()


def catalogSuperBrain(args):
    """
    Experimental
    """
    items = []
    for func in args:
        items.append(tuple([func.__name__, func()]))
    t = tuple(items)
    if len(str(t)) > 300:
        print "Warning: Superbrain is larger than 300 characters. Size is " + str(len(str(t))) + " characters."
    return t


def brain_surgery(brains):
    """
    Experimental
    """
    i = 0
    try:
        for brain in brains:
            try:
                sb = SuperBrain()
                for f in brain.fetch:
                    sb.set(f[0], f[1])
                brains[i].fetch = sb
            except Exception as e:
                pass  # ignore brains without brain.fetch column
            i += 1
    except Exception as e:
        print "Error in brain_surgery: " + str(e)
    return brains


class SuperBrain(object):
    """
    Experimental
    """

    def set(self, name, value):
        setattr(self, name, value)

    def get(self, name):
        return getattr(self, name)
uwosh/uwosh.librarytypes
uwosh/librarytypes/util.py
Python
gpl-2.0
1,754
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Kokkos(CMakePackage, CudaPackage):
    """Kokkos implements a programming model in C++ for writing performance
    portable applications targeting all major HPC platforms."""

    homepage = "https://github.com/kokkos/kokkos"
    git = "https://github.com/kokkos/kokkos.git"
    url = "https://github.com/kokkos/kokkos/archive/3.1.01.tar.gz"

    maintainers = ['jjwilke']

    version('develop', branch='develop')
    version('master', branch='master')
    version('3.2.00', sha256='05e1b4dd1ef383ca56fe577913e1ff31614764e65de6d6f2a163b2bddb60b3e9')
    version('3.1.01', sha256='ff5024ebe8570887d00246e2793667e0d796b08c77a8227fe271127d36eec9dd')
    version('3.1.00', sha256="b935c9b780e7330bcb80809992caa2b66fd387e3a1c261c955d622dae857d878")
    version('3.0.00', sha256="c00613d0194a4fbd0726719bbed8b0404ed06275f310189b3493f5739042a92b")

    depends_on("cmake@3.10:", type='build')

    devices_variants = {
        'cuda': [False, 'Whether to build CUDA backend'],
        'openmp': [False, 'Whether to build OpenMP backend'],
        'pthread': [False, 'Whether to build Pthread backend'],
        'serial': [True, 'Whether to build serial backend'],
        'hip': [False, 'Whether to build HIP backend'],
    }
    conflicts("+hip", when="@:3.0")

    tpls_variants = {
        'hpx': [False, 'Whether to enable the HPX library'],
        'hwloc': [False, 'Whether to enable the HWLOC library'],
        'numactl': [False, 'Whether to enable the LIBNUMA library'],
        'memkind': [False, 'Whether to enable the MEMKIND library'],
    }

    options_variants = {
        'aggressive_vectorization': [False, 'Aggressively vectorize loops'],
        'compiler_warnings': [False, 'Print all compiler warnings'],
        'cuda_lambda': [False, 'Activate experimental lambda features'],
        'cuda_ldg_intrinsic': [False, 'Use CUDA LDG intrinsics'],
        'cuda_relocatable_device_code': [False, 'Enable RDC for CUDA'],
        'cuda_uvm': [False, 'Enable unified virtual memory (UVM) for CUDA'],
        'debug': [False, 'Activate extra debug features - may increase compile times'],
        'debug_bounds_check': [False, 'Use bounds checking - will increase runtime'],
        'debug_dualview_modify_check': [False, 'Debug check on dual views'],
        'deprecated_code': [False, 'Whether to enable deprecated code'],
        'examples': [False, 'Whether to build examples'],
        'explicit_instantiation': [False, 'Explicitly instantiate template types'],
        'hpx_async_dispatch': [False, 'Whether HPX supports asynchronous dispatch'],
        'profiling': [True, 'Create bindings for profiling tools'],
        'profiling_load_print': [False, 'Print which profiling tools got loaded'],
        'qthread': [False, 'Enable the QTHREAD library'],
        'tests': [False, 'Build for tests'],
    }

    amd_gpu_arches = [
        'fiji',
        'gfx901',
        'vega900',
        'vega906',
    ]
    variant("amd_gpu_arch", default='none', values=amd_gpu_arches,
            description="AMD GPU architecture")
    conflicts("+hip", when="amd_gpu_arch=none")

    spack_micro_arch_map = {
        "graviton": "",
        "graviton2": "",
        "aarch64": "",
        "arm": "",
        "ppc": "",
        "ppc64": "",
        "ppc64le": "",
        "ppcle": "",
        "sparc": None,
        "sparc64": None,
        "x86": "",
        "x86_64": "",
        "thunderx2": "THUNDERX2",
        "k10": None,
        "zen": "ZEN",
        "bulldozer": "",
        "piledriver": "",
        "zen2": "ZEN2",
        "steamroller": "KAVERI",
        "excavator": "CARIZO",
        "a64fx": "",
        "power7": "POWER7",
        "power8": "POWER8",
        "power9": "POWER9",
        "power8le": "POWER8",
        "power9le": "POWER9",
        "i686": None,
        "pentium2": None,
        "pentium3": None,
        "pentium4": None,
        "prescott": None,
        "nocona": None,
        "nehalem": None,
        "sandybridge": "SNB",
        "haswell": "HSW",
        "mic_knl": "KNL",
        "cannonlake": "SKX",
        "cascadelake": "SKX",
        "westmere": "WSM",
        "core2": None,
        "ivybridge": "SNB",
        "broadwell": "BDW",
        # @AndrewGaspar: Kokkos does not have an arch for plain-skylake - only
        # for Skylake-X (i.e. Xeon). For now, I'm mapping this to Broadwell
        # until Kokkos learns to optimize for SkyLake without the AVX-512
        # extensions. SkyLake with AVX-512 will still be optimized using the
        # separate `skylake_avx512` arch.
        "skylake": "BDW",
        "icelake": "SKX",
        "skylake_avx512": "SKX",
    }

    spack_cuda_arch_map = {
        "30": 'kepler30',
        "32": 'kepler32',
        "35": 'kepler35',
        "37": 'kepler37',
        "50": 'maxwell50',
        "52": 'maxwell52',
        "53": 'maxwell53',
        "60": 'pascal60',
        "61": 'pascal61',
        "70": 'volta70',
        "72": 'volta72',
        "75": 'turing75',
    }
    cuda_arches = spack_cuda_arch_map.values()
    conflicts("+cuda", when="cuda_arch=none")

    devices_values = list(devices_variants.keys())
    for dev in devices_variants:
        dflt, desc = devices_variants[dev]
        variant(dev, default=dflt, description=desc)

    options_values = list(options_variants.keys())
    for opt in options_values:
        if "cuda" in opt:
            conflicts('+%s' % opt, when="~cuda",
                      msg="Must enable CUDA to use %s" % opt)
        dflt, desc = options_variants[opt]
        variant(opt, default=dflt, description=desc)

    tpls_values = list(tpls_variants.keys())
    for tpl in tpls_values:
        dflt, desc = tpls_variants[tpl]
        variant(tpl, default=dflt, description=desc)
        depends_on(tpl, when="+%s" % tpl)

    variant("wrapper", default=False,
            description="Use nvcc-wrapper for CUDA build")
    depends_on("kokkos-nvcc-wrapper", when="+wrapper")
    depends_on("kokkos-nvcc-wrapper@develop", when="@develop+wrapper")
    depends_on("kokkos-nvcc-wrapper@master", when="@master+wrapper")
    conflicts("+wrapper", when="~cuda")

    variant("std", default="11", values=["11", "14", "17", "20"], multi=False)
    variant("pic", default=False, description="Build position independent code")

    # nvcc does not currently work with C++17 or C++20
    conflicts("+cuda", when="std=17")
    conflicts("+cuda", when="std=20")

    variant('shared', default=True, description='Build shared libraries')

    def append_args(self, cmake_prefix, cmake_options, spack_options):
        for opt in cmake_options:
            enablestr = "+%s" % opt
            optuc = opt.upper()
            optname = "Kokkos_%s_%s" % (cmake_prefix, optuc)
            option = None
            if enablestr in self.spec:
                option = "-D%s=ON" % optname
            else:
                # explicitly turn off if not enabled
                # this avoids any confusing implicit defaults
                # that come from the CMake
                option = "-D%s=OFF" % optname
            if option not in spack_options:
                spack_options.append(option)

    def setup_dependent_package(self, module, dependent_spec):
        try:
            self.spec.kokkos_cxx = self.spec["kokkos-nvcc-wrapper"].kokkos_cxx
        except Exception:
            self.spec.kokkos_cxx = spack_cxx

    def cmake_args(self):
        spec = self.spec
        options = []

        isdiy = "+diy" in spec
        if isdiy:
            options.append("-DSpack_WORKAROUND=On")

        if "+pic" in spec:
            options.append("-DCMAKE_POSITION_INDEPENDENT_CODE=ON")

        spack_microarches = []
        if "+cuda" in spec:
            # this is a list
            for cuda_arch in spec.variants["cuda_arch"].value:
                if not cuda_arch == "none":
                    kokkos_arch_name = self.spack_cuda_arch_map[cuda_arch]
                    spack_microarches.append(kokkos_arch_name)
        kokkos_microarch_name = self.spack_micro_arch_map[spec.target.name]
        if kokkos_microarch_name:
            spack_microarches.append(kokkos_microarch_name)

        for arch in self.amd_gpu_arches:
            keyval = "amd_gpu_arch=%s" % arch
            if keyval in spec:
                spack_microarches.append(arch)

        for arch in spack_microarches:
            options.append("-DKokkos_ARCH_%s=ON" % arch.upper())

        self.append_args("ENABLE", self.devices_values, options)
        self.append_args("ENABLE", self.options_values, options)
        self.append_args("ENABLE", self.tpls_values, options)

        for tpl in self.tpls_values:
            var = "+%s" % tpl
            if var in self.spec:
                options.append("-D%s_DIR=%s" % (tpl, spec[tpl].prefix))

        # we do not need the compiler wrapper from Spack
        # set the compiler explicitly (may be Spack wrapper or nvcc-wrapper)
        try:
            options.append("-DCMAKE_CXX_COMPILER=%s" %
                           self.spec["kokkos-nvcc-wrapper"].kokkos_cxx)
        except Exception:
            options.append("-DCMAKE_CXX_COMPILER=%s" % spack_cxx)

        # Set the C++ standard to use
        options.append("-DKokkos_CXX_STANDARD=%s" %
                       self.spec.variants["std"].value)

        options.append('-DBUILD_SHARED_LIBS=%s' % ('+shared' in self.spec))

        return options
iulian787/spack
var/spack/repos/builtin/packages/kokkos/package.py
Python
lgpl-2.1
9,965
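
# A standalone sketch of what append_args above emits: every variant name is
# mapped to an explicit -DKokkos_<PREFIX>_<NAME>=ON/OFF CMake flag, so nothing
# is left to CMake's implicit defaults. (The enabled set here is hypothetical.)
enabled = {'openmp', 'serial'}
options = []
for opt in ['cuda', 'openmp', 'serial', 'hip']:
    state = 'ON' if opt in enabled else 'OFF'
    options.append('-DKokkos_ENABLE_%s=%s' % (opt.upper(), state))
# options -> ['-DKokkos_ENABLE_CUDA=OFF', '-DKokkos_ENABLE_OPENMP=ON',
#             '-DKokkos_ENABLE_SERIAL=ON', '-DKokkos_ENABLE_HIP=OFF']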
""" Django settings for life3.0 project. Generated by 'django-admin startproject' using Django 1.11.6. For more information on this file, see https://docs.djangoproject.com/en/1.11/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.11/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'tz#om7iapmd!s5)^2*3ka1q^+k$f$pyx#gct-d$jo8n9)$#ern' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = False ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.auth', 'django.contrib.contenttypes', # 'django.contrib.sessions', # 'django.contrib.messages', 'django.contrib.staticfiles', 'rest_framework', 'life3.dashboard', 'life3.login', 'life3.user', ] MIDDLEWARE = [ #'life3.dashboard.custom_middleware.SimpleMiddlerware', 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'life3.config.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.jinja2.Jinja2' , 'DIRS': [os.path.join(BASE_DIR, 'static/templates')] , 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] #AUTH_USER_MODEL = 'dashboard.LifeUser' WSGI_APPLICATION = 'life3.config.wsgi.application' # Database # https://docs.djangoproject.com/en/1.11/ref/settings/#databases # Password validation # https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.11/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'Asia/Seoul' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.11/howto/static-files/ # URL prefix for web resource STATIC_URL = '/assets/' # Django static dirs # command> python manage.py findstatic <FILE> STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'static/js/built'), os.path.join(BASE_DIR, 'static/css'), os.path.join(BASE_DIR, 'static/templates'), ) # For web server serving. Only for production # command> python manage.py collectstatic STATIC_ROOT = os.path.join(BASE_DIR, 'static-files')
BoraDowon/Life3.0
life3/config/settings/base.py
Python
mit
3,600
#     Copyright 2022. ThingsBoard
#
#     Licensed under the Apache License, Version 2.0 (the "License");
#     you may not use this file except in compliance with the License.
#     You may obtain a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#     Unless required by applicable law or agreed to in writing, software
#     distributed under the License is distributed on an "AS IS" BASIS,
#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#     See the License for the specific language governing permissions and
#     limitations under the License.

from simplejson import dumps

from thingsboard_gateway.connectors.connector import log


class BackwardCompatibilityAdapter:
    config_files_count = 1
    CONFIG_PATH = None

    def __init__(self, config, config_dir):
        self.__config = config
        self.__config_dir = config_dir
        BackwardCompatibilityAdapter.CONFIG_PATH = self.__config_dir
        self.__keys = ['host', 'port', 'type', 'method', 'timeout', 'byteOrder', 'wordOrder', 'retries',
                       'retryOnEmpty', 'retryOnInvalid', 'baudrate']

    @staticmethod
    def __save_json_config_file(config):
        with open(
                f'{BackwardCompatibilityAdapter.CONFIG_PATH}modbus_new_{BackwardCompatibilityAdapter.config_files_count}.json',
                'w') as file:
            file.writelines(dumps(config, sort_keys=False, indent=' ', separators=(',', ': ')))
        BackwardCompatibilityAdapter.config_files_count += 1

    def convert(self):
        if not self.__config.get('server'):
            return self.__config

        log.warning(
            'You are using old configuration structure for Modbus connector. It will be DEPRECATED in the future '
            'version! New config file "modbus_new.json" was generated in %s folder. Please, use it.',
            self.CONFIG_PATH)
        log.warning('You have to manually connect the new generated config file to tb_gateway.yaml!')

        slaves = []
        for device in self.__config['server'].get('devices', []):
            slave = {**device}

            for key in self.__keys:
                if not device.get(key):
                    slave[key] = self.__config['server'].get(key)

            slave['pollPeriod'] = slave['timeseriesPollPeriod']

            slaves.append(slave)

        result_dict = {'master': {'slaves': slaves}, 'slave': self.__config.get('slave')}

        self.__save_json_config_file(result_dict)

        return result_dict
thingsboard/thingsboard-gateway
thingsboard_gateway/connectors/modbus/backward_compability_adapter.py
Python
apache-2.0
2,534
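
# Shape of the conversion performed by convert() above, abridged: an old-style
# config with a single "server" block becomes a master/slaves layout, with the
# connection fields copied from the server down onto each slave. Values here
# are made up, and connection keys the old config never set come through as None.
old_config = {
    'server': {
        'host': '127.0.0.1', 'port': 5020, 'type': 'tcp',
        'devices': [{'unitId': 1, 'timeseriesPollPeriod': 5000}],
    },
}
# BackwardCompatibilityAdapter(old_config, './config/').convert() returns roughly:
new_config = {
    'master': {'slaves': [{'unitId': 1, 'timeseriesPollPeriod': 5000,
                           'pollPeriod': 5000,
                           'host': '127.0.0.1', 'port': 5020, 'type': 'tcp'}]},
    'slave': None,  # no old 'slave' section to carry over
}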
import pkg_resources

__version__ = pkg_resources.get_distribution("django-eldarion-ajax").version
eldarion/django-eldarion-ajax
eldarion/ajax/__init__.py
Python
mit
100
'''
Created on Nov 15, 2011

@author: mmornati
'''
from webui.abstracts import ServerOperation
from django.core.urlresolvers import reverse
from webui.core import kermit_modules


class StartServer(ServerOperation):

    def get_visible(self, server, user):
        return not server.online

    def get_enabled(self, server):
        return not server.online

    def get_name(self):
        return 'Start Server'

    def get_group_name(self):
        return 'Server Control'

    def get_image(self):
        return 'start.png'

    def get_url(self, hostname, instancename=None):
        return reverse('call_mcollective_with_arguments', kwargs={'wait_for_response': 'True'})

    def get_agent(self):
        return 'rpcutil'

    def get_action(self):
        return 'ping'

    def get_filter(self, hostname):
        return 'identity=%s' % hostname


class StopServer(ServerOperation):

    def get_visible(self, server, user):
        return server.online

    def get_enabled(self, server):
        return server.online

    def get_name(self):
        return 'Stop Server'

    def get_group_name(self):
        return 'Server Control'

    def get_image(self):
        return 'stop.png'

    def get_url(self, hostname, instancename=None):
        return reverse('call_mcollective')

    def get_agent(self):
        return 'system'

    def get_action(self):
        return 'halt'

    def get_filter(self, hostname):
        return 'identity=%s' % hostname


kermit_modules.register(StartServer)
kermit_modules.register(StopServer)
kermitfr/kermit-webui
src/webui/plugins/servercontrol/operations.py
Python
gpl-3.0
1,617
#!/usr/bin/env python ''' @author Luke Campbell <LCampbell@ASAScience.com> @file query_language @date 06/12/12 10:50 @description DESCRIPTION ''' from pyparsing import ParseException, Regex, quotedString, CaselessLiteral, MatchFirst, removeQuotes, Optional from pyon.core.exception import BadRequest class QueryLanguage(object): ''' Pyon Discovery Query DSL BNF SEARCH "model" IS "abc*" FROM "models" AND BELONGS TO "platformDeviceID" SEARCH "runtime" IS VALUES FROM 1. TO 100 FROM "devices" AND BELONGS TO "RSN resource ID" BELONGS TO "org resource ID" SEARCH "model" IS "sbc*" FROM "deices" ORDER BY "name" LIMIT 30 AND BELONGS TO "platformDeviceID" <sentence> ::= <query> [<query-filter>] [("AND" <sentence>)|("OR" <sentence>)] <query> ::= <search-query> | <association-query> | <collection-query> | <owner-query> <association-query> ::= "BELONGS TO" <resource-id> [<limit-parameter>] <owner-query> ::= "HAS" <resource-id> [<limit-parameter>] <collection-query> ::= "IN" <collection-id> <search_query> ::= "SEARCH" <field> (<term-query> | <range-query> | <fuzzy-query> | <time-query> | <geo-query> | <vertical-bounds> | <time-bounds>) "FROM" <index-name> [<query-parameter>]* <query-parameter> ::= <order-parameter> | <limit-parameter> | <offset-parameter> <offset-parameter> ::= "SKIP" <integer> <order-parameter> ::= "ORDER BY" <limited-string> <limit-parameter> ::= "LIMIT" <integer> <depth-parameter> ::= "DEPTH" <integer> <term-query> ::= "IS" <field-query> <fuzzy-query> ::= "LIKE" <field-query> <match-query> ::= "MATCH" <field-query> <field-query> ::= <wildcard-string> <range-query> ::= "VALUES" [<from-statement>] [<to-statement>] <time-bounds> ::= "TIMEBOUNDS" <from-statement> <to-statement> <vertical-bounds> ::= "VERTICAL" <from-statement> <to-statement> <time-query> ::= "TIME" [<from-statement>] [<to-statement>] <geo-query> ::= "GEO" ( <geo-distance> | <geo-bbox> ) <geo-distance> ::= "DISTANCE" <distance> "FROM" <coords> <from-statement> ::= "FROM" <number> <to-statement> ::= "TO" <number> <geo-bbox> ::= "BOX" "TOP-LEFT" <coords> "BOTTOM-RIGHT" <coords> <index-name> ::= <python-string> <collection-id> ::= <resource_id> <resource-id> ::= REGEX( "[a-zA-Z0-9]+" ) <query-filter> ::= "FILTER" <python-string> <distance> ::= <number> <units> <units> ::= ('km' | 'mi' ) <coords> ::= "LAT" <number> "LON" <number> <field> ::= <limited-string> | "*" <limited-string> ::= REGEX( "[a-zA-Z0-9_\.]+" ) <wildcard-string> ::= <python-string> <python-string> ::= REGEX( "[^"]+" ) <number> ::= <integer> | <double> <double> ::= 0-9 ('.' 0-9) <integer> ::= 0-9 ''' def __init__(self): self.json_query = {'query':{}, 'and': [], 'or': []} self.tokens = None #-------------------------------------------------------------------------------------- # <integer> ::= 0-9 # <double> ::= 0-9 ('.' 0-9) # <number> ::= <integer> | <double> #-------------------------------------------------------------------------------------- integer = Regex(r'-?[0-9]+') # Word matches space for some reason double = Regex(r'-?[0-9]+.?[0-9]*') number = double | integer #-------------------------------------------------------------------------------------- # <python-string> ::= (String surrounded by double-quotes) # <wildcard-string> ::= <python-string> # <limited-string> ::= '"' a..z A..Z 9..9 _ . '"' (alpha nums and ._ surrounded by double quotes) # <field> ::= <limited-string> | "*" # <coords> ::= "LAT" <number> "LON" <number> # <units> ::= ('km' | 'mi' | 'nm') # <distance> ::= REGEX(([0-9]*\.?[0-9]*)(km|mi|nm)?) 
#-------------------------------------------------------------------------------------- python_string = quotedString.setParseAction(removeQuotes) wildcard_string = python_string limited_string = Regex(r'("(?:[a-zA-Z0-9_\.])*"|\'(?:[a-zA-Z0-9_\.]*)\')').setParseAction(removeQuotes) field = limited_string ^ CaselessLiteral('"*"').setParseAction(removeQuotes) coords = CaselessLiteral("LAT") + number + CaselessLiteral("LON") + number units = CaselessLiteral('km') | CaselessLiteral('mi') distance = number + units distance.setParseAction( lambda x : self.frame.update({'dist' : float(x[0]), 'units' : x[1]})) #-------------------------------------------------------------------------------------- # Date #-------------------------------------------------------------------------------------- date = python_string #-------------------------------------------------------------------------------------- # <query-filter> ::= "FILTER" <python-string> # <index-name> ::= <python-string> # <resource-id> ::= '"' a..z A..Z 0..9 $ _ -'"' (alpha nums surrounded by double quotes) # <collection-id> ::= <resource-id> #-------------------------------------------------------------------------------------- query_filter = CaselessLiteral("FILTER") + python_string # Add the filter to the frame object query_filter.setParseAction(lambda x : self.frame.update({'filter' : x[1]})) index_name = MatchFirst(python_string) # Add the index to the frame object index_name.setParseAction(lambda x : self.frame.update({'index' : x[0]})) resource_id = Regex(r'("(?:[a-zA-Z0-9\$_-])*"|\'(?:[a-zA-Z0-9\$_-]*)\')').setParseAction(removeQuotes) collection_id = resource_id #-------------------------------------------------------------------------------------- # <from-statement> ::= "FROM" <number> # <to-statement> ::= "TO" <number> #-------------------------------------------------------------------------------------- from_statement = CaselessLiteral("FROM") + number from_statement.setParseAction(lambda x : self.frame.update({'from' : x[1]})) to_statement = CaselessLiteral("TO") + number to_statement.setParseAction(lambda x : self.frame.update({'to' : x[1]})) #-------------------------------------------------------------------------------------- # <date-from-statement> ::= "FROM" <date> # <date-to-statement> ::= "TO" <date> #-------------------------------------------------------------------------------------- date_from_statement = CaselessLiteral("FROM") + date date_from_statement.setParseAction(lambda x : self.frame.update({'from' : x[1]})) date_to_statement = CaselessLiteral("TO") + date date_to_statement.setParseAction(lambda x : self.frame.update({'to' : x[1]})) #-------------------------------------------------------------------------------------- # <time-query> ::= "TIME FROM" <date> "TO" <date> #-------------------------------------------------------------------------------------- time_query = CaselessLiteral("TIME") + Optional(date_from_statement) + Optional(date_to_statement) time_query.setParseAction(lambda x : self.time_frame()) # time.mktime(dateutil.parser.parse(x[2])), 'to':time.mktime(dateutil.parser.parse(x[4]))}})) #-------------------------------------------------------------------------------------- # <time-bounds> ::= "TIMEBOUNDS" <from-statement> <to-statement> #-------------------------------------------------------------------------------------- time_bounds = CaselessLiteral("TIMEBOUNDS") + date_from_statement + date_to_statement time_bounds.setParseAction(lambda x : self.time_bounds_frame()) 
        #--------------------------------------------------------------------------------------
        # <vertical-bounds> ::= "VERTICAL" <from-statement> <to-statement>
        #--------------------------------------------------------------------------------------

        vertical_bounds = CaselessLiteral("VERTICAL") + from_statement + to_statement
        vertical_bounds.setParseAction(lambda x : self.vertical_bounds_frame())

        #--------------------------------------------------------------------------------------
        # <range-query> ::= "VALUES" [<from-statement>] [<to-statement>]
        #--------------------------------------------------------------------------------------

        range_query = CaselessLiteral("VALUES") + Optional(from_statement) + Optional(to_statement)
        # Add the range to the frame object
        range_query.setParseAction(lambda x : self.range_frame())

        #--------------------------------------------------------------------------------------
        # <geo-distance> ::= "DISTANCE" <distance> "FROM" <coords>
        # <geo-bbox> ::= "BOX" "TOP-LEFT" <coords> "BOTTOM-RIGHT" <coords>
        #--------------------------------------------------------------------------------------

        geo_distance = CaselessLiteral("DISTANCE") + distance + CaselessLiteral("FROM") + coords
        geo_distance.setParseAction(lambda x : self.frame.update({'lat': float(x[5]), 'lon':float(x[7])}))

        geo_bbox = CaselessLiteral("BOX") + CaselessLiteral("TOP-LEFT") + coords + CaselessLiteral("BOTTOM-RIGHT") + coords
        geo_bbox.setParseAction(lambda x : self.frame.update({'top_left':[float(x[5]),float(x[3])], 'bottom_right':[float(x[10]),float(x[8])]}))

        #--------------------------------------------------------------------------------------
        # <field-query> ::= <wildcard-string>
        # <term-query> ::= "IS" <field-query>
        # <fuzzy-query> ::= "LIKE" <field-query>
        # <match-query> ::= "MATCH" <field-query>
        # <geo-query> ::= "GEO" ( <geo-distance> | <geo-bbox> )
        #--------------------------------------------------------------------------------------

        field_query = wildcard_string

        term_query = CaselessLiteral("IS") + field_query
        term_query.setParseAction(lambda x : self.frame.update({'value':x[1]}))

        geo_query = CaselessLiteral("GEO") + ( geo_distance | geo_bbox )

        fuzzy_query = CaselessLiteral("LIKE") + field_query
        fuzzy_query.setParseAction(lambda x : self.frame.update({'fuzzy':x[1]}))

        match_query = CaselessLiteral("MATCH") + field_query
        match_query.setParseAction(lambda x : self.frame.update({'match':x[1]}))

        #--------------------------------------------------------------------------------------
        # <limit-parameter> ::= "LIMIT" <integer>
        # <depth-parameter> ::= "DEPTH" <integer>
        # <order-parameter> ::= "ORDER" "BY" <limited-string>
        # <offset-parameter> ::= "SKIP" <integer>
        # <query-parameter> ::= <order-parameter> | <limit-parameter> | <offset-parameter>
        #--------------------------------------------------------------------------------------

        limit_parameter = CaselessLiteral("LIMIT") + integer
        limit_parameter.setParseAction(lambda x: self.json_query.update({'limit' : int(x[1])}))

        depth_parameter = CaselessLiteral("DEPTH") + integer
        depth_parameter.setParseAction(lambda x: self.frame.update({'depth' : int(x[1])}))

        order_parameter = CaselessLiteral("ORDER") + CaselessLiteral("BY") + limited_string
        order_parameter.setParseAction(lambda x: self.json_query.update({'order' : {x[2] : 'asc'}}))

        offset_parameter = CaselessLiteral("SKIP") + integer
        offset_parameter.setParseAction(lambda x : self.json_query.update({'skip' : int(x[1])}))

        query_parameter = limit_parameter | order_parameter | offset_parameter
        #--------------------------------------------------------------------------------------
        # <search-query> ::= "SEARCH" <field> (<range-query> | <term-query> | <fuzzy-query> | <match-query> | <time-query> | <time-bounds> | <vertical-bounds> | <geo-query>) "FROM" <index-name> [<query-parameter>]*
        # <collection-query> ::= "IN" <collection-id>
        # <association-query> ::= "BELONGS TO" <resource-id> [ <depth-parameter> ]
        # <owner-query> ::= "HAS" <resource-id> [ <depth-parameter> ]
        # <query> ::= <search-query> | <association-query> | <collection-query> | <owner-query>
        #--------------------------------------------------------------------------------------

        search_query = CaselessLiteral("SEARCH") + field + (range_query | term_query | fuzzy_query | match_query | vertical_bounds | time_bounds | time_query | geo_query) + CaselessLiteral("FROM") + index_name
        # Add the field to the frame object
        search_query.setParseAction(lambda x : self.frame.update({'field' : x[1]}))

        collection_query = CaselessLiteral("IN") + collection_id
        collection_query.setParseAction(lambda x : self.frame.update({'collection': x[1]}))

        association_query = CaselessLiteral("BELONGS") + CaselessLiteral("TO") + resource_id + Optional(depth_parameter)
        # Add the association to the frame object
        association_query.setParseAction(lambda x : self.frame.update({'association':x[2]}))

        owner_query = CaselessLiteral("HAS") + resource_id + Optional(depth_parameter)
        owner_query.setParseAction(lambda x : self.frame.update({'owner':x[1]}))

        query = search_query | association_query | collection_query | owner_query

        #--------------------------------------------------------------------------------------
        # <primary-query> ::= <query> [<query-filter>]
        # <atom> ::= <query>
        # <intersection> ::= "AND" <atom>
        # <union> ::= "OR" <atom>
        # <sentence> ::= <primary-query> [<intersection>]* [<union>]*
        #--------------------------------------------------------------------------------------

        primary_query = query + Optional(query_filter)
        # Set the primary query on the json_query to the frame and clear the frame
        primary_query.setParseAction(lambda x : self.push_frame())

        atom = query

        intersection = CaselessLiteral("AND") + atom
        # Add an AND operation to the json_query and clear the frame
        intersection.setParseAction(lambda x : self.and_frame())

        union = CaselessLiteral("OR") + atom
        # Add an OR operation to the json_query and clear the frame
        union.setParseAction(lambda x : self.or_frame())

        self.sentence = primary_query + (intersection ^ union)*(0,None) + query_parameter*(0,None)

    def push_frame(self):
        self.json_query['query'] = self.frame
        self.frame = dict()

    def and_frame(self):
        if self.json_query.has_key('and'):
            self.json_query['and'].append(self.frame)
        else:
            self.json_query['and'] = [self.frame]
        self.frame = dict()

    def or_frame(self):
        if self.json_query.has_key('or'):
            self.json_query['or'].append(self.frame)
        else:
            self.json_query['or'] = [self.frame]
        self.frame = dict()

    def range_frame(self):
        if not 'range' in self.frame:
            self.frame['range'] = {}
        if 'from' in self.frame:
            self.frame['range']['from'] = float(self.frame['from'])
            del self.frame['from']
        if 'to' in self.frame:
            self.frame['range']['to'] = float(self.frame['to'])
            del self.frame['to']

    def vertical_bounds_frame(self):
        if not 'vertical_bounds' in self.frame:
            self.frame['vertical_bounds'] = {}
        if 'from' in self.frame:
            self.frame['vertical_bounds']['from'] = float(self.frame['from'])
            del self.frame['from']
        if 'to' in self.frame:
            self.frame['vertical_bounds']['to'] = float(self.frame['to'])
            del self.frame['to']

    def time_bounds_frame(self):
        if not 'time_bounds' in self.frame:
            self.frame['time_bounds'] = {}
        if 'from' in self.frame:
            self.frame['time_bounds']['from'] = self.frame['from']
            del self.frame['from']
        if 'to' in self.frame:
            self.frame['time_bounds']['to'] = self.frame['to']
            del self.frame['to']

    def time_frame(self):
        if not 'time' in self.frame:
            self.frame['time'] = {}
        if 'from' in self.frame:
            self.frame['time']['from'] = self.frame['from']
            del self.frame['from']
        if 'to' in self.frame:
            self.frame['time']['to'] = self.frame['to']
            del self.frame['to']

    def parse(self, s):
        '''
        Parses string s and returns a json_query object, self.tokens is set to the tokens
        '''
        self.json_query = {'query':{}, 'and': [], 'or': []}
        self.frame = {}
        try:
            self.tokens = self.sentence.parseString(s)
        except ParseException as e:
            raise BadRequest('%s' % e)
        return self.json_query

    #=========================================
    # Methods for checking the requests
    #=========================================

    @classmethod
    def query_is_fuzzy_search(cls, query=None):
        if not query: return False
        if not isinstance(query,dict): return False
        if query.has_key('index') and query.has_key('field') and query.has_key('fuzzy'):
            return True
        return False

    @classmethod
    def query_is_match_search(cls, query=None):
        if not query: return False
        if not isinstance(query,dict): return False
        if query.has_key('index') and query.has_key('field') and query.has_key('match'):
            return True
        return False

    @classmethod
    def query_is_term_search(cls, query=None):
        if not query: return False
        if not isinstance(query,dict): return False
        if query.has_key('index') and query.has_key('field') and query.has_key('value'):
            return True
        return False

    @classmethod
    def query_is_range_search(cls,query=None):
        if not query: return False
        if not isinstance(query,dict): return False
        if query.has_key('range') and isinstance(query['range'], dict) and query.has_key('index') and query.has_key('field'):
            return True
        return False

    @classmethod
    def query_is_geo_distance_search(cls,query=None):
        if not (query and isinstance(query,dict)): return False
        if query.has_key('dist') and query.has_key('lat') and query.has_key('lon') and query.has_key('units') and query.has_key('index') and query.has_key('field'):
            return True
        return False

    @classmethod
    def query_is_geo_bbox_search(cls,query=None):
        if not (query and isinstance(query,dict)): return False
        if query.has_key('top_left') and query.has_key('bottom_right') and isinstance(query['top_left'],list) and len(query['top_left'])==2 and isinstance(query['bottom_right'],list) and len(query['bottom_right'])==2 and query.has_key('field'):
            return True
        return False

    @classmethod
    def query_is_association_search(cls,query=None):
        if not query: return False
        if not isinstance(query,dict): return False
        if query.has_key('association'):
            return True
        return False

    @classmethod
    def query_is_owner_search(cls,query=None):
        if not query: return False
        if not isinstance(query,dict): return False
        if query.has_key('owner'):
            return True
        return False

    @classmethod
    def query_is_collection_search(cls, query=None):
        if not query: return False
        if not isinstance(query,dict): return False
        if query.has_key('collection'):
            return True
        return False

    @classmethod
    def query_is_time_search(cls,query=None):
        if not query: return False
        if not isinstance(query,dict): return False
        if query.has_key('time') and isinstance(query['time'], dict) and query.has_key('index') and query.has_key('field'):
            return True
        return False

    @classmethod
    def query_is_vertical_bounds_search(cls,query=None):
        if not query: return False
        if not isinstance(query,dict): return False
        if query.has_key('vertical_bounds') and isinstance(query['vertical_bounds'], dict) and query.has_key('index') and query.has_key('field'):
            return True
        return False

    @classmethod
    def query_is_time_bounds_search(cls,query=None):
        if not query: return False
        if not isinstance(query,dict): return False
        if query.has_key('time_bounds') and isinstance(query['time_bounds'], dict) and query.has_key('index') and query.has_key('field'):
            return True
        return False

    @classmethod
    def match(cls, event = None, query = None):
        field_val = getattr(event,query['field'])
        if cls.query_is_term_search(query):
            # This is a term search - always a string
            #@todo implement using regex to mimic lucene...
            if str(field_val) == query['value']:
                return True
        elif cls.query_is_range_search(query):
            # always a numeric value - float or int
            if (field_val >= query['range']['from']) and (field_val <= query['range']['to']):
                return True
            else:
                return False
        elif cls.query_is_geo_distance_search(query):
            #@todo - wait on this one...
            pass
        elif cls.query_is_geo_bbox_search(query):
            #@todo implement this now.
            # self.assertTrue(retval == {'and':[], 'or':[], 'query':{'field':'location', 'top_left':[0.0, 40.0],
            #                            'bottom_right': [40.0, 0.0], 'index':'index'}})
            # if field_val
            pass
        else:
            raise BadRequest("Missing parameters value and range for query: %s" % query)

    @classmethod
    def evaluate_condition(cls, event = None, query_dict = {} ):
        query = query_dict['query']
        or_queries= query_dict['or']
        and_queries = query_dict['and']

        # if any of the queries in the list of 'or queries' gives a match, publish an event
        if or_queries:
            for or_query in or_queries:
                if cls.match(event, or_query):
                    return True

        # if an 'and query' or a list of 'and queries' is provided, return if the match returns false for
        # any one of them
        if and_queries:
            for and_query in and_queries:
                if not cls.match(event, and_query):
                    return False

        return cls.match(event, query)
ooici/coi-services
ion/services/dm/utility/query_language.py
Python
bsd-2-clause
23,425
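# Usage sketch for the QueryLanguage parser above (hypothetical driver code,
# not part of the original file; assumes pyparsing and pyon are importable).
# parse() runs the pyparsing grammar, the parse actions fill a working frame,
# and push_frame() moves that frame into json_query['query'].

ql = QueryLanguage()
result = ql.parse('SEARCH "model" IS "abc*" FROM "models" LIMIT 30')
# result should look like:
# {'query': {'field': 'model', 'value': 'abc*', 'index': 'models'},
#  'and': [], 'or': [], 'limit': 30}
assert QueryLanguage.query_is_term_search(result['query'])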
# -*- coding:utf8 -*-

from PyQt5.QtWidgets import *

from widgets.playmode_label import PlaymodeSwitchLabel


class StatusBar(QStatusBar):
    def __init__(self, parent=None):
        super().__init__(parent)
        self.desktop_mini_btn = QPushButton("mini")
        self.playmode_switch_label = PlaymodeSwitchLabel()

        self.desktop_mini_btn.setToolTip("Switch to the mini window")
        self.desktop_mini_btn.setObjectName("show_desktop_mini")

        self.addPermanentWidget(self.playmode_switch_label)
        self.addPermanentWidget(self.desktop_mini_btn)
baifenbenniao/FeelUOwn
src/widgets/statusbar.py
Python
mit
568
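# Embedding sketch for the StatusBar above (hypothetical, not from the repo;
# assumes PyQt5 and the widgets.playmode_label module are importable and a
# display is available).

import sys
from PyQt5.QtWidgets import QApplication, QMainWindow

app = QApplication(sys.argv)
window = QMainWindow()
window.setStatusBar(StatusBar(window))
# the "mini" button is a public attribute, so the main window can wire it up:
window.statusBar().desktop_mini_btn.clicked.connect(
    lambda: print("switch to the mini window"))
window.show()
sys.exit(app.exec_())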
import numpy as np
import math
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import cuda, optimizers, serializers, Variable
from chainer import function
from chainer.utils import type_check

from .backwards import *

def add_noise(h, test, sigma=0.2):
    xp = cuda.get_array_module(h.data)
    if test:
        return h
    else:
        return h + sigma * xp.random.randn(*h.data.shape)

def selu(x):
    alpha = float(1.6732632423543772848170429916717)
    scale = float(1.0507009873554804934193349852946)
    return scale * F.elu(x, alpha = alpha)

def weight_clipping(model, lower=-0.01, upper=0.01):
    for params in model.params():
        params_clipped = F.clip(params, lower, upper)
        params.data = params_clipped.data

class ResBlock(chainer.Chain):
    def __init__(self, ch, bn=True, activation=F.relu, k_size=3):
        self.bn = bn
        self.activation = activation
        layers = {}
        pad = k_size//2
        layers['c0'] = L.Convolution2D(ch, ch, k_size, 1, pad)
        layers['c1'] = L.Convolution2D(ch, ch, k_size, 1, pad)
        if bn:
            layers['bn0'] = L.BatchNormalization(ch)
            layers['bn1'] = L.BatchNormalization(ch)
        super(ResBlock, self).__init__(**layers)

    def __call__(self, x, test):
        h = self.c0(x)
        if self.bn:
            h = self.bn0(h, test=test)
        h = self.activation(h)
        h = self.c1(h)
        if self.bn:
            h = self.bn1(h, test=test)
        return h + x

class NNBlock(chainer.Chain):
    def __init__(self, ch0, ch1, \
                nn='conv', \
                norm='bn', \
                activation=F.relu, \
                dropout=False, \
                noise=None, \
                w_init=None, \
                k_size = 3, \
                normalize_input=False
            ):
        self.norm = norm
        self.normalize_input = normalize_input
        self.activation = activation
        self.dropout = dropout
        self.noise = noise
        self.nn = nn
        layers = {}

        if w_init == None:
            w = chainer.initializers.GlorotNormal()
        else:
            w = w_init

        if nn == 'down_conv':
            layers['c'] = L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=w)
        elif nn == 'up_deconv':
            layers['c'] = L.Deconvolution2D(ch0, ch1, 4, 2, 1, initialW=w)
        elif nn == 'up_subpixel':
            pad = k_size//2
            layers['c'] = L.Convolution2D(ch0, ch1*4, k_size, 1, pad, initialW=w)
        elif nn=='conv' or nn=='up_unpooling':
            pad = k_size//2
            layers['c'] = L.Convolution2D(ch0, ch1, k_size, 1, pad, initialW=w)
        elif nn=='linear':
            layers['c'] = L.Linear(ch0, ch1, initialW=w)
        else:
            raise Exception("Cannot find method %s" % nn)

        if self.norm == 'bn':
            if self.noise:
                layers['n'] = L.BatchNormalization(ch1, use_gamma=False)
            else:
                layers['n'] = L.BatchNormalization(ch1)
        elif self.norm == 'ln':
            layers['n'] = L.LayerNormalization(ch1)

        super(NNBlock, self).__init__(**layers)

    def _do_normalization(self, x, test, retain_forward=False):
        if self.norm == 'bn':
            return self.n(x, test=test)
        elif self.norm == 'ln':
            y = self.n(x)
            if retain_forward:
                self.nx = y
            return y
        else:
            return x

    def _do_before_cal(self, x):
        if self.nn == 'up_unpooling':
            x = F.unpooling_2d(x, 2, 2, 0, cover_all=False)
        return x

    def _do_after_cal_0(self, x):
        if self.nn == 'up_subpixel':
            x = F.depth2space(x, 2)
        return x

    def _do_after_cal_1(self, x, test):
        if self.noise:
            x = add_noise(x, test=test)
        if self.dropout:
            x = F.dropout(x, train=not test)
        return x

    def __call__(self, x, test, retain_forward=False):
        if self.normalize_input:
            x = self._do_normalization(x, test, retain_forward=retain_forward)

        x = self._do_before_cal(x)

        x = self.c(x)

        x = self._do_after_cal_0(x)

        if not self.norm is None and not self.normalize_input:
            x = self._do_normalization(x, test, retain_forward=retain_forward)

        x = self._do_after_cal_1(x, test)

        if not self.activation is None:
            x = self.activation(x)

        if retain_forward:
            self.x = x
        return x

    def differentiable_backward(self, g):
        if self.normalize_input:
            raise NotImplementedError

        if self.activation is F.leaky_relu:
            g = backward_leaky_relu(self.x, g)
        elif self.activation is F.relu:
            g = backward_relu(self.x, g)
        elif self.activation is F.tanh:
            g = backward_tanh(self.x, g)
        elif self.activation is F.sigmoid:
            g = backward_sigmoid(self.x, g)
        elif not self.activation is None:
            raise NotImplementedError

        if self.norm == 'ln':
            g = backward_layernormalization(self.nx, g, self.n)
        elif not self.norm is None:
            raise NotImplementedError

        if self.nn == 'down_conv' or self.nn == 'conv':
            g = backward_convolution(None, g, self.c)
        elif self.nn == 'linear':
            g = backward_linear(None, g, self.c)
        elif self.nn == 'up_deconv':
            g = backward_deconvolution(None, g, self.c)
        else:
            raise NotImplementedError

        return g
Aixile/chainer-gan-experiments
common/models/ops.py
Python
mit
5,599
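# Forward-pass sketch for the NNBlock layer above (hypothetical, assuming the
# chainer v1-era API this module targets, where BatchNormalization accepts a
# `test` keyword).

import numpy as np
import chainer.functions as F
from chainer import Variable

x = Variable(np.random.randn(4, 3, 32, 32).astype(np.float32))  # NCHW batch
block = NNBlock(3, 64, nn='down_conv', norm='bn', activation=F.leaky_relu)
y = block(x, test=False)  # the stride-2 4x4 conv halves H and W: (4, 64, 16, 16)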
# Copyright 2016-present Facebook. All Rights Reserved. # # fastannotate: faster annotate implementation using linelog # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. """yet another annotate implementation that might be faster (EXPERIMENTAL) The fastannotate extension provides a 'fastannotate' command that makes use of the linelog data structure as a cache layer and is expected to be faster than the vanilla 'annotate' if the cache is present. In most cases, fastannotate requires a setup that mainbranch is some pointer that always moves forward, to be most efficient. Using fastannotate together with linkrevcache would speed up building the annotate cache greatly. Run "debugbuildlinkrevcache" before "debugbuildannotatecache". :: [fastannotate] # specify the main branch head. the internal linelog will only contain # the linear (ignoring p2) "mainbranch". since linelog cannot move # backwards without a rebuild, this should be something that always moves # forward, usually it is "master" or "@". mainbranch = master # fastannotate supports different modes to expose its feature. # a list of combination: # - fastannotate: expose the feature via the "fastannotate" command which # deals with everything in a most efficient way, and provides extra # features like --deleted etc. # - fctx: replace fctx.annotate implementation. note: # a. it is less efficient than the "fastannotate" command # b. it will make it practically impossible to access the old (disk # side-effect free) annotate implementation # c. it implies "hgweb". # - hgweb: replace hgweb's annotate implementation. conflict with "fctx". # (default: fastannotate) modes = fastannotate # default format when no format flags are used (default: number) defaultformat = changeset, user, date # serve the annotate cache via wire protocol (default: False) # tip: the .hg/fastannotate directory is portable - can be rsynced server = True # build annotate cache on demand for every client request (default: True) # disabling it could make server response faster, useful when there is a # cronjob building the cache. serverbuildondemand = True # update local annotate cache from remote on demand client = False # path to use when connecting to the remote server (default: default) remotepath = default # minimal length of the history of a file required to fetch linelog from # the server. (default: 10) clientfetchthreshold = 10 # for "fctx" mode, always follow renames regardless of command line option. # this is a BC with the original command but will reduced the space needed # for annotate cache, and is useful for client-server setup since the # server will only provide annotate cache with default options (i.e. with # follow). do not affect "fastannotate" mode. (default: True) forcefollow = True # for "fctx" mode, always treat file as text files, to skip the "isbinary" # check. this is consistent with the "fastannotate" command and could help # to avoid a file fetch if remotefilelog is used. (default: True) forcetext = True # use unfiltered repo for better performance. unfilteredrepo = True # sacrifice correctness in some corner cases for performance. it does not # affect the correctness of the annotate cache being built. the option # is experimental and may disappear in the future (default: False) perfhack = True """ # TODO from import: # * `branch` is probably the wrong term, throughout the code. 
# # * replace the fastannotate `modes` configuration with a collection # of booleans. # # * Use the templater instead of bespoke formatting # # * rename the config knob for updating the local cache from a remote server # # * revise wireprotocol for sharing annotate files # # * figure out a sensible default for `mainbranch` (with the caveat # that we probably also want to figure out a better term than # `branch`, see above) # # * format changes to the revmap file (maybe use length-encoding # instead of null-terminated file paths at least?) from __future__ import absolute_import from mercurial.i18n import _ from mercurial import ( error as hgerror, localrepo, registrar, ) from . import ( commands, protocol, ) # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should # be specifying the version(s) of Mercurial they are tested with, or # leave the attribute unspecified. testedwith = b'ships-with-hg-core' cmdtable = commands.cmdtable configtable = {} configitem = registrar.configitem(configtable) configitem(b'fastannotate', b'modes', default=[b'fastannotate']) configitem(b'fastannotate', b'server', default=False) configitem(b'fastannotate', b'client', default=False) configitem(b'fastannotate', b'unfilteredrepo', default=True) configitem(b'fastannotate', b'defaultformat', default=[b'number']) configitem(b'fastannotate', b'perfhack', default=False) configitem(b'fastannotate', b'mainbranch') configitem(b'fastannotate', b'forcetext', default=True) configitem(b'fastannotate', b'forcefollow', default=True) configitem(b'fastannotate', b'clientfetchthreshold', default=10) configitem(b'fastannotate', b'serverbuildondemand', default=True) configitem(b'fastannotate', b'remotepath', default=b'default') def uisetup(ui): modes = set(ui.configlist(b'fastannotate', b'modes')) if b'fctx' in modes: modes.discard(b'hgweb') for name in modes: if name == b'fastannotate': commands.registercommand() elif name == b'hgweb': from . import support support.replacehgwebannotate() elif name == b'fctx': from . import support support.replacefctxannotate() commands.wrapdefault() else: raise hgerror.Abort(_(b'fastannotate: invalid mode: %s') % name) if ui.configbool(b'fastannotate', b'server'): protocol.serveruisetup(ui) def extsetup(ui): # fastannotate has its own locking, without depending on repo lock # TODO: avoid mutating this unless the specific repo has it enabled localrepo.localrepository._wlockfreeprefix.add(b'fastannotate/') def reposetup(ui, repo): if ui.configbool(b'fastannotate', b'client'): protocol.clientreposetup(ui, repo)
smmribeiro/intellij-community
plugins/hg4idea/testData/bin/hgext/fastannotate/__init__.py
Python
apache-2.0
6,571
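# Setup sketch for the fastannotate extension above (illustrative hgrc, not
# taken from the source): the extension is enabled under [extensions] and
# tuned with the [fastannotate] options documented in the module docstring.
#
#   [extensions]
#   fastannotate =
#
#   [fastannotate]
#   mainbranch = master
#   modes = fastannotate
#   defaultformat = changeset, user, date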
# ballot/serializers.py # Brought to you by We Vote. Be good. # -*- coding: UTF-8 -*- from .models import BallotItem, BallotReturned from rest_framework import serializers class BallotItemSerializer(serializers.ModelSerializer): class Meta: model = BallotItem fields = ('ballot_item_display_name', 'contest_office_we_vote_id', 'contest_measure_we_vote_id', 'google_ballot_placement', 'google_civic_election_id', 'local_ballot_order', 'measure_subtitle', 'polling_location_we_vote_id', ) class BallotReturnedSerializer(serializers.ModelSerializer): class Meta: model = BallotReturned fields = ('election_date', 'election_description_text', 'google_civic_election_id', 'latitude', 'longitude', 'normalized_line1', 'normalized_line2', 'normalized_city', 'normalized_state', 'normalized_zip', 'polling_location_we_vote_id', 'text_for_map_search', )
wevote/WebAppPublic
ballot/serializers.py
Python
bsd-3-clause
1,250
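# Serialization sketch for the read-only serializers above (hypothetical
# shell code; assumes a configured Django project with ballot rows saved).

item = BallotItem.objects.first()
data = BallotItemSerializer(item).data
# `data` is a plain dict restricted to the fields whitelisted in Meta.fields,
# e.g. data['google_civic_election_id'], data['ballot_item_display_name'], ...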
import Gears as gears from .. import * from .Base import * class Mandelbrot(Base) : def applyWithArgs( self, spass, functionName, *, color1 : '"bright" pattern color.' = 'white', color2 : '"dark" pattern color.' = 'black', direction : "The pattern direction in radians (or 'east', 'northeast', 'north', 'northwest', 'west', 'southwest', 'south', or 'southeast')." = 'east' ) : color1 = processColor(color1, self.tb) color2 = processColor(color2, self.tb) stimulus = spass.getStimulus() if max(color1) - min(color1) > 0.03 or max(color2) - min(color2) > 0.03: stimulus.enableColorMode() spass.setShaderColor( name = functionName+'_color1', red = color1[0], green=color1[1], blue=color1[2] ) spass.setShaderColor( name = functionName+'_color2', red = color2[0], green=color2[1], blue=color2[2] ) direction = processDirection(direction, self.tb) sequence = spass.getSequence() s = math.sin(direction) c = math.cos(direction) spass.setShaderFunction( name = functionName, src = self.glslEsc( ''' vec3 @<X>@ (vec2 x, float time){ vec2 c=x*0.002 /pow(2,time) + vec2(0.375920, 0.193); vec2 z=c; int i=0; for(; i<400; i++) { z = vec2( z.x*z.x - z.y*z.y + c.x, 2 * z.x*z.y + c.y ); if(dot(z,z) > 4) break; } return mix(`color1, `color2, i * 0.01 ); } ''').format( X=functionName ) )
szecsi/Gears
GearsPy/Project/Components/Figure/Mandelbrot.py
Python
gpl-2.0
1,884
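# Plain-Python sketch of the escape-time loop the GLSL fragment above runs
# per pixel (illustrative only; the real computation happens in the shader).

def mandelbrot_iterations(cx, cy, max_iter=400):
    zx, zy = cx, cy  # the shader starts with z = c
    i = 0
    for i in range(max_iter):
        zx, zy = zx * zx - zy * zy + cx, 2 * zx * zy + cy  # z <- z^2 + c
        if zx * zx + zy * zy > 4:  # dot(z, z) > 4 means z has escaped
            break
    return i  # the shader maps this count to mix(color1, color2, i * 0.01)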
# -*- coding: utf-8 -*- # Copyright (C) 2014-present Taiga Agile LLC # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import uuid import csv import pytz from datetime import datetime, timedelta from urllib.parse import quote from unittest import mock from django.urls import reverse from taiga.base.utils import json from taiga.permissions.choices import MEMBERS_PERMISSIONS, ANON_PERMISSIONS from taiga.projects.occ import OCCResourceMixin from taiga.projects.userstories import services, models from .. import factories as f import pytest pytestmark = pytest.mark.django_db(transaction=True) def create_uss_fixtures(): data = {} data["project"] = f.ProjectFactory.create() project = data["project"] data["users"] = [f.UserFactory.create(is_superuser=True) for i in range(0, 3)] data["roles"] = [f.RoleFactory.create() for i in range(0, 3)] user_roles = zip(data["users"], data["roles"]) # Add membership fixtures [f.MembershipFactory.create(user=user, project=project, role=role) for (user, role) in user_roles] data["statuses"] = [f.UserStoryStatusFactory.create(project=project) for i in range(0, 4)] data["epics"] = [f.EpicFactory.create(project=project) for i in range(0, 3)] data["tags"] = ["test1test2test3", "test1", "test2", "test3"] # -------------------------------------------------------------------------------------------------------- # | US | Status | Owner | Assigned To | Assigned Users | Tags | Epic | Milestone | # |----#---------#--------#-------------#----------------#---------------------#-------------------------- # | 0 | status3 | user2 | None | None | tag1 | epic0 | None | # | 1 | status3 | user1 | None | user1 | tag2 | None | | # | 2 | status1 | user3 | None | None | tag1 tag2 | epic1 | None | # | 3 | status0 | user2 | None | None | tag3 | None | | # | 4 | status0 | user1 | user1 | None | tag1 tag2 tag3 | epic0 | None | # | 5 | status2 | user3 | user1 | None | tag3 | None | | # | 6 | status3 | user2 | user1 | None | tag1 tag2 | epic0 epic2 | None | # | 7 | status0 | user1 | user2 | None | tag3 | None | | # | 8 | status3 | user3 | user2 | None | tag1 | epic2 | None | # | 9 | status1 | user2 | user3 | user1 | tag0 | None | | # -------------------------------------------------------------------------------------------------------- (user1, user2, user3, ) = data["users"] (status0, status1, status2, status3 ) = data["statuses"] (epic0, epic1, epic2) = data["epics"] (tag0, tag1, tag2, tag3, ) = data["tags"] us0 = f.UserStoryFactory.create(project=project, owner=user2, assigned_to=None, status=status3, tags=[tag1], milestone=None) f.RelatedUserStory.create(user_story=us0, epic=epic0) us1 = f.UserStoryFactory.create(project=project, owner=user1, assigned_to=None, status=status3, tags=[tag2], assigned_users=[user1]) us2 = f.UserStoryFactory.create(project=project, owner=user3, assigned_to=None, status=status1, tags=[tag1, tag2], milestone=None) f.RelatedUserStory.create(user_story=us2, epic=epic1) 
us3 = f.UserStoryFactory.create(project=project, owner=user2, assigned_to=None, status=status0, tags=[tag3]) us4 = f.UserStoryFactory.create(project=project, owner=user1, assigned_to=user1, status=status0, tags=[tag1, tag2, tag3], milestone=None) f.RelatedUserStory.create(user_story=us4, epic=epic0) us5 = f.UserStoryFactory.create(project=project, owner=user3, assigned_to=user1, status=status2, tags=[tag3]) us6 = f.UserStoryFactory.create(project=project, owner=user2, assigned_to=user1, status=status3, tags=[tag1, tag2], milestone=None) f.RelatedUserStory.create(user_story=us6, epic=epic0) f.RelatedUserStory.create(user_story=us6, epic=epic2) us7 = f.UserStoryFactory.create(project=project, owner=user1, assigned_to=user2, status=status0, tags=[tag3]) us8 = f.UserStoryFactory.create(project=project, owner=user3, assigned_to=user2, status=status3, tags=[tag1], milestone=None) f.RelatedUserStory.create(user_story=us8, epic=epic2) us9 = f.UserStoryFactory.create(project=project, owner=user2, assigned_to=user3, status=status1, tags=[tag0], assigned_users=[user1]) data["userstories"] = [us0, us1, us2, us3, us4, us5, us6, us7, us8, us9] return data def test_get_userstories_from_bulk(): data = "User Story #1\nUser Story #2\n" userstories = services.get_userstories_from_bulk(data) assert len(userstories) == 2 assert userstories[0].subject == "User Story #1" assert userstories[1].subject == "User Story #2" def test_create_userstories_in_bulk(): data = "User Story #1\nUser Story #2\n" project = f.ProjectFactory.create() with mock.patch("taiga.projects.userstories.services.db") as db: userstories = services.create_userstories_in_bulk(data, project=project) db.save_in_bulk.assert_called_once_with(userstories, None, None) def test_update_userstories_order_in_bulk(): project = f.ProjectFactory.create() us1 = f.UserStoryFactory.create(project=project, backlog_order=1) us2 = f.UserStoryFactory.create(project=project, backlog_order=2) data = [{"us_id": us1.id, "order": 2}, {"us_id": us2.id, "order": 1}] with mock.patch("taiga.projects.userstories.services.db") as db: services.update_userstories_order_in_bulk(data, "backlog_order", project) db.update_attr_in_bulk_for_ids.assert_called_once_with({us2.id: 1, us1.id: 2}, "backlog_order", models.UserStory) def test_create_userstory_with_assign_to(client): user = f.UserFactory.create() user_watcher = f.UserFactory.create() project = f.ProjectFactory.create(owner=user) f.MembershipFactory.create(project=project, user=user, is_admin=True) f.MembershipFactory.create(project=project, user=user_watcher, is_admin=True) url = reverse("userstories-list") data = {"subject": "Test user story", "project": project.id, "assigned_to": user.id} client.login(user) response = client.json.post(url, json.dumps(data)) assert response.status_code == 201 assert response.data["assigned_to"] == user.id def test_create_userstory_with_assigned_users(client): user = f.UserFactory.create() user_watcher = f.UserFactory.create() project = f.ProjectFactory.create(owner=user) f.MembershipFactory.create(project=project, user=user, is_admin=True) f.MembershipFactory.create(project=project, user=user_watcher, is_admin=True) url = reverse("userstories-list") data = {"subject": "Test user story", "project": project.id, "assigned_users": [user.id, user_watcher.id]} client.login(user) json_data = json.dumps(data) response = client.json.post(url, json_data) assert response.status_code == 201 assert response.data["assigned_users"] == set([user.id, user_watcher.id]) def 
test_create_userstory_with_watchers(client): user = f.UserFactory.create() user_watcher = f.UserFactory.create() project = f.ProjectFactory.create(owner=user) f.MembershipFactory.create(project=project, user=user, is_admin=True) f.MembershipFactory.create(project=project, user=user_watcher, is_admin=True) url = reverse("userstories-list") data = {"subject": "Test user story", "project": project.id, "watchers": [user_watcher.id]} client.login(user) response = client.json.post(url, json.dumps(data)) assert response.status_code == 201 assert response.data["watchers"] == [] def test_create_userstory_without_status(client): user = f.UserFactory.create() project = f.ProjectFactory.create(owner=user) status = f.UserStoryStatusFactory.create(project=project) project.default_us_status = status project.save() f.MembershipFactory.create(project=project, user=user, is_admin=True) url = reverse("userstories-list") data = {"subject": "Test user story", "project": project.id} client.login(user) response = client.json.post(url, json.dumps(data)) assert response.status_code == 201 assert response.data['status'] == project.default_us_status.id def test_create_userstory_without_default_values(client): user = f.UserFactory.create() project = f.ProjectFactory.create(owner=user, default_us_status=None) f.MembershipFactory.create(project=project, user=user, is_admin=True) url = reverse("userstories-list") data = {"subject": "Test user story", "project": project.id} client.login(user) response = client.json.post(url, json.dumps(data)) assert response.status_code == 201 assert response.data['status'] is None def test_api_delete_userstory(client): us = f.UserStoryFactory.create() f.MembershipFactory.create(project=us.project, user=us.owner, is_admin=True) url = reverse("userstories-detail", kwargs={"pk": us.pk}) client.login(us.owner) response = client.delete(url) assert response.status_code == 204 def test_api_filter_by_subject_or_ref(client): user = f.UserFactory.create() project = f.ProjectFactory.create(owner=user) f.MembershipFactory.create(project=project, user=user, is_admin=True) f.UserStoryFactory.create(project=project) f.UserStoryFactory.create(project=project, subject="some random subject") url = reverse("userstories-list") + "?q=some subject" client.login(project.owner) response = client.get(url) number_of_stories = len(response.data) assert response.status_code == 200 assert number_of_stories == 1, number_of_stories def test_api_create_in_bulk_with_status(client): project = f.create_project() f.MembershipFactory.create(project=project, user=project.owner, is_admin=True) url = reverse("userstories-bulk-create") data = { "bulk_stories": "Story #1\nStory #2", "project_id": project.id, "status_id": project.default_us_status.id } client.login(project.owner) response = client.json.post(url, json.dumps(data)) assert response.status_code == 200, response.data assert response.data[0]["status"] == project.default_us_status.id def test_api_create_in_bulk_with_invalid_status(client): project = f.create_project() status = f.UserStoryStatusFactory.create() f.MembershipFactory.create(project=project, user=project.owner, is_admin=True) url = reverse("userstories-bulk-create") data = { "bulk_stories": "Story #1\nStory #2", "project_id": project.id, "status_id": status.id } client.login(project.owner) response = client.json.post(url, json.dumps(data)) assert response.status_code == 400, response.data assert "status_id" in response.data def test_api_create_in_bulk_with_swimlane(client): project = f.create_project() 
f.MembershipFactory.create(project=project, user=project.owner, is_admin=True) swimlane = f.create_swimlane(project=project) url = reverse("userstories-bulk-create") data = { "bulk_stories": "Story #1\nStory #2", "project_id": project.id, "swimlane_id": project.default_swimlane_id, } client.login(project.owner) response = client.json.post(url, json.dumps(data)) assert response.status_code == 200, response.data assert response.data[1]["swimlane"] == project.default_swimlane_id def test_api_create_in_bulk_with_invalid_swimlane(client): project = f.create_project() swimlane = f.create_swimlane() f.MembershipFactory.create(project=project, user=project.owner, is_admin=True) url = reverse("userstories-bulk-create") data = { "bulk_stories": "Story #1\nStory #2", "project_id": project.id, "swimlane_id": swimlane.id, } client.login(project.owner) response = client.json.post(url, json.dumps(data)) assert response.status_code == 400, response.data assert "swimlane_id" in response.data def test_api_create_in_bulk_with_swimlane_unassigned(client): project = f.create_project() f.MembershipFactory.create(project=project, user=project.owner, is_admin=True) url = reverse("userstories-bulk-create") client.login(project.owner) data = { "bulk_stories": "Story #1\nStory #2", "project_id": project.id, "swimlane_id": None, } response = client.json.post(url, json.dumps(data)) assert response.status_code == 200, response.data assert response.data[1]["swimlane"] == None def test_api_update_orders_in_bulk(client): project = f.create_project() f.MembershipFactory.create(project=project, user=project.owner, is_admin=True) us1 = f.create_userstory(project=project) us2 = f.create_userstory(project=project) url1 = reverse("userstories-bulk-update-backlog-order") url3 = reverse("userstories-bulk-update-sprint-order") data = { "project_id": project.id, "bulk_stories": [{"us_id": us1.id, "order": 1}, {"us_id": us2.id, "order": 2}] } client.login(project.owner) response = client.json.post(url1, json.dumps(data)) assert response.status_code == 200, response.data response = client.json.post(url3, json.dumps(data)) assert response.status_code == 200, response.data def test_api_update_orders_in_bulk_invalid_userstories(client): project = f.create_project() f.MembershipFactory.create(project=project, user=project.owner, is_admin=True) us1 = f.create_userstory(project=project) us2 = f.create_userstory(project=project) us3 = f.create_userstory() url1 = reverse("userstories-bulk-update-backlog-order") url3 = reverse("userstories-bulk-update-sprint-order") data = { "project_id": project.id, "bulk_stories": [{"us_id": us1.id, "order": 1}, {"us_id": us2.id, "order": 2}, {"us_id": us3.id, "order": 3}] } client.login(project.owner) response = client.json.post(url1, json.dumps(data)) assert response.status_code == 400, response.data assert "bulk_stories" in response.data response = client.json.post(url3, json.dumps(data)) assert response.status_code == 400, response.data assert "bulk_stories" in response.data def test_api_update_orders_in_bulk_invalid_status(client): project = f.create_project() f.MembershipFactory.create(project=project, user=project.owner, is_admin=True) status = f.UserStoryStatusFactory.create() us1 = f.create_userstory(project=project, status=status) us2 = f.create_userstory(project=project, status=us1.status) us3 = f.create_userstory(project=project) url1 = reverse("userstories-bulk-update-backlog-order") url3 = reverse("userstories-bulk-update-sprint-order") data = { "project_id": project.id, "status_id": 
status.id, "bulk_stories": [{"us_id": us1.id, "order": 1}, {"us_id": us2.id, "order": 2}, {"us_id": us3.id, "order": 3}] } client.login(project.owner) response = client.json.post(url1, json.dumps(data)) assert response.status_code == 400, response.data assert "status_id" in response.data response = client.json.post(url3, json.dumps(data)) assert response.status_code == 400, response.data assert "status_id" in response.data def test_api_update_orders_in_bulk_invalid_milestione(client): project = f.create_project() f.MembershipFactory.create(project=project, user=project.owner, is_admin=True) mil1 = f.MilestoneFactory.create() us1 = f.create_userstory(project=project, milestone=mil1) us2 = f.create_userstory(project=project, milestone=mil1) us3 = f.create_userstory(project=project) url1 = reverse("userstories-bulk-update-backlog-order") url3 = reverse("userstories-bulk-update-sprint-order") data = { "project_id": project.id, "milestone_id": mil1.id, "bulk_stories": [{"us_id": us1.id, "order": 1}, {"us_id": us2.id, "order": 2}, {"us_id": us3.id, "order": 3}] } client.login(project.owner) response = client.json.post(url1, json.dumps(data)) assert response.status_code == 400, response.data assert "milestone_id" in response.data assert "bulk_stories" in response.data response = client.json.post(url3, json.dumps(data)) assert response.status_code == 400, response.data assert "milestone_id" in response.data assert "bulk_stories" in response.data def test_api_update_milestone_in_bulk(client): project = f.create_project() f.MembershipFactory.create(project=project, user=project.owner, is_admin=True) milestone = f.MilestoneFactory.create(project=project) us1 = f.create_userstory(project=project) t1 = f.create_task(user_story=us1, project=project) t2 = f.create_task(user_story=us1, project=project) us2 = f.create_userstory(project=project) t3 = f.create_task(user_story=us2, project=project) us3 = f.create_userstory(project=project, milestone=milestone, sprint_order=1) us4 = f.create_userstory(project=project, milestone=milestone, sprint_order=2) url = reverse("userstories-bulk-update-milestone") data = { "project_id": project.id, "milestone_id": milestone.id, "bulk_stories": [{"us_id": us1.id, "order": 2}, {"us_id": us2.id, "order": 3}] } client.login(project.owner) assert project.milestones.get(id=milestone.id).user_stories.count() == 2 response = client.json.post(url, json.dumps(data)) assert response.status_code == 204, response.data assert project.milestones.get(id=milestone.id).user_stories.count() == 4 uss_list = list(project.milestones.get(id=milestone.id).user_stories.order_by("sprint_order") .values_list("id", "sprint_order")) assert uss_list == [(us3.id, 1), (us1.id, 2), (us2.id,3), (us4.id,4)] tasks_list = list(project.milestones.get(id=milestone.id).tasks.order_by("id") .values_list("id", flat=True)) assert tasks_list == [t1.id, t2.id, t3.id] def test_api_update_milestone_in_bulk_invalid_milestone(client): project = f.create_project() f.MembershipFactory.create(project=project, user=project.owner, is_admin=True) us1 = f.create_userstory(project=project) us2 = f.create_userstory(project=project) m2 = f.MilestoneFactory.create() url = reverse("userstories-bulk-update-milestone") data = { "project_id": project.id, "milestone_id": m2.id, "bulk_stories": [{"us_id": us1.id, "order": 1}, {"us_id": us2.id, "order": 2}] } client.login(project.owner) response = client.json.post(url, json.dumps(data)) assert response.status_code == 400 assert "milestone_id" in response.data def 
test_api_update_milestone_in_bulk_invalid_userstories(client): project = f.create_project() f.MembershipFactory.create(project=project, user=project.owner, is_admin=True) us1 = f.create_userstory(project=project) us2 = f.create_userstory() milestone = f.MilestoneFactory.create(project=project) url = reverse("userstories-bulk-update-milestone") data = { "project_id": project.id, "milestone_id": milestone.id, "bulk_stories": [{"us_id": us1.id, "order": 1}, {"us_id": us2.id, "order": 2}] } client.login(project.owner) response = client.json.post(url, json.dumps(data)) assert response.status_code == 400 assert "bulk_stories" in response.data def test_update_userstory_points(client): user1 = f.UserFactory.create() user2 = f.UserFactory.create() project = f.ProjectFactory.create(owner=user1) role1 = f.RoleFactory.create(project=project) role2 = f.RoleFactory.create(project=project) f.MembershipFactory.create(project=project, user=user1, role=role1, is_admin=True) f.MembershipFactory.create(project=project, user=user2, role=role2) points1 = f.PointsFactory.create(project=project, value=None) points2 = f.PointsFactory.create(project=project, value=1) points3 = f.PointsFactory.create(project=project, value=2) us = f.create_userstory(project=project, owner=user1, status__project=project, milestone__project=project) url = reverse("userstories-detail", args=[us.pk]) client.login(user1) # invalid role data = { "version": us.version, "points": { str(role1.pk): points1.pk, str(role2.pk): points2.pk, "222222": points3.pk } } response = client.json.patch(url, json.dumps(data)) assert response.status_code == 400 # invalid point data = { "version": us.version, "points": { str(role1.pk): 999999, str(role2.pk): points2.pk } } response = client.json.patch(url, json.dumps(data)) assert response.status_code == 400 # Api should save successful data = { "version": us.version, "points": { str(role1.pk): points3.pk, str(role2.pk): points2.pk } } response = client.json.patch(url, json.dumps(data)) assert response.data["points"][str(role1.pk)] == points3.pk def test_update_userstory_rolepoints_on_add_new_role(client): # This test is explicitly without assertions. It simple should # works without raising any exception. 
user1 = f.UserFactory.create() user2 = f.UserFactory.create() project = f.ProjectFactory.create(owner=user1) role1 = f.RoleFactory.create(project=project) f.MembershipFactory.create(project=project, user=user1, role=role1) f.PointsFactory.create(project=project, value=2) us = f.UserStoryFactory.create(project=project, owner=user1) # url = reverse("userstories-detail", args=[us.pk]) # client.login(user1) role2 = f.RoleFactory.create(project=project, computable=True) f.MembershipFactory.create(project=project, user=user2, role=role2) us.save() def test_archived_filter(client): user = f.UserFactory.create() project = f.ProjectFactory.create(owner=user) f.MembershipFactory.create(project=project, user=user, is_admin=True) f.UserStoryFactory.create(project=project) archived_status = f.UserStoryStatusFactory.create(is_archived=True) f.UserStoryFactory.create(status=archived_status, project=project) client.login(user) url = reverse("userstories-list") data = {} response = client.get(url, data) assert len(response.data) == 2 data = {"status__is_archived": 0} response = client.get(url, data) assert len(response.data) == 1 data = {"status__is_archived": 1} response = client.get(url, data) assert len(response.data) == 1 def test_filter_by_multiple_status(client): user = f.UserFactory.create() project = f.ProjectFactory.create(owner=user) f.MembershipFactory.create(project=project, user=user, is_admin=True) f.UserStoryFactory.create(project=project) us1 = f.UserStoryFactory.create(project=project) us2 = f.UserStoryFactory.create(project=project) client.login(user) url = "{}?status={},{}".format(reverse("userstories-list"), us1.status.id, us2.status.id) data = {} response = client.get(url, data) assert len(response.data) == 2 def test_get_total_points(client): project = f.ProjectFactory.create() role1 = f.RoleFactory.create(project=project) role2 = f.RoleFactory.create(project=project) points1 = f.PointsFactory.create(project=project, value=None) points2 = f.PointsFactory.create(project=project, value=1) points3 = f.PointsFactory.create(project=project, value=2) us_with_points = f.UserStoryFactory.create(project=project) us_with_points.role_points.all().delete() f.RolePointsFactory.create(user_story=us_with_points, role=role1, points=points2) f.RolePointsFactory.create(user_story=us_with_points, role=role2, points=points3) assert us_with_points.get_total_points() == 3.0 us_without_points = f.UserStoryFactory.create(project=project) us_without_points.role_points.all().delete() f.RolePointsFactory.create(user_story=us_without_points, role=role1, points=points1) f.RolePointsFactory.create(user_story=us_without_points, role=role2, points=points1) assert us_without_points.get_total_points() is None us_mixed = f.UserStoryFactory.create(project=project) us_mixed.role_points.all().delete() f.RolePointsFactory.create(user_story=us_mixed, role=role1, points=points1) f.RolePointsFactory.create(user_story=us_mixed, role=role2, points=points2) assert us_mixed.get_total_points() == 1.0 def test_api_filter_by_created_date(client): user = f.UserFactory(is_superuser=True) one_day_ago = datetime.now(pytz.utc) - timedelta(days=1) old_userstory = f.create_userstory(owner=user, created_date=one_day_ago) userstory = f.create_userstory(owner=user, subject="test") url = reverse("userstories-list") + "?created_date=%s" % ( quote(userstory.created_date.isoformat()) ) client.login(userstory.owner) response = client.get(url) number_of_userstories = len(response.data) assert response.status_code == 200 assert number_of_userstories 
== 1 assert response.data[0]["subject"] == userstory.subject def test_api_filter_by_created_date__lt(client): user = f.UserFactory(is_superuser=True) one_day_ago = datetime.now(pytz.utc) - timedelta(days=1) old_userstory = f.create_userstory( owner=user, created_date=one_day_ago, subject="old test" ) userstory = f.create_userstory(owner=user) url = reverse("userstories-list") + "?created_date__lt=%s" % ( quote(userstory.created_date.isoformat()) ) client.login(userstory.owner) response = client.get(url) number_of_userstories = len(response.data) assert response.status_code == 200 assert response.data[0]["subject"] == old_userstory.subject def test_api_filter_by_created_date__lte(client): user = f.UserFactory(is_superuser=True) one_day_ago = datetime.now(pytz.utc) - timedelta(days=1) old_userstory = f.create_userstory(owner=user, created_date=one_day_ago) userstory = f.create_userstory(owner=user) url = reverse("userstories-list") + "?created_date__lte=%s" % ( quote(userstory.created_date.isoformat()) ) client.login(userstory.owner) response = client.get(url) number_of_userstories = len(response.data) assert response.status_code == 200 assert number_of_userstories == 2 def test_api_filter_by_modified_date__gte(client): user = f.UserFactory(is_superuser=True) older_userstory = f.create_userstory(owner=user) userstory = f.create_userstory(owner=user, subject="test") # we have to refresh as it slightly differs userstory.refresh_from_db() assert older_userstory.modified_date < userstory.modified_date url = reverse("userstories-list") + "?modified_date__gte=%s" % ( quote(userstory.modified_date.isoformat()) ) client.login(userstory.owner) response = client.get(url) number_of_userstories = len(response.data) assert response.status_code == 200 assert number_of_userstories == 1 assert response.data[0]["subject"] == userstory.subject def test_api_filter_by_finish_date(client): user = f.UserFactory(is_superuser=True) one_day_later = datetime.now(pytz.utc) + timedelta(days=1) userstory = f.create_userstory(owner=user) userstory_to_finish = f.create_userstory( owner=user, finish_date=one_day_later, subject="test" ) assert userstory_to_finish.finish_date url = reverse("userstories-list") + "?finish_date__gte=%s" % ( quote(userstory_to_finish.finish_date.isoformat()) ) client.login(userstory.owner) response = client.get(url) number_of_userstories = len(response.data) assert response.status_code == 200 assert number_of_userstories == 1 assert response.data[0]["subject"] == userstory_to_finish.subject def test_api_filter_by_assigned_users(client): user = f.UserFactory(is_superuser=True) user2 = f.UserFactory(is_superuser=True) project = f.ProjectFactory.create(owner=user) f.MembershipFactory.create(user=user, project=project) f.create_userstory(owner=user, subject="test 2 users", assigned_to=user, assigned_users=[user.id, user2.id], project=project) f.create_userstory( owner=user, subject="test 1 user", assigned_to=user, assigned_users=[user.id], project=project ) url = reverse("userstories-list") + "?assigned_users=%s" % (user.id) client.login(user) response = client.get(url) number_of_userstories = len(response.data) assert response.status_code == 200 assert number_of_userstories == 2 def test_api_filter_by_role(client): project = f.ProjectFactory.create() role1 = f.RoleFactory.create() user = f.UserFactory(is_superuser=True) user2 = f.UserFactory(is_superuser=True) f.MembershipFactory.create(user=user2, project=project, role=role1) userstory = f.create_userstory(owner=user, subject="test 2 users", 
assigned_to=user, assigned_users=[user.id, user2.id], project=project) f.create_userstory( owner=user, subject="test 1 user", assigned_to=user, assigned_users=[user.id], project=project ) url = reverse("userstories-list") + "?role=%s" % (role1.id) client.login(userstory.owner) response = client.get(url) number_of_userstories = len(response.data) assert response.status_code == 200 assert number_of_userstories == 1 @pytest.mark.parametrize("field_name", ["estimated_start", "estimated_finish"]) def test_api_filter_by_milestone__estimated_start_and_end(client, field_name): user = f.UserFactory(is_superuser=True) userstory = f.create_userstory(owner=user) assert userstory.milestone assert hasattr(userstory.milestone, field_name) date = getattr(userstory.milestone, field_name) before = (date - timedelta(days=1)).isoformat() after = (date + timedelta(days=1)).isoformat() client.login(userstory.owner) full_field_name = "milestone__" + field_name expections = { full_field_name + "__gte=" + quote(before): 1, full_field_name + "__gte=" + quote(after): 0, full_field_name + "__lte=" + quote(before): 0, full_field_name + "__lte=" + quote(after): 1 } for param, expection in expections.items(): url = reverse("userstories-list") + "?" + param response = client.get(url) number_of_userstories = len(response.data) assert response.status_code == 200 assert number_of_userstories == expection, param if number_of_userstories > 0: assert response.data[0]["subject"] == userstory.subject def test_api_filters_data(client): data = create_uss_fixtures() project = data["project"] (user1, user2, user3, ) = data["users"] (status0, status1, status2, status3, ) = data["statuses"] (epic0, epic1, epic2, ) = data["epics"] (tag0, tag1, tag2, tag3, ) = data["tags"] url = reverse("userstories-filters-data") + "?project={}".format(project.id) client.login(user1) # Check filter fields response = client.get(url) assert response.status_code == 200 owners = next(filter(lambda i: i['id'] == user1.id, response.data["owners"])) assert len(owners) == 6 assert 'id' in owners assert 'count' in owners assert 'full_name' in owners assert 'photo' in owners assert 'big_photo' in owners assert 'gravatar_id' in owners assigned_users = next(filter(lambda i: i['id'] == user1.id, response.data["assigned_users"])) assert len(assigned_users) == 6 assert 'id' in assigned_users assert 'count' in assigned_users assert 'full_name' in assigned_users assert 'photo' in assigned_users assert 'big_photo' in assigned_users assert 'gravatar_id' in assigned_users # No filter response = client.get(url) assert response.status_code == 200 assert next(filter(lambda i: i['id'] == user1.id, response.data["owners"]))["count"] == 3 assert next(filter(lambda i: i['id'] == user2.id, response.data["owners"]))["count"] == 4 assert next(filter(lambda i: i['id'] == user3.id, response.data["owners"]))["count"] == 3 assert next(filter(lambda i: i['id'] is None, response.data["assigned_to"]))["count"] == 4 assert next(filter(lambda i: i['id'] == user1.id, response.data["assigned_to"]))["count"] == 3 assert next(filter(lambda i: i['id'] == user2.id, response.data["assigned_to"]))["count"] == 2 assert next(filter(lambda i: i['id'] == user3.id, response.data["assigned_to"]))["count"] == 1 assert next(filter(lambda i: i['id'] == user1.id, response.data["assigned_users"]))["count"] == 5 assert next(filter(lambda i: i['id'] == user2.id, response.data["assigned_users"]))["count"] == 2 assert next(filter(lambda i: i['id'] == status0.id, response.data["statuses"]))["count"] == 3 assert 
def test_api_filters_data(client):
    data = create_uss_fixtures()
    project = data["project"]
    (user1, user2, user3, ) = data["users"]
    (status0, status1, status2, status3, ) = data["statuses"]
    (epic0, epic1, epic2, ) = data["epics"]
    (tag0, tag1, tag2, tag3, ) = data["tags"]

    url = reverse("userstories-filters-data") + "?project={}".format(project.id)

    client.login(user1)

    # Check filter fields
    response = client.get(url)
    assert response.status_code == 200

    owners = next(filter(lambda i: i['id'] == user1.id, response.data["owners"]))
    assert len(owners) == 6
    assert 'id' in owners
    assert 'count' in owners
    assert 'full_name' in owners
    assert 'photo' in owners
    assert 'big_photo' in owners
    assert 'gravatar_id' in owners

    assigned_users = next(filter(lambda i: i['id'] == user1.id,
                                 response.data["assigned_users"]))
    assert len(assigned_users) == 6
    assert 'id' in assigned_users
    assert 'count' in assigned_users
    assert 'full_name' in assigned_users
    assert 'photo' in assigned_users
    assert 'big_photo' in assigned_users
    assert 'gravatar_id' in assigned_users

    # No filter
    response = client.get(url)
    assert response.status_code == 200
    assert next(filter(lambda i: i['id'] == user1.id, response.data["owners"]))["count"] == 3
    assert next(filter(lambda i: i['id'] == user2.id, response.data["owners"]))["count"] == 4
    assert next(filter(lambda i: i['id'] == user3.id, response.data["owners"]))["count"] == 3
    assert next(filter(lambda i: i['id'] is None, response.data["assigned_to"]))["count"] == 4
    assert next(filter(lambda i: i['id'] == user1.id, response.data["assigned_to"]))["count"] == 3
    assert next(filter(lambda i: i['id'] == user2.id, response.data["assigned_to"]))["count"] == 2
    assert next(filter(lambda i: i['id'] == user3.id, response.data["assigned_to"]))["count"] == 1
    assert next(filter(lambda i: i['id'] == user1.id, response.data["assigned_users"]))["count"] == 5
    assert next(filter(lambda i: i['id'] == user2.id, response.data["assigned_users"]))["count"] == 2
    assert next(filter(lambda i: i['id'] == status0.id, response.data["statuses"]))["count"] == 3
    assert next(filter(lambda i: i['id'] == status1.id, response.data["statuses"]))["count"] == 2
    assert next(filter(lambda i: i['id'] == status2.id, response.data["statuses"]))["count"] == 1
    assert next(filter(lambda i: i['id'] == status3.id, response.data["statuses"]))["count"] == 4
    assert next(filter(lambda i: i['name'] == tag0, response.data["tags"]))["count"] == 1
    assert next(filter(lambda i: i['name'] == tag1, response.data["tags"]))["count"] == 5
    assert next(filter(lambda i: i['name'] == tag2, response.data["tags"]))["count"] == 4
    assert next(filter(lambda i: i['name'] == tag3, response.data["tags"]))["count"] == 4
    assert next(filter(lambda i: i['id'] is None, response.data["epics"]))["count"] == 5
    assert next(filter(lambda i: i['id'] == epic0.id, response.data["epics"]))["count"] == 3
    assert next(filter(lambda i: i['id'] == epic1.id, response.data["epics"]))["count"] == 1
    assert next(filter(lambda i: i['id'] == epic2.id, response.data["epics"]))["count"] == 2

    # Filter (status0 or status3)
    response = client.get(url + "&status={},{}".format(status3.id, status0.id))
    assert response.status_code == 200
    assert next(filter(lambda i: i['id'] == user1.id, response.data["owners"]))["count"] == 3
    assert next(filter(lambda i: i['id'] == user2.id, response.data["owners"]))["count"] == 3
    assert next(filter(lambda i: i['id'] == user3.id, response.data["owners"]))["count"] == 1
    assert next(filter(lambda i: i['id'] is None, response.data["assigned_to"]))["count"] == 3
    assert next(filter(lambda i: i['id'] == user1.id, response.data["assigned_to"]))["count"] == 2
    assert next(filter(lambda i: i['id'] == user2.id, response.data["assigned_to"]))["count"] == 2
    assert next(filter(lambda i: i['id'] == user3.id, response.data["assigned_to"]))["count"] == 0
    assert next(filter(lambda i: i['id'] == status0.id, response.data["statuses"]))["count"] == 3
    assert next(filter(lambda i: i['id'] == status1.id, response.data["statuses"]))["count"] == 2
    assert next(filter(lambda i: i['id'] == status2.id, response.data["statuses"]))["count"] == 1
    assert next(filter(lambda i: i['id'] == status3.id, response.data["statuses"]))["count"] == 4
    assert next(filter(lambda i: i['name'] == tag0, response.data["tags"]))["count"] == 0
    assert next(filter(lambda i: i['name'] == tag1, response.data["tags"]))["count"] == 4
    assert next(filter(lambda i: i['name'] == tag2, response.data["tags"]))["count"] == 3
    assert next(filter(lambda i: i['name'] == tag3, response.data["tags"]))["count"] == 3
    assert next(filter(lambda i: i['id'] is None, response.data["epics"]))["count"] == 3
    assert next(filter(lambda i: i['id'] == epic0.id, response.data["epics"]))["count"] == 3
    assert next(filter(lambda i: i['id'] == epic1.id, response.data["epics"]))["count"] == 0
    assert next(filter(lambda i: i['id'] == epic2.id, response.data["epics"]))["count"] == 2

    # Filter ((tag1 and tag2) and (user1 or user2))
    response = client.get(url + "&tags={},{}&owner={},{}".format(tag1, tag2, user1.id, user2.id))
    assert response.status_code == 200
    assert next(filter(lambda i: i['id'] == user1.id, response.data["owners"]))["count"] == 2
    assert next(filter(lambda i: i['id'] == user2.id, response.data["owners"]))["count"] == 2
    assert next(filter(lambda i: i['id'] == user3.id, response.data["owners"]))["count"] == 2
    assert next(filter(lambda i: i['id'] is None, response.data["assigned_to"]))["count"] == 2
    assert next(filter(lambda i: i['id'] == user1.id, response.data["assigned_to"]))["count"] == 2
    assert next(filter(lambda i: i['id'] == user2.id, response.data["assigned_to"]))["count"] == 0
    assert next(filter(lambda i: i['id'] == user3.id, response.data["assigned_to"]))["count"] == 0
    assert next(filter(lambda i: i['id'] == status0.id, response.data["statuses"]))["count"] == 1
    assert next(filter(lambda i: i['id'] == status1.id, response.data["statuses"]))["count"] == 0
    assert next(filter(lambda i: i['id'] == status2.id, response.data["statuses"]))["count"] == 0
    assert next(filter(lambda i: i['id'] == status3.id, response.data["statuses"]))["count"] == 3
    assert next(filter(lambda i: i['name'] == tag0, response.data["tags"]))["count"] == 1
    assert next(filter(lambda i: i['name'] == tag1, response.data["tags"]))["count"] == 3
    assert next(filter(lambda i: i['name'] == tag2, response.data["tags"]))["count"] == 3
    assert next(filter(lambda i: i['name'] == tag3, response.data["tags"]))["count"] == 3
    assert next(filter(lambda i: i['id'] is None, response.data["epics"]))["count"] == 1
    assert next(filter(lambda i: i['id'] == epic0.id, response.data["epics"]))["count"] == 3
    assert next(filter(lambda i: i['id'] == epic1.id, response.data["epics"]))["count"] == 0
    assert next(filter(lambda i: i['id'] == epic2.id, response.data["epics"]))["count"] == 1

    # Filter (epic0 or epic2)
    response = client.get(url + "&epic={},{}".format(epic0.id, epic2.id))
    assert response.status_code == 200
    assert next(filter(lambda i: i['id'] == user1.id, response.data["owners"]))["count"] == 1
    assert next(filter(lambda i: i['id'] == user2.id, response.data["owners"]))["count"] == 2
    assert next(filter(lambda i: i['id'] == user3.id, response.data["owners"]))["count"] == 1
    assert next(filter(lambda i: i['id'] is None, response.data["assigned_to"]))["count"] == 1
    assert next(filter(lambda i: i['id'] == user1.id, response.data["assigned_to"]))["count"] == 2
    assert next(filter(lambda i: i['id'] == user2.id, response.data["assigned_to"]))["count"] == 1
    assert next(filter(lambda i: i['id'] == user3.id, response.data["assigned_to"]))["count"] == 0
    assert next(filter(lambda i: i['id'] == status0.id, response.data["statuses"]))["count"] == 1
    assert next(filter(lambda i: i['id'] == status1.id, response.data["statuses"]))["count"] == 0
    assert next(filter(lambda i: i['id'] == status2.id, response.data["statuses"]))["count"] == 0
    assert next(filter(lambda i: i['id'] == status3.id, response.data["statuses"]))["count"] == 3
    assert next(filter(lambda i: i['name'] == tag0, response.data["tags"]))["count"] == 0
    assert next(filter(lambda i: i['name'] == tag1, response.data["tags"]))["count"] == 4
    assert next(filter(lambda i: i['name'] == tag2, response.data["tags"]))["count"] == 2
    assert next(filter(lambda i: i['name'] == tag3, response.data["tags"]))["count"] == 1
    assert next(filter(lambda i: i['id'] is None, response.data["epics"]))["count"] == 5
    assert next(filter(lambda i: i['id'] == epic0.id, response.data["epics"]))["count"] == 3
    assert next(filter(lambda i: i['id'] == epic1.id, response.data["epics"]))["count"] == 1
    assert next(filter(lambda i: i['id'] == epic2.id, response.data["epics"]))["count"] == 2


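# The next two parametrized tests exercise each filter in both its include
# form (?<name>=<value>) and its exclude form (?exclude_<name>=<value>);
# `expected` and `exclude_expected` are the two sides of that partition, so
# each pair sums to the ten fixture stories.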
@pytest.mark.parametrize("filter_name,collection,expected,exclude_expected,is_text", [
    ('status', 'statuses', 3, 7, False),
    ('tags', 'tags', 1, 9, True),
    ('owner', 'users', 3, 7, False),
    ('role', 'roles', 5, 5, False),
    ('assigned_users', 'users', 5, 5, False),
])
def test_api_filters(client, filter_name, collection, expected, exclude_expected, is_text):
    data = create_uss_fixtures()
    project = data["project"]
    options = data[collection]
    client.login(data["users"][0])

    if is_text:
        param = options[0]
    else:
        param = options[0].id

    # include test
    url = "{}?project={}&&{}={}".format(reverse('userstories-list'),
                                        project.id, filter_name, param)
    response = client.get(url)
    assert response.status_code == 200
    assert len(response.data) == expected
    assert "taiga-info-backlog-total-userstories" in response["access-control-expose-headers"]
    assert response.has_header("Taiga-Info-Backlog-Total-Userstories") == False

    # exclude test
    url = "{}?project={}&&exclude_{}={}".format(reverse('userstories-list'),
                                                project.id, filter_name, param)
    response = client.get(url)
    assert response.status_code == 200
    assert len(response.data) == exclude_expected
    assert "taiga-info-backlog-total-userstories" in response["access-control-expose-headers"]
    assert response.has_header("Taiga-Info-Backlog-Total-Userstories") == False


@pytest.mark.parametrize("filter_name,collection,expected,exclude_expected,backlog_total_uss,is_text", [
    ('status', 'statuses', 1, 4, 5, False),
    ('tags', 'tags', 0, 5, 5, True),
    ('owner', 'users', 1, 4, 5, False),
    ('role', 'roles', 2, 3, 5, False),
    ('assigned_users', 'users', 2, 3, 5, False),
])
def test_api_filters_for_backlog(client, filter_name, collection, expected,
                                 exclude_expected, backlog_total_uss, is_text):
    data = create_uss_fixtures()
    project = data["project"]
    options = data[collection]
    client.login(data["users"][0])

    if is_text:
        param = options[0]
    else:
        param = options[0].id

    # include test
    url = "{}?project={}&milestone=null&{}={}".format(reverse('userstories-list'),
                                                      project.id, filter_name, param)
    response = client.get(url)
    assert response.status_code == 200
    assert len(response.data) == expected
    assert "taiga-info-backlog-total-userstories" in response["access-control-expose-headers"]
    assert response.has_header("Taiga-Info-Backlog-Total-Userstories") == True
    assert response["taiga-info-backlog-total-userstories"] == f"{backlog_total_uss}"

    # exclude test
    url = "{}?project={}&milestone=null&exclude_{}={}".format(reverse('userstories-list'),
                                                              project.id, filter_name, param)
    response = client.get(url)
    assert response.status_code == 200
    assert len(response.data) == exclude_expected
    assert "taiga-info-backlog-total-userstories" in response["access-control-expose-headers"]
    assert response.has_header("Taiga-Info-Backlog-Total-Userstories") == True
    assert response["taiga-info-backlog-total-userstories"] == f"{backlog_total_uss}"


def test_api_filters_tags_or_operator(client):
    data = create_uss_fixtures()
    project = data["project"]
    client.login(data["users"][0])
    tags = data["tags"]

    url = "{}?project={}&tags={},{}".format(reverse('userstories-list'),
                                            project.id, tags[0], tags[2])
    response = client.get(url)
    assert response.status_code == 200
    assert len(response.data) == 5


def test_api_filters_data_with_assigned_users(client):
    project = f.ProjectFactory.create()
    user1 = f.UserFactory.create(is_superuser=True)
    f.MembershipFactory.create(user=user1, project=project)
    user2 = f.UserFactory.create(is_superuser=True)
    f.MembershipFactory.create(user=user2, project=project)
    user3 = f.UserFactory.create(is_superuser=True)
    f.MembershipFactory.create(user=user3, project=project)

    status0 = f.UserStoryStatusFactory.create(project=project)
    status1 = f.UserStoryStatusFactory.create(project=project)
    status2 = f.UserStoryStatusFactory.create(project=project)
    status3 = f.UserStoryStatusFactory.create(project=project)

    # -----------------------------------------------------------
    # | US    | Status  | Owner  | Assigned To | Assigned Users |
    # |-------#---------#--------#-------------#-----------------
    # |  0    | status3 | user2  | user2       | user2, user3   |
    # |  1    | status3 | user1  | None        | None           |
    # |  2    | status1 | user3  | None        | None           |
    # |  3    | status0 | user2  | None        | None           |
    # |  4    | status0 | user1  | user1       | user1          |
    # -----------------------------------------------------------

    us0 = f.UserStoryFactory.create(project=project, owner=user2,
                                    assigned_to=user2,
                                    assigned_users=[user2, user3],
                                    status=status3)
    f.RelatedUserStory.create(user_story=us0)
    us1 = f.UserStoryFactory.create(project=project, owner=user1,
                                    assigned_to=None, status=status3)
    us2 = f.UserStoryFactory.create(project=project, owner=user3,
                                    assigned_to=None, status=status1)
    f.RelatedUserStory.create(user_story=us2)
    us3 = f.UserStoryFactory.create(project=project, owner=user2,
                                    assigned_to=None, status=status0)
    us4 = f.UserStoryFactory.create(project=project, owner=user1,
                                    assigned_to=user1, assigned_users=[user1],
                                    status=status0)

    url = reverse("userstories-filters-data") + "?project={}".format(project.id)

    client.login(user1)

    # Check filter fields
    response = client.get(url)
    assert response.status_code == 200

    owners = next(filter(lambda i: i['id'] == user1.id, response.data["owners"]))
    assert len(owners) == 6
    assert 'id' in owners
    assert 'count' in owners
    assert 'full_name' in owners
    assert 'photo' in owners
    assert 'big_photo' in owners
    assert 'gravatar_id' in owners

    assigned_users = next(filter(lambda i: i['id'] == user1.id,
                                 response.data["assigned_users"]))
    assert len(assigned_users) == 6
    assert 'id' in assigned_users
    assert 'count' in assigned_users
    assert 'full_name' in assigned_users
    assert 'photo' in assigned_users
    assert 'big_photo' in assigned_users
    assert 'gravatar_id' in assigned_users

    # No filter
    response = client.get(url)
    assert response.status_code == 200
    assert next(filter(lambda i: i['id'] == user1.id, response.data["owners"]))["count"] == 2
    assert next(filter(lambda i: i['id'] == user2.id, response.data["owners"]))["count"] == 2
    assert next(filter(lambda i: i['id'] == user3.id, response.data["owners"]))["count"] == 1
    assert next(filter(lambda i: i['id'] is None, response.data["assigned_to"]))["count"] == 3
    assert next(filter(lambda i: i['id'] == user1.id, response.data["assigned_to"]))["count"] == 1
    assert next(filter(lambda i: i['id'] == user2.id, response.data["assigned_to"]))["count"] == 1
    assert next(filter(lambda i: i['id'] == user3.id, response.data["assigned_to"]))["count"] == 0
    assert next(filter(lambda i: i['id'] == status0.id, response.data["statuses"]))["count"] == 2
    assert next(filter(lambda i: i['id'] == status1.id, response.data["statuses"]))["count"] == 1
    assert next(filter(lambda i: i['id'] == status2.id, response.data["statuses"]))["count"] == 0
    assert next(filter(lambda i: i['id'] == status3.id, response.data["statuses"]))["count"] == 2
    assert next(filter(lambda i: i['id'] == user1.id, response.data["assigned_users"]))["count"] == 1
    assert next(filter(lambda i: i['id'] == user2.id, response.data["assigned_users"]))["count"] == 1
    assert next(filter(lambda i: i['id'] == user3.id, response.data["assigned_users"]))["count"] == 1


def test_api_filters_data_roles_with_assigned_users(client):
    project = f.ProjectFactory.create()
    role1 = f.RoleFactory.create(project=project)
    role2 = f.RoleFactory.create(project=project)
    user1 = f.UserFactory.create(is_superuser=True)
    f.MembershipFactory.create(user=user1, project=project, role=role1)
    user2 = f.UserFactory.create(is_superuser=True)
    f.MembershipFactory.create(user=user2, project=project, role=role2)
    user3 = f.UserFactory.create(is_superuser=True)
    f.MembershipFactory.create(user=user3, project=project, role=role1)

    # ----------------------------------------------------------------
    # | US    | Owner  | Assigned To | Assigned Users | Role          |
    # |-------#--------#-------------#----------------#---------------
    # |  0    | user2  | user2       | user2, user3   | role2, role1  |
    # |  1    | user1  | None        | None           | None          |
    # |  2    | user1  | user1       | user1          | role1         |
    # ----------------------------------------------------------------

    us0 = f.UserStoryFactory.create(project=project, owner=user2,
                                    status__project=project,
                                    assigned_to=user2,
                                    assigned_users=[user2, user3],)
    f.RelatedUserStory.create(user_story=us0)
    us1 = f.UserStoryFactory.create(project=project, owner=user1,
                                    status__project=project,
                                    assigned_to=None)
    us2 = f.UserStoryFactory.create(project=project, owner=user1,
                                    status__project=project,
                                    assigned_to=user1,
                                    assigned_users=[user1],)

    url = reverse("userstories-filters-data") + "?project={}".format(project.id)

    client.login(user1)

    # No filter
    response = client.get(url)
    assert response.status_code == 200
    assert next(filter(lambda i: i['id'] == user1.id, response.data["owners"]))["count"] == 2
    assert next(filter(lambda i: i['id'] == user2.id, response.data["owners"]))["count"] == 1
    assert next(filter(lambda i: i['id'] is None, response.data["assigned_to"]))["count"] == 1
    assert next(filter(lambda i: i['id'] == user1.id, response.data["assigned_to"]))["count"] == 1
    assert next(filter(lambda i: i['id'] == user2.id, response.data["assigned_to"]))["count"] == 1
    assert next(filter(lambda i: i['id'] == user1.id, response.data["assigned_users"]))["count"] == 1
    assert next(filter(lambda i: i['id'] == user2.id, response.data["assigned_users"]))["count"] == 1
    assert next(filter(lambda i: i['id'] == role1.id, response.data["roles"]))["count"] == 2
    assert next(filter(lambda i: i['id'] == role2.id, response.data["roles"]))["count"] == 1


def test_get_invalid_csv(client):
    url = reverse("userstories-csv")

    response = client.get(url)
    assert response.status_code == 404

    response = client.get("{}?uuid={}".format(url, "not-valid-uuid"))
    assert response.status_code == 404


def test_get_valid_csv(client):
    url = reverse("userstories-csv")
    project = f.ProjectFactory.create(userstories_csv_uuid=uuid.uuid4().hex)

    response = client.get(
        "{}?uuid={}".format(url, project.userstories_csv_uuid))
    assert response.status_code == 200


def test_custom_fields_csv_generation():
    project = f.ProjectFactory.create(userstories_csv_uuid=uuid.uuid4().hex)
    attr = f.UserStoryCustomAttributeFactory.create(project=project,
                                                    name="attr1",
                                                    description="desc")
    us = f.UserStoryFactory.create(project=project)
    attr_values = us.custom_attributes_values
    attr_values.attributes_values = {str(attr.id): "val1"}
    attr_values.save()
    queryset = project.user_stories.all()
    data = services.userstories_to_csv(project, queryset)
    data.seek(0)
    reader = csv.reader(data)
    row = next(reader)
    assert row.pop() == attr.name
    row = next(reader)
    assert row.pop() == "val1"


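# The update tests below always send an explicit "version" field in the
# PATCH payload: user story updates go through optimistic concurrency
# control, which uses that version to detect stale writes.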
"Updating test" assert response.data["watchers"] == [watching_user.id] def test_update_userstory_update_watchers(client): watching_user = f.create_user() project = f.ProjectFactory.create() us = f.UserStoryFactory.create(project=project, status__project=project, milestone__project=project) f.MembershipFactory.create(project=us.project, user=us.owner, is_admin=True) f.MembershipFactory.create(project=us.project, user=watching_user) client.login(user=us.owner) url = reverse("userstories-detail", kwargs={"pk": us.pk}) data = {"watchers": [watching_user.id], "version": 1} response = client.json.patch(url, json.dumps(data)) assert response.status_code == 200 assert response.data["watchers"] == [watching_user.id] watcher_ids = list(us.get_watchers().values_list("id", flat=True)) assert watcher_ids == [watching_user.id] def test_update_userstory_remove_watchers(client): watching_user = f.create_user() project = f.ProjectFactory.create() us = f.UserStoryFactory.create(project=project, status__project=project, milestone__project=project) us.add_watcher(watching_user) f.MembershipFactory.create(project=us.project, user=us.owner, is_admin=True) f.MembershipFactory.create(project=us.project, user=watching_user) client.login(user=us.owner) url = reverse("userstories-detail", kwargs={"pk": us.pk}) data = {"watchers": [], "version": 1} response = client.json.patch(url, json.dumps(data)) assert response.status_code == 200 assert response.data["watchers"] == [] watcher_ids = list(us.get_watchers().values_list("id", flat=True)) assert watcher_ids == [] def test_update_userstory_update_tribe_gig(client): project = f.ProjectFactory.create() us = f.UserStoryFactory.create(project=project, status__project=project, milestone__project=project) f.MembershipFactory.create(project=us.project, user=us.owner, is_admin=True) url = reverse("userstories-detail", kwargs={"pk": us.pk}) data = { "tribe_gig": { "id": 2, "title": "This is a gig test title" }, "version": 1 } client.login(user=us.owner) response = client.json.patch(url, json.dumps(data)) assert response.status_code == 200 assert response.data["tribe_gig"] == data["tribe_gig"] def test_get_user_stories_including_tasks(client): user = f.UserFactory.create() project = f.ProjectFactory.create(owner=user) f.MembershipFactory.create(project=project, user=user, is_admin=True) user_story = f.UserStoryFactory.create(project=project) f.TaskFactory.create(user_story=user_story) url = reverse("userstories-list") client.login(project.owner) response = client.get(url) assert response.status_code == 200 assert response.data[0].get("tasks") == [] url = reverse("userstories-list") + "?include_tasks=1" response = client.get(url) assert response.status_code == 200 assert len(response.data[0].get("tasks")) == 1 def test_get_user_stories_including_attachments(client): user = f.UserFactory.create() project = f.ProjectFactory.create(owner=user) f.MembershipFactory.create(project=project, user=user, is_admin=True) user_story = f.UserStoryFactory.create(project=project) f.UserStoryAttachmentFactory(project=project, content_object=user_story) url = reverse("userstories-list") client.login(project.owner) response = client.get(url) assert response.status_code == 200 assert response.data[0].get("attachments") == [] url = reverse("userstories-list") + "?include_attachments=1" response = client.get(url) assert response.status_code == 200 assert len(response.data[0].get("attachments")) == 1 def test_api_validator_assigned_to_when_update_userstories(client): project = 
f.create_project(anon_permissions=list(map(lambda x: x[0], ANON_PERMISSIONS)), public_permissions=list(map(lambda x: x[0], ANON_PERMISSIONS))) project_member_owner = f.MembershipFactory.create(project=project, user=project.owner, is_admin=True, role__project=project, role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS))) project_member = f.MembershipFactory.create(project=project, is_admin=True, role__project=project, role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS))) project_no_member = f.MembershipFactory.create(is_admin=True) userstory = f.create_userstory(project=project, owner=project.owner, status=project.us_statuses.all()[0]) url = reverse('userstories-detail', kwargs={"pk": userstory.pk}) # assign data = { "assigned_to": project_member.user.id, } with mock.patch.object(OCCResourceMixin, "_validate_and_update_version"): client.login(project.owner) response = client.json.patch(url, json.dumps(data)) assert response.status_code == 200, response.data assert "assigned_to" in response.data assert response.data["assigned_to"] == project_member.user.id # unassign data = { "assigned_to": None, } with mock.patch.object(OCCResourceMixin, "_validate_and_update_version"): client.login(project.owner) response = client.json.patch(url, json.dumps(data)) assert response.status_code == 200, response.data assert "assigned_to" in response.data assert response.data["assigned_to"] == None # assign to invalid user data = { "assigned_to": project_no_member.user.id, } with mock.patch.object(OCCResourceMixin, "_validate_and_update_version"): client.login(project.owner) response = client.json.patch(url, json.dumps(data)) assert response.status_code == 400, response.data def test_api_validator_assigned_to_when_create_userstories(client): project = f.create_project(anon_permissions=list(map(lambda x: x[0], ANON_PERMISSIONS)), public_permissions=list(map(lambda x: x[0], ANON_PERMISSIONS))) project_member_owner = f.MembershipFactory.create(project=project, user=project.owner, is_admin=True, role__project=project, role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS))) project_member = f.MembershipFactory.create(project=project, is_admin=True, role__project=project, role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS))) project_no_member = f.MembershipFactory.create(is_admin=True) url = reverse('userstories-list') # assign data = { "subject": "test", "project": project.id, "assigned_to": project_member.user.id, } with mock.patch.object(OCCResourceMixin, "_validate_and_update_version"): client.login(project.owner) response = client.json.post(url, json.dumps(data)) assert response.status_code == 201, response.data assert "assigned_to" in response.data assert response.data["assigned_to"] == project_member.user.id # unassign data = { "subject": "test", "project": project.id, "assigned_to": None, } with mock.patch.object(OCCResourceMixin, "_validate_and_update_version"): client.login(project.owner) response = client.json.post(url, json.dumps(data)) assert response.status_code == 201, response.data assert "assigned_to" in response.data assert response.data["assigned_to"] == None # assign to invalid user data = { "subject": "test", "project": project.id, "assigned_to": project_no_member.user.id, } with mock.patch.object(OCCResourceMixin, "_validate_and_update_version"): client.login(project.owner) response = client.json.post(url, json.dumps(data)) assert response.status_code == 400, response.data def test_update_userstory_backlog_order(client): user1 = 
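# Note that the validator tests above patch
# OCCResourceMixin._validate_and_update_version so that each request skips
# the version check and exercises only the assigned_to validation.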
def test_update_userstory_backlog_order(client):
    user1 = f.UserFactory.create()
    project = f.create_project(owner=user1)
    f.MembershipFactory.create(project=project, user=project.owner, is_admin=True)
    us1 = f.create_userstory(project=project, owner=user1,
                             status__project=project, milestone=None,
                             backlog_order=0)
    us2 = f.create_userstory(project=project, owner=user1,
                             status__project=project, milestone=None,
                             backlog_order=1)
    us3 = f.create_userstory(project=project, owner=user1,
                             status__project=project, milestone=None,
                             backlog_order=2)
    us4 = f.create_userstory(project=project, owner=user1,
                             status__project=project, milestone=None,
                             backlog_order=3)

    url = reverse("userstories-detail", args=[us4.pk])
    data = {
        "version": us1.version,
        "backlog_order": 1
    }

    client.login(project.owner)
    response = client.json.patch(url, json.dumps(data))
    assert response.status_code == 200, response.data
    assert 1 == response.data['backlog_order']

    url = reverse("userstories-list") + "?milestone=null&project={}".format(project.id)

    client.login(project.owner)
    response = client.get(url)
    user_stories = response.data
    number_of_stories = len(user_stories)

    assert response.status_code == 200
    assert number_of_stories == 4, number_of_stories
    assert 0 == user_stories[0]["backlog_order"]
    assert us1.id == user_stories[0]["id"]
    assert us4.id == user_stories[1]["id"]
    assert 1 == user_stories[1]["backlog_order"]
    assert us2.id == user_stories[2]["id"]
    assert 2 == user_stories[2]["backlog_order"]
    assert us3.id == user_stories[3]["id"]
    assert 3 == user_stories[3]["backlog_order"]


def test_api_update_change_kanban_order_if_project_change(client):
    user1 = f.UserFactory.create()
    project1 = f.create_project(owner=user1)
    f.MembershipFactory.create(project=project1, user=project1.owner, is_admin=True)
    project2 = f.create_project(owner=user1)
    f.MembershipFactory.create(project=project2, user=project2.owner, is_admin=True)
    us = f.create_userstory(project=project1, owner=user1,
                            status=project1.default_us_status)

    url = reverse("userstories-detail", args=[us.pk])
    data = {
        "version": us.version,
        "project": project2.id,
        "status": project2.default_us_status.id,
        "milestone": None,
    }

    client.login(project1.owner)
    response = client.json.patch(url, json.dumps(data))
    assert response.status_code == 200, response.data
    assert us.kanban_order < response.data["kanban_order"]
    assert project2.id == response.data["project"]


def test_api_update_change_kanban_order_if_status_change(client):
    user1 = f.UserFactory.create()
    project = f.create_project(owner=user1)
    f.MembershipFactory.create(project=project, user=project.owner, is_admin=True)
    status1 = f.UserStoryStatusFactory(project=project)
    status2 = f.UserStoryStatusFactory(project=project)
    us = f.create_userstory(project=project, owner=user1, status=status1,
                            swimlane=project.default_swimlane)

    url = reverse("userstories-detail", args=[us.pk])
    data = {
        "version": us.version,
        "status": status2.id
    }

    client.login(project.owner)
    response = client.json.patch(url, json.dumps(data))
    assert response.status_code == 200, response.data
    assert us.kanban_order < response.data["kanban_order"]
    assert status2.id == response.data["status"]


def test_api_update_change_kanban_order_if_swimlane_change(client):
    user1 = f.UserFactory.create()
    project = f.create_project(owner=user1)
    f.MembershipFactory.create(project=project, user=project.owner, is_admin=True)
    swimlane1 = f.SwimlaneFactory(project=project)
    swimlane2 = f.SwimlaneFactory(project=project)
    us = f.create_userstory(project=project, owner=user1,
                            status=project.default_us_status,
                            swimlane=swimlane1)

    url = reverse("userstories-detail", args=[us.pk])
    data = {
        "version": us.version,
        "swimlane": swimlane2.id
    }

    client.login(project.owner)
    response = client.json.patch(url, json.dumps(data))
    assert response.status_code == 200, response.data
    assert us.kanban_order < response.data["kanban_order"]
    assert swimlane2.id == response.data["swimlane"]


def test_api_headers_userstories_without_swimlane_false(client):
    user1 = f.UserFactory.create()
    project = f.create_project(owner=user1)
    f.MembershipFactory.create(project=project, user=project.owner, is_admin=True)
    swimlane1 = f.SwimlaneFactory(project=project)
    us1 = f.create_userstory(project=project, owner=user1,
                             status=project.default_us_status,
                             swimlane=swimlane1)
    us2 = f.create_userstory(project=project, owner=user1,
                             status=project.default_us_status,
                             swimlane=swimlane1)
    us3 = f.create_userstory(project=project, owner=user1,
                             status=project.default_us_status,
                             swimlane=swimlane1)

    url = f"{reverse('userstories-list')}?project={project.id}"

    client.login(project.owner)
    response = client.json.get(url)
    assert response.status_code == 200, response.data
    assert "taiga-info-userstories-without-swimlane" in response["access-control-expose-headers"]
    assert response.has_header("Taiga-Info-Userstories-Without-Swimlane") == True
    assert response["taiga-info-userstories-without-swimlane"] == "false"


def test_api_headers_userstories_without_swimlane_true(client):
    user1 = f.UserFactory.create()
    project = f.create_project(owner=user1)
    f.MembershipFactory.create(project=project, user=project.owner, is_admin=True)
    swimlane1 = f.SwimlaneFactory(project=project)
    us1 = f.create_userstory(project=project, owner=user1,
                             status=project.default_us_status,
                             swimlane=swimlane1)
    us2 = f.create_userstory(project=project, owner=user1,
                             status=project.default_us_status,
                             swimlane=None)
    us3 = f.create_userstory(project=project, owner=user1,
                             status=project.default_us_status,
                             swimlane=swimlane1)

    url = f"{reverse('userstories-list')}?project={project.id}"

    client.login(project.owner)
    response = client.json.get(url)
    assert response.status_code == 200, response.data
    assert "taiga-info-userstories-without-swimlane" in response["access-control-expose-headers"]
    assert response.has_header("Taiga-Info-Userstories-Without-Swimlane") == True
    assert response["taiga-info-userstories-without-swimlane"] == "true"


def test_api_headers_userstories_without_swimlane_not_send(client):
    user1 = f.UserFactory.create()
    project = f.create_project(owner=user1)
    f.MembershipFactory.create(project=project, user=project.owner, is_admin=True)
    swimlane1 = f.SwimlaneFactory(project=project)
    us1 = f.create_userstory(project=project, owner=user1,
                             status=project.default_us_status,
                             swimlane=swimlane1)
    us2 = f.create_userstory(project=project, owner=user1,
                             status=project.default_us_status,
                             swimlane=None)
    us3 = f.create_userstory(project=project, owner=user1,
                             status=project.default_us_status,
                             swimlane=swimlane1)

    url = reverse('userstories-list')

    client.login(project.owner)
    response = client.json.get(url)
    assert response.status_code == 200, response.data
    assert "taiga-info-userstories-without-swimlane" in response["access-control-expose-headers"]
    assert response.has_header("Taiga-Info-Userstories-Without-Swimlane") == False
taigaio/taiga-back
tests/integration/test_userstories.py
Python
agpl-3.0
71,594
#!/usr/bin/env python

import wx
import os
import sys

try:
    dirName = os.path.dirname(os.path.abspath(__file__))
except:
    dirName = os.path.dirname(os.path.abspath(sys.argv[0]))

sys.path.append(os.path.split(dirName)[0])

try:
    from agw import thumbnailctrl as TC
except ImportError:  # if it's not there locally, try the wxPython lib.
    import wx.lib.agw.thumbnailctrl as TC

import images


class ThumbnailCtrlDemo(wx.Frame):

    def __init__(self, parent, log):

        wx.Frame.__init__(self, parent)

        self.SetIcon(images.Mondrian.GetIcon())
        self.SetTitle("ThumbnailCtrl wxPython Demo ;-)")

        self.statusbar = self.CreateStatusBar(2)
        self.statusbar.SetStatusWidths([-2, -1])

        # statusbar fields
        statusbar_fields = [("ThumbnailCtrl Demo, Andrea Gavana @ 10 Dec 2005"),
                            ("Welcome To wxPython!")]

        for i in range(len(statusbar_fields)):
            self.statusbar.SetStatusText(statusbar_fields[i], i)

        self.SetMenuBar(self.CreateMenuBar())

        splitter = wx.SplitterWindow(self, -1, style=wx.CLIP_CHILDREN |
                                     wx.SP_3D | wx.WANTS_CHARS |
                                     wx.SP_LIVE_UPDATE)
        self.panel = wx.Panel(splitter, -1)

        sizer = wx.BoxSizer(wx.HORIZONTAL)

        scroll = TC.ThumbnailCtrl(splitter, -1, imagehandler=TC.NativeImageHandler)
        scroll.ShowFileNames()

        if os.path.isdir("../bitmaps"):
            scroll.ShowDir(os.path.normpath(os.getcwd() + "/../bitmaps"))
        else:
            scroll.ShowDir(os.getcwd())

        self.TC = scroll
        self.log = log

        self.thumbsizer_staticbox = wx.StaticBox(self.panel, -1, "Thumb Style")
        self.customsizer_staticbox = wx.StaticBox(self.panel, -1, "Thumb Customization")
        self.optionsizer_staticbox = wx.StaticBox(self.panel, -1, "More Options")
        self.dirsizer_staticbox = wx.StaticBox(self.panel, -1, "Directory Selection")
        self.dirbutton = wx.Button(self.panel, -1, "Change Directory")
        self.radiostyle1 = wx.RadioButton(self.panel, -1, "THUMB_OUTLINE_NONE",
                                          style=wx.RB_GROUP)
        self.radiostyle2 = wx.RadioButton(self.panel, -1, "THUMB_OUTLINE_FULL")
        self.radiostyle3 = wx.RadioButton(self.panel, -1, "THUMB_OUTLINE_RECT")
        self.radiostyle4 = wx.RadioButton(self.panel, -1, "THUMB_OUTLINE_IMAGE")
        self.highlight = wx.CheckBox(self.panel, -1, "Highlight on pointing")
        self.showfiles = wx.CheckBox(self.panel, -1, "Show file names")
        self.enabledragging = wx.CheckBox(self.panel, -1, "Enable drag and drop")
        self.setpopup = wx.CheckBox(self.panel, -1, "Set popup menu on thumbs")
        self.setgpopup = wx.CheckBox(self.panel, -1, "Set global popup menu")
        self.showcombo = wx.CheckBox(self.panel, -1, "Show folder combobox")
        self.enabletooltip = wx.CheckBox(self.panel, -1, "Enable thumb tooltips")
        self.textzoom = wx.TextCtrl(self.panel, -1, "1.4")
        self.zoombutton = wx.Button(self.panel, -1, "Set zoom factor")
        self.fontbutton = wx.Button(self.panel, -1, "Set caption font")
        self.colourbutton = wx.Button(self.panel, -1, "Set selection colour")

        self.radios = [self.radiostyle1, self.radiostyle2, self.radiostyle3,
                       self.radiostyle4]
        self.thumbstyles = ["THUMB_OUTLINE_NONE", "THUMB_OUTLINE_FULL",
                            "THUMB_OUTLINE_RECT", "THUMB_OUTLINE_IMAGE"]

        self.SetProperties()
        self.DoLayout()

        self.panel.SetSizer(sizer)
        sizer.Layout()

        self.Bind(wx.EVT_RADIOBUTTON, self.OnChangeOutline, self.radiostyle1)
        self.Bind(wx.EVT_RADIOBUTTON, self.OnChangeOutline, self.radiostyle2)
        self.Bind(wx.EVT_RADIOBUTTON, self.OnChangeOutline, self.radiostyle3)
        self.Bind(wx.EVT_RADIOBUTTON, self.OnChangeOutline, self.radiostyle4)
        self.Bind(wx.EVT_CHECKBOX, self.OnHighlight, self.highlight)
        self.Bind(wx.EVT_CHECKBOX, self.OnShowFiles, self.showfiles)
        self.Bind(wx.EVT_CHECKBOX, self.OnEnableDragging, self.enabledragging)
        self.Bind(wx.EVT_CHECKBOX, self.OnSetPopup, self.setpopup)
        self.Bind(wx.EVT_CHECKBOX, self.OnSetGlobalPopup, self.setgpopup)
        self.Bind(wx.EVT_CHECKBOX, self.OnShowComboBox, self.showcombo)
        self.Bind(wx.EVT_CHECKBOX, self.OnEnableToolTips, self.enabletooltip)
        self.Bind(wx.EVT_BUTTON, self.OnSetZoom, self.zoombutton)
        self.Bind(wx.EVT_BUTTON, self.OnSetFont, self.fontbutton)
        self.Bind(wx.EVT_BUTTON, self.OnSetColour, self.colourbutton)
        self.Bind(wx.EVT_BUTTON, self.OnSetDirectory, self.dirbutton)

        self.TC.Bind(TC.EVT_THUMBNAILS_SEL_CHANGED, self.OnSelChanged)
        self.TC.Bind(TC.EVT_THUMBNAILS_POINTED, self.OnPointed)
        self.TC.Bind(TC.EVT_THUMBNAILS_DCLICK, self.OnDClick)

        splitter.SplitVertically(scroll, self.panel, 180)

        splitter.SetMinimumPaneSize(140)
        self.SetMinSize((700, 590))
        self.CenterOnScreen()

    def SetProperties(self):

        self.radiostyle4.SetValue(1)
        self.showfiles.SetValue(1)
        boldFont = wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.BOLD, 0, "")
        self.zoombutton.SetFont(boldFont)
        self.fontbutton.SetFont(boldFont)
        self.dirbutton.SetFont(boldFont)
        self.colourbutton.SetFont(boldFont)

    def DoLayout(self):

        splitsizer = wx.BoxSizer(wx.VERTICAL)
        optionsizer = wx.StaticBoxSizer(self.optionsizer_staticbox, wx.VERTICAL)
        zoomsizer = wx.BoxSizer(wx.HORIZONTAL)
        customsizer = wx.StaticBoxSizer(self.customsizer_staticbox, wx.VERTICAL)
        thumbsizer = wx.StaticBoxSizer(self.thumbsizer_staticbox, wx.VERTICAL)
        radiosizer = wx.BoxSizer(wx.VERTICAL)
        dirsizer = wx.StaticBoxSizer(self.dirsizer_staticbox, wx.HORIZONTAL)

        dirsizer.Add(self.dirbutton, 0, wx.LEFT|wx.BOTTOM|wx.ALIGN_CENTER_VERTICAL|wx.ADJUST_MINSIZE, 3)
        splitsizer.Add(dirsizer, 0, wx.EXPAND|wx.TOP|wx.LEFT, 5)

        radiosizer.Add(self.radiostyle1, 0, wx.LEFT|wx.TOP|wx.ADJUST_MINSIZE, 3)
        radiosizer.Add(self.radiostyle2, 0, wx.LEFT|wx.TOP|wx.ADJUST_MINSIZE, 3)
        radiosizer.Add(self.radiostyle3, 0, wx.LEFT|wx.TOP|wx.ADJUST_MINSIZE, 3)
        radiosizer.Add(self.radiostyle4, 0, wx.LEFT|wx.TOP|wx.BOTTOM|wx.ADJUST_MINSIZE, 3)
        thumbsizer.Add(radiosizer, 1, wx.EXPAND, 0)
        splitsizer.Add(thumbsizer, 0, wx.TOP|wx.EXPAND|wx.LEFT, 5)

        customsizer.Add(self.highlight, 0, wx.LEFT|wx.TOP|wx.BOTTOM|wx.ADJUST_MINSIZE, 3)
        customsizer.Add(self.showfiles, 0, wx.LEFT|wx.BOTTOM|wx.ADJUST_MINSIZE, 3)
        customsizer.Add(self.enabledragging, 0, wx.LEFT|wx.BOTTOM|wx.ADJUST_MINSIZE, 3)
        customsizer.Add(self.setpopup, 0, wx.LEFT|wx.BOTTOM|wx.ADJUST_MINSIZE, 3)
        customsizer.Add(self.setgpopup, 0, wx.LEFT|wx.BOTTOM|wx.ADJUST_MINSIZE, 3)
        customsizer.Add(self.showcombo, 0, wx.LEFT|wx.BOTTOM|wx.ADJUST_MINSIZE, 3)
        customsizer.Add(self.enabletooltip, 0, wx.LEFT|wx.BOTTOM|wx.ADJUST_MINSIZE, 3)
        splitsizer.Add(customsizer, 0, wx.TOP|wx.EXPAND|wx.LEFT, 5)

        zoomsizer.Add(self.textzoom, 1, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL|wx.ADJUST_MINSIZE, 3)
        zoomsizer.Add(self.zoombutton, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL|wx.ADJUST_MINSIZE, 3)
        optionsizer.Add(zoomsizer, 1, wx.EXPAND, 0)
        optionsizer.Add(self.fontbutton, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL|wx.ADJUST_MINSIZE, 3)
        optionsizer.Add(self.colourbutton, 0, wx.TOP|wx.LEFT|wx.ALIGN_CENTER_VERTICAL|wx.ADJUST_MINSIZE, 3)
        splitsizer.Add(optionsizer, 0, wx.EXPAND | wx.TOP|wx.LEFT, 5)

        self.panel.SetAutoLayout(True)
        self.panel.SetSizer(splitsizer)
        splitsizer.Fit(self.panel)

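    # Note: wx.NewId(), used below for the menu and popup item ids, still
    # works but is deprecated in wxPython 4.x in favour of wx.NewIdRef().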
    def CreateMenuBar(self):

        file_menu = wx.Menu()

        AS_EXIT = wx.NewId()
        file_menu.Append(AS_EXIT, "&Exit")
        self.Bind(wx.EVT_MENU, self.OnClose, id=AS_EXIT)

        help_menu = wx.Menu()

        AS_ABOUT = wx.NewId()
        help_menu.Append(AS_ABOUT, "&About...")
        self.Bind(wx.EVT_MENU, self.OnAbout, id=AS_ABOUT)

        menu_bar = wx.MenuBar()

        menu_bar.Append(file_menu, "&File")
        menu_bar.Append(help_menu, "&Help")

        return menu_bar

    def OnClose(self, event):

        self.Destroy()

    def OnAbout(self, event):

        msg = "This Is The About Dialog Of The ThumbnailCtrl Demo.\n\n" + \
              "Author: Andrea Gavana @ 10 Dec 2005\n\n" + \
              "Please Report Any Bug/Requests Of Improvements\n" + \
              "To Me At The Following Addresses:\n\n" + \
              "andrea.gavana@agip.it\n" + "andrea_gavana@tin.it\n\n" + \
              "Welcome To wxPython " + wx.VERSION_STRING + "!!"

        dlg = wx.MessageDialog(self, msg, "ThumbnailCtrl Demo",
                               wx.OK | wx.ICON_INFORMATION)
        dlg.SetFont(wx.Font(8, wx.NORMAL, wx.NORMAL, wx.NORMAL, False))
        dlg.ShowModal()
        dlg.Destroy()

    def OnSetDirectory(self, event):

        dlg = wx.DirDialog(self, "Choose a directory with images:",
                           defaultPath=os.getcwd(),
                           style=wx.DD_DEFAULT_STYLE|wx.DD_NEW_DIR_BUTTON)

        # If the user selects OK, then we process the dialog's data.
        # This is done by getting the path data from the dialog - BEFORE
        # we destroy it.
        if dlg.ShowModal() == wx.ID_OK:
            self.TC.ShowDir(dlg.GetPath())
            self.log.write("OnSetDirectory: directory changed to: %s\n"%dlg.GetPath())

        # Only destroy a dialog after you're done with it.
        dlg.Destroy()

    def OnChangeOutline(self, event):  # wxGlade: MyFrame.<event_handler>

        radio = event.GetEventObject()
        pos = self.radios.index(radio)

        if pos == 0:
            self.TC.SetThumbOutline(TC.THUMB_OUTLINE_NONE)
        elif pos == 1:
            self.TC.SetThumbOutline(TC.THUMB_OUTLINE_FULL)
        elif pos == 2:
            self.TC.SetThumbOutline(TC.THUMB_OUTLINE_RECT)
        elif pos == 3:
            self.TC.SetThumbOutline(TC.THUMB_OUTLINE_IMAGE)

        self.TC.Refresh()

        self.log.write("OnChangeOutline: Outline changed to: %s\n"%self.thumbstyles[pos])

        event.Skip()

    def OnHighlight(self, event):  # wxGlade: MyFrame.<event_handler>

        if self.highlight.GetValue() == 1:
            self.TC.SetHighlightPointed(True)
            self.log.write("OnHighlight: Highlight thumbs on pointing\n")
        else:
            self.TC.SetHighlightPointed(False)
            self.log.write("OnHighlight: Don't Highlight thumbs on pointing\n")

        event.Skip()

    def OnShowFiles(self, event):  # wxGlade: MyFrame.<event_handler>

        if self.showfiles.GetValue() == 1:
            self.TC.ShowFileNames(True)
            self.log.write("OnShowFiles: Thumbs file names shown\n")
        else:
            self.TC.ShowFileNames(False)
            self.log.write("OnShowFiles: Thumbs file names not shown\n")

        self.TC.Refresh()

        event.Skip()

    def OnEnableDragging(self, event):

        if self.enabledragging.GetValue() == 1:
            self.TC.EnableDragging(True)
            self.log.write("OnEnableDragging: Thumbs drag and drop enabled\n")
        else:
            self.TC.EnableDragging(False)
            self.log.write("OnEnableDragging: Thumbs drag and drop disabled\n")

        self.TC.Refresh()

        event.Skip()

    def OnSetPopup(self, event):  # wxGlade: MyFrame.<event_handler>

        if self.setpopup.GetValue() == 1:
            menu = self.CreatePopups()
            self.TC.SetPopupMenu(menu)
            self.log.write("OnSetPopup: Popups enabled on thumbs\n")
        else:
            self.TC.SetPopupMenu(None)
            self.log.write("OnSetPopup: Popups disabled on thumbs\n")

        event.Skip()

    def OnSetGlobalPopup(self, event):

        if self.setgpopup.GetValue() == 1:
            menu = self.CreateGlobalPopups()
            self.TC.SetGlobalPopupMenu(menu)
            self.log.write("OnSetGlobalPopup: Popups enabled globally (no selection needed)\n")
        else:
            self.TC.SetGlobalPopupMenu(None)
            self.log.write("OnSetGlobalPopup: Popups disabled globally (no selection needed)\n")

        event.Skip()

    def OnShowComboBox(self, event):

        if self.showcombo.GetValue() == 1:
            self.log.write("OnShowComboBox: Directory comboBox shown\n")
            self.TC.ShowComboBox(True)
        else:
            self.log.write("OnShowComboBox: Directory comboBox hidden\n")
            self.TC.ShowComboBox(False)

        event.Skip()

    def OnEnableToolTips(self, event):

        if self.enabletooltip.GetValue() == 1:
            self.log.write("OnEnableToolTips: File information on tooltips enabled\n")
            self.TC.EnableToolTips(True)
        else:
            self.log.write("OnEnableToolTips: File information on tooltips disabled\n")
            self.TC.EnableToolTips(False)

        event.Skip()

    def OnSetZoom(self, event):  # wxGlade: MyFrame.<event_handler>

        val = self.textzoom.GetValue().strip()

        try:
            val = float(val)
        except ValueError:
            errstr = "Error: a float value is required."
            dlg = wx.MessageDialog(self, errstr, "ThumbnailCtrlDemo Error",
                                   wx.OK | wx.ICON_ERROR)
            dlg.ShowModal()
            dlg.Destroy()
            self.textzoom.SetValue("1.4")
            return

        if val < 1.0:
            errstr = "Error: zoom factor must be greater than 1.0."
            dlg = wx.MessageDialog(self, errstr, "ThumbnailCtrlDemo Error",
                                   wx.OK | wx.ICON_ERROR)
            dlg.ShowModal()
            dlg.Destroy()
            self.textzoom.SetValue("1.4")
            return

        self.TC.SetZoomFactor(val)

        event.Skip()

    def OnSelChanged(self, event):

        self.log.write("OnSelChanged: Thumb selected: %s\n"%str(self.TC.GetSelection()))
        event.Skip()

    def OnPointed(self, event):

        self.log.write("OnPointed: Thumb pointed: %s\n"%self.TC.GetPointed())
        event.Skip()

    def OnDClick(self, event):

        self.log.write("OnDClick: Thumb double-clicked: %s\n"%self.TC.GetSelection())
        event.Skip()

    def OnSetFont(self, event):  # wxGlade: MyFrame.<event_handler>

        data = wx.FontData()
        data.EnableEffects(True)
        data.SetInitialFont(self.TC.GetCaptionFont())

        dlg = wx.FontDialog(self, data)

        if dlg.ShowModal() == wx.ID_OK:

            data = dlg.GetFontData()
            font = data.GetChosenFont()
            self.TC.SetCaptionFont(font)
            self.TC.Refresh()
            self.log.write("OnSetFont: Caption font changed\n")

        # Don't destroy the dialog until you get everything you need from the
        # dialog!
        dlg.Destroy()

        event.Skip()

    def OnSetColour(self, event):

        dlg = wx.ColourDialog(self)

        # Ensure the full colour dialog is displayed,
        # not the abbreviated version.
        dlg.GetColourData().SetChooseFull(True)

        if dlg.ShowModal() == wx.ID_OK:

            # If the user selected OK, then the dialog's wx.ColourData will
            # contain valid information. Fetch the data ...
            data = dlg.GetColourData()

            # ... then do something with it. The actual colour data will be
            # returned as a three-tuple (r, g, b) in this particular case.
            colour = data.GetColour().Get()
            colour = wx.Colour(colour[0], colour[1], colour[2])
            self.TC.SetSelectionColour(colour)
            self.TC.Refresh()

        # Once the dialog is destroyed, Mr. wx.ColourData is no longer your
        # friend. Don't use it again!
        dlg.Destroy()

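    # CreatePopups and CreateGlobalPopups allocate their menu ids lazily the
    # first time they run (the hasattr guards below), so toggling the demo
    # checkboxes reuses the same ids and bindings. Note that CreatePopups also
    # pre-allocates popupID10-12, so if it runs first, CreateGlobalPopups'
    # guard skips its own Bind calls for those ids.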
    def CreatePopups(self):

        if not hasattr(self, "popupID1"):
            self.popupID1 = wx.NewId()
            self.popupID2 = wx.NewId()
            self.popupID3 = wx.NewId()
            self.popupID4 = wx.NewId()
            self.popupID5 = wx.NewId()
            self.popupID6 = wx.NewId()
            self.popupID7 = wx.NewId()
            self.popupID8 = wx.NewId()
            self.popupID9 = wx.NewId()
            self.popupID10 = wx.NewId()
            self.popupID11 = wx.NewId()
            self.popupID12 = wx.NewId()

            self.Bind(wx.EVT_MENU, self.OnPopupOne, id=self.popupID1)
            self.Bind(wx.EVT_MENU, self.OnPopupTwo, id=self.popupID2)
            self.Bind(wx.EVT_MENU, self.OnPopupThree, id=self.popupID3)
            self.Bind(wx.EVT_MENU, self.OnPopupFour, id=self.popupID4)
            self.Bind(wx.EVT_MENU, self.OnPopupFive, id=self.popupID5)
            self.Bind(wx.EVT_MENU, self.OnPopupSix, id=self.popupID6)
            self.Bind(wx.EVT_MENU, self.OnPopupSeven, id=self.popupID7)
            self.Bind(wx.EVT_MENU, self.OnPopupEight, id=self.popupID8)
            self.Bind(wx.EVT_MENU, self.OnPopupNine, id=self.popupID9)

        menu = wx.Menu()
        item = wx.MenuItem(menu, self.popupID1, "One")
        img = images.Mondrian.GetImage()
        img.Rescale(16, 16)
        bmp = img.ConvertToBitmap()
        item.SetBitmap(bmp)
        menu.Append(item)
        # add some other items
        menu.Append(self.popupID2, "Two")
        menu.Append(self.popupID3, "Three")
        menu.Append(self.popupID4, "Four")
        menu.Append(self.popupID5, "Five")
        menu.Append(self.popupID6, "Six")
        # make a submenu
        sm = wx.Menu()
        sm.Append(self.popupID8, "Sub Item 1")
        sm.Append(self.popupID9, "Sub Item 2")
        menu.Append(self.popupID7, "Test Submenu", sm)

        return menu

    def CreateGlobalPopups(self):

        if not hasattr(self, "popupID10"):
            self.popupID10 = wx.NewId()
            self.popupID11 = wx.NewId()
            self.popupID12 = wx.NewId()

            self.Bind(wx.EVT_MENU, self.OnPopupTen, id=self.popupID10)
            self.Bind(wx.EVT_MENU, self.OnPopupEleven, id=self.popupID11)
            self.Bind(wx.EVT_MENU, self.OnPopupTwelve, id=self.popupID12)

        menu = wx.Menu()
        item = wx.MenuItem(menu, self.popupID10, "Select all")
        menu.Append(item)
        menu.AppendSeparator()
        item = wx.MenuItem(menu, self.popupID11, "Say Hello!")
        img = images.Mondrian.GetImage()
        img.Rescale(16, 16)
        bmp = img.ConvertToBitmap()
        item.SetBitmap(bmp)
        menu.Append(item)
        menu.AppendSeparator()
        menu.Append(self.popupID12, "Get thumbs count")

        return menu

    def OnPopupOne(self, event):

        self.log.write("OnPopupMenu: Popup One\n")

    def OnPopupTwo(self, event):

        self.log.write("OnPopupMenu: Popup Two\n")

    def OnPopupThree(self, event):

        self.log.write("OnPopupMenu: Popup Three\n")

    def OnPopupFour(self, event):

        self.log.write("OnPopupMenu: Popup Four\n")

    def OnPopupFive(self, event):

        self.log.write("OnPopupMenu: Popup Five\n")

    def OnPopupSix(self, event):

        self.log.write("OnPopupMenu: Popup Six\n")

    def OnPopupSeven(self, event):

        self.log.write("OnPopupMenu: Popup Seven\n")

    def OnPopupEight(self, event):

        self.log.write("OnPopupMenu: Popup Eight\n")

    def OnPopupNine(self, event):

        self.log.write("OnPopupMenu: Popup Nine\n")

    def OnPopupTen(self, event):

        items = self.TC.GetItemCount()
        self.log.write("Items %s %s\n" % (items, type(items)))

        for ii in range(items):
            self.TC.SetSelection(ii)

        self.log.write("OnGlobalPopupMenu: all thumbs selected\n")

        event.Skip()

    def OnPopupEleven(self, event):

        self.log.write("OnGlobalPopupMenu: say hello message...\n")

        msgstr = "Info: let's say hello to wxPython!"
        dlg = wx.MessageDialog(self, msgstr, "ThumbnailCtrlDemo Info",
                               wx.OK | wx.ICON_INFORMATION)
        dlg.ShowModal()
        dlg.Destroy()

        event.Skip()

    def OnPopupTwelve(self, event):

        items = self.TC.GetItemCount()
        self.log.write("OnGlobalPopupMenu: number of thumbs: %d\n"%items)

        msgstr = "Info: number of thumbs: %d"%items
        dlg = wx.MessageDialog(self, msgstr, "ThumbnailCtrlDemo Info",
                               wx.OK | wx.ICON_INFORMATION)
        dlg.ShowModal()
        dlg.Destroy()

        event.Skip()


#---------------------------------------------------------------------------

class TestPanel(wx.Panel):
    def __init__(self, parent, log):
        self.log = log
        wx.Panel.__init__(self, parent, -1)

        b = wx.Button(self, -1, " Test ThumbnailCtrl ", (50,50))
        self.Bind(wx.EVT_BUTTON, self.OnButton, b)

    def OnButton(self, evt):
        self.win = ThumbnailCtrlDemo(self, self.log)
        self.win.Show(True)

#----------------------------------------------------------------------

def runTest(frame, nb, log):
    try:
        import PIL.Image
        win = TestPanel(nb, log)
        return win
    except ImportError:
        from Main import MessagePanel
        win = MessagePanel(nb, 'This demo requires PIL (Python Imaging Library).',
                           'Sorry', wx.ICON_WARNING)
        return win

#----------------------------------------------------------------------

overview = TC.__doc__

if __name__ == '__main__':
    import sys,os
    import run
    run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
dnxbjyj/python-basic
gui/wxpython/wxPython-demo-4.0.1/demo/agw/ThumbnailCtrl.py
Python
mit
21,653
# -*- coding: utf-8 -*-
# Copyright (c) 2010 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Glance documentation build configuration file, created by
# sphinx-quickstart on Tue May 18 13:50:15 2010.
#
# This file is execfile()'d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import os
import subprocess
import sys
import warnings

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path = [
    os.path.abspath('../..'),
    os.path.abspath('../../bin')
] + sys.path

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.coverage',
              'sphinx.ext.ifconfig',
              'sphinx.ext.graphviz',
              'oslosphinx',
              'stevedore.sphinxext',
              'oslo_config.sphinxext',
              'sphinx.ext.autodoc',
              'sphinx.ext.viewcode',
              'oslo_config.sphinxconfiggen',
              ]

config_generator_config_file = [
    ('../../etc/oslo-config-generator/glance-api.conf',
     '_static/glance-api'),
    ('../../etc/oslo-config-generator/glance-cache.conf',
     '_static/glance-cache'),
    ('../../etc/oslo-config-generator/glance-manage.conf',
     '_static/glance-manage'),
    ('../../etc/oslo-config-generator/glance-registry.conf',
     '_static/glance-registry'),
    ('../../etc/oslo-config-generator/glance-scrubber.conf',
     '_static/glance-scrubber'),
]

# Add any paths that contain templates here, relative to this directory.
# templates_path = []

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Glance'
copyright = u'2010-present, OpenStack Foundation.'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from glance.version import version_info as glance_version
# The full version, including alpha/beta/rc tags.
release = glance_version.version_string_with_vcs()
# The short X.Y version.
version = glance_version.canonical_version_string()

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
#exclude_trees = ['api']
exclude_patterns = [
    # The man directory includes some snippet files that are included
    # in other documents during the build but that should not be
    # included in the toctree themselves, so tell Sphinx to ignore
    # them when scanning for input files.
    'man/footer.rst',
    'man/general_options.rst',
    'man/openstack_options.rst',
]

# The reST default role (for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['glance.']

# -- Options for man page output --------------------------------------------

# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'

man_pages = [
    ('man/glanceapi', 'glance-api', u'Glance API Server',
     [u'OpenStack'], 1),
    ('man/glancecachecleaner', 'glance-cache-cleaner', u'Glance Cache Cleaner',
     [u'OpenStack'], 1),
    ('man/glancecachemanage', 'glance-cache-manage', u'Glance Cache Manager',
     [u'OpenStack'], 1),
    ('man/glancecacheprefetcher', 'glance-cache-prefetcher',
     u'Glance Cache Pre-fetcher', [u'OpenStack'], 1),
    ('man/glancecachepruner', 'glance-cache-pruner', u'Glance Cache Pruner',
     [u'OpenStack'], 1),
    ('man/glancecontrol', 'glance-control', u'Glance Daemon Control Helper ',
     [u'OpenStack'], 1),
    ('man/glancemanage', 'glance-manage', u'Glance Management Utility',
     [u'OpenStack'], 1),
    ('man/glanceregistry', 'glance-registry', u'Glance Registry Server',
     [u'OpenStack'], 1),
    ('man/glancereplicator', 'glance-replicator', u'Glance Replicator',
     [u'OpenStack'], 1),
    ('man/glancescrubber', 'glance-scrubber', u'Glance Scrubber Service',
     [u'OpenStack'], 1)
]

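# These man page entries can be built and inspected locally with Sphinx's
# man builder, e.g. (assuming the usual in-tree doc layout):
#
#     sphinx-build -b man doc/source doc/build/man
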
# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = ['_theme']

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
           "-n1"]
try:
    html_last_updated_fmt = subprocess.Popen(
        git_cmd, stdout=subprocess.PIPE).communicate()[0]
except Exception:
    warnings.warn('Cannot get last updated time from git repository. '
                  'Not setting "html_last_updated_fmt".')

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
html_use_modindex = True

# If false, no index is generated.
html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'glancedoc'

# -- Options for LaTeX output ------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
#  documentclass [howto/manual]).
latex_documents = [
    ('index', 'Glance.tex', u'Glance Documentation',
     u'Glance Team', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True
rajalokan/glance
doc/source/conf.py
Python
apache-2.0
9,900
""" Power Plant """ from collections import OrderedDict from experiments.experiment import Experiment from models.hmc_samplers import HMCSampler, SGHMCSampler from models.ld_samplers import LDSampler, SGLDSampler, pSGLDSampler from experiments.power_plant.pp_env import PowerPlantEnv class PowerPlantExp(Experiment): def __init__(self): super().__init__() def _setup_sampler_defaults(self, sampler_params): sampler_params['noise_precision'] = 5. sampler_params['weights_precision'] = 1. def run_baseline_hmc(self): env = PowerPlantEnv() self.setup_env_defaults(env) env.model_name = 'hmc' env.test_case_name = 'baseline' env.chains_num = 10 env.n_samples = 10 env.thinning = 0 sampler_params = dict() sampler_params['step_sizes'] = .0005 sampler_params['hmc_steps'] = 10 sampler_params['mh_correction'] = True sampler_params['batch_size'] = None sampler_params['seek_step_sizes'] = False sampler_params['fade_in_velocities'] = True env.setup_data_dir() self.configure_env_mcmc(env, HMCSampler, sampler_params) env.run() def run_sgld(self): env = PowerPlantEnv() self.setup_env_defaults(env) env.model_name = 'sgld' env.n_samples = 100 env.thinning = 3 sampler_params = dict() sampler_params['step_sizes'] = .001 sampler_params['fade_in_velocities'] = True env.setup_data_dir() self.configure_env_mcmc(env, SGLDSampler, sampler_params) env.run() def run_sghmc(self): env = PowerPlantEnv() self.setup_env_defaults(env) env.model_name = 'sghmc' env.chains_num = 1 env.n_samples = 50 env.thinning = 0 sampler_params = dict() sampler_params['step_sizes'] = .0005 sampler_params['hmc_steps'] = 25 sampler_params['friction'] = 1. sampler_params['fade_in_velocities'] = True env.setup_data_dir() self.configure_env_mcmc(env, SGHMCSampler, sampler_params) env.run() def run_psgld(self): env = PowerPlantEnv() self.setup_env_defaults(env) env.model_name = 'psgld' env.n_samples = 100 env.thinning = 4 sampler_params = dict() sampler_params['step_sizes'] = .001 sampler_params['preconditioned_alpha'] = .999 sampler_params['preconditioned_lambda'] = .01 sampler_params['fade_in_velocities'] = True env.setup_data_dir() self.configure_env_mcmc(env, pSGLDSampler, sampler_params) env.run() def run_dropout(self): env = PowerPlantEnv() self.setup_env_defaults(env) env.model_name = 'dropout' env.n_samples = 40 sampler_params = dict() sampler_params['n_epochs'] = 5 dropout = 0.05 tau = 0.087 env.setup_data_dir() self.configure_env_dropout(env, sampler_params=sampler_params, dropout=dropout, tau=tau) env.run() def run_pbp(self): env = PowerPlantEnv() self.setup_env_defaults(env) env.model_name = 'pbp' env.n_samples = 1000 env.n_chunks = 10 env.setup_data_dir() self.configure_env_pbp(env, n_epochs=4) env.run() def main(): experiment = PowerPlantExp() queue = OrderedDict() queue['HMC'] = experiment.run_baseline_hmc queue['SGLD'] = experiment.run_sgld queue['SGHMC'] = experiment.run_sghmc queue['pSGLD'] = experiment.run_psgld # queue["PBP"] = experiment.run_pbp queue['Dropout'] = experiment.run_dropout experiment.run_queue(queue, cpu=False) experiment.report_metrics_table(queue) del queue['HMC'] max_time = 15 experiment.plot_multiple_metrics('HMC', queue.keys(), ['KS'], max_time=max_time, title_name='KS distance') experiment.plot_multiple_metrics('HMC', queue.keys(), ['Precision'], max_time=max_time, title_name='Precision') experiment.plot_multiple_metrics('HMC', queue.keys(), ['Recall'], max_time=max_time, title_name='Recall') # experiment.plot_multiple_metrics("HMC", queue.keys(), ["KL"]) # experiment.plot_multiple_metrics("HMC", queue.keys(), 
["F1"], max_time=max_time, title_name="F1 score") # experiment.plot_multiple_metrics("HMC", queue.keys(), ["IoU"], max_time=max_time) if __name__ == '__main__': main()
myshkov/bnn-analysis
experiments/power_plant/pp_exp.py
Python
mit
4,489
# -*- coding: utf-8 -*- """Module that helps in checking the correctness of CSV file structure."""
TMiguelT/csvschema
csv_schema/__init__.py
Python
mit
103
# python-modeled
#
# Copyright (C) 2014 Stefan Zimmermann <zimmermann.code@gmail.com>
#
# python-modeled is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-modeled is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with python-modeled. If not, see <http://www.gnu.org/licenses/>.

"""modeled.datetime

The modeled.datetime (mdatetime) class.

.. moduleauthor:: Stefan Zimmermann <zimmermann.code@gmail.com>
"""
from __future__ import absolute_import

from six import with_metaclass

__all__ = ['datetime']

from datetime import datetime as base

from moretools import isstring


class Meta(type(base)):
    """Metaclass for :class:`modeled.datetime`.
    """
    def __getitem__(cls, format):
        class subclass(cls):
            pass

        subclass.format = format = str(format)
        subclass.__name__ = subclass.__qualname__ = '%s[%s]' % (
            cls.__name__, repr(format))
        # The format-bound subclass must be returned; otherwise
        # modeled.datetime[<format string>] would evaluate to None.
        return subclass


class datetime(with_metaclass(Meta, base)):
    """modeled.datetime, a :class:`datetime.datetime`-derived class
       with these additional features:

    - Class-bound <datetime class>.format string for converting to and from
      string representation, which defaults to '%Y-%m-%d %H:%M:%S'
      and can be customized by creating subclasses
      via modeled.datetime[<format string>]

    - Supports direct instantiation from single string arg,
      implicitly using the class-bound format for parsing.
    """
    format = '%Y-%m-%d %H:%M:%S'

    def __new__(cls, string_or_year, *mdhms):
        """Create a new :class:`modeled.datetime` instance by giving either
           a single string to be parsed with the class-bound ``format``
           or the usual year, month, day[, hour[, minute[, second]]] args.
        """
        if isstring(string_or_year):
            dt = base.strptime(string_or_year, cls.format)
            return base.__new__(cls, *dt.timetuple()[:6])

        return base.__new__(cls, string_or_year, *mdhms)
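
# Minimal usage sketch (added for illustration; not part of the original
# module). It exercises only the behavior defined above: parsing with the
# class-bound default format, and format-bound subclassing via item access.
if __name__ == '__main__':
    dt = datetime('2014-07-01 12:30:00')    # default '%Y-%m-%d %H:%M:%S'
    german = datetime['%d.%m.%Y %H:%M']     # format-bound subclass
    assert german.format == '%d.%m.%Y %H:%M'
    print(german('24.12.2014 18:30'))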
userzimmermann/python-modeled
modeled/datetime.py
Python
gpl-3.0
2,277
#!/usr/bin/env python import sys, os sys.path.append("../") from demo_runner import Demo class CDKDemo(Demo): def run_demo(self): os.chdir("demo") self.print_comment("Let's take a look at the vagrant files included with the CDK") self.print_and_exec_cmd("tree") self.print_comment("Let's create a working directory and make a docker server.") self.print_and_exec_cmd("mkdir docker-dev") self.print_and_exec_cmd("cp -rvf cdk/components/rhel-with-docker/* docker-dev/") self.print_and_exec_cmd("tree docker-dev") self.print_comment("Now our credentials are already set in environment variables like this:") self.print_comment("export SUB_USERNAME='username'") self.print_comment("export SUB_PASSWORD='password'") self.print_comment("The Vagrantfile picks up those creds.") self.print_and_exec_cmd("cat docker-dev/docker-host/Vagrantfile") self.print_comment( """We are going to add an image to the 'dev' Vagrantfile. While we are at it we are going to map a port to access the container""") self.print_and_exec_cmd("sed -i -e 's|d.build_dir = \".\"|d.image = \"fedora/apache\"\\n d.ports=[\"8080:80\"]|g' docker-dev/dev/Vagrantfile") self.print_and_exec_cmd("cat docker-dev/dev/Vagrantfile") self.print_comment("Now we can start the server up") self.print_and_exec_cmd("cd docker-dev/dev/") USER_PROMPT = "[root@example.com dev]# " os.chdir("docker-dev/dev/") self.print_and_exec_cmd("vagrant up") self.print_comment("Finally, we can see that apache is now running in a docker container on our rhel host") self.print_and_exec_cmd("export HOST_IP=$(virsh net-dhcp-leases vagrant-libvirt | grep docker | awk '{print $6}' | sed -e 's|/24||g')") self.print_and_exec_cmd("curl -L http://$HOST_IP:8080/") if __name__ == '__main__': demo = CDKDemo() demo.demo()
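
# For reference (inferred from the sed command above, not from the CDK
# sources): after the edit, the dev Vagrantfile's docker block contains
#     d.image = "fedora/apache"
#     d.ports = ["8080:80"]
# in place of the original d.build_dir = "." line, which is what exposes
# the container's port 80 on host port 8080 for the final curl check.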
whitel/summit-cdk-demo-2015
demo_cdk_use/demo_cdk_run_apache.py
Python
gpl-2.0
1,991
from django.conf import settings from django.template import RequestContext from django.shortcuts import render_to_response, redirect from django.contrib.auth.decorators import login_required from django.contrib.auth import logout as auth_logout from social.apps.django_app.utils import strategy from social.backends.google import GooglePlusAuth def logout(request): """Logs out user""" auth_logout(request) return render_to_response('login.html', {}, RequestContext(request)) def login(request): """Social Auth main view, displays login mechanism""" if request.user.is_authenticated(): return redirect('done') return render_to_response('login.html', { 'plus_id': getattr(settings, 'SOCIAL_AUTH_GOOGLE_PLUS_KEY', None) }, RequestContext(request)) @login_required def done(request): """Login complete view, displays user data""" scope = ' '.join(GooglePlusAuth.DEFAULT_SCOPE) #print "whazzzuppp!! Guess what? I was just requested! \n",request,"\n" print "=======================" print dir(strategy) print "=======================" print dir(request) print "=======================" return render_to_response('done.html', { 'user': request.user, 'plus_id': getattr(settings, 'SOCIAL_AUTH_GOOGLE_PLUS_KEY', None), 'plus_scope': scope }, RequestContext(request)) def signup_email(request): return render_to_response('email_signup.html', {}, RequestContext(request)) def validation_sent(request): return render_to_response('validation_sent.html', { 'email': request.session.get('email_validation_address') }, RequestContext(request)) def require_email(request): if request.method == 'POST': request.session['saved_email'] = request.POST.get('email') backend = request.session['partial_pipeline']['backend'] return redirect('social:complete', backend=backend) return render_to_response('email.html', RequestContext(request))
arcolife/django-scholarec
scholarec_web/scholarec_web/app/views.py
Python
gpl-3.0
1,993
# -*- coding: utf-8 -*- """ Utilities ========= Miscellaneous utilities. """ # Author: Eric Larson # License: 3-clause BSD from __future__ import division, absolute_import, print_function import hashlib import os from shutil import move, copyfile import subprocess from . import sphinx_compatibility from sphinx.errors import ExtensionError logger = sphinx_compatibility.getLogger('sphinx-gallery') def _get_image(): try: from PIL import Image except ImportError as exc: # capture the error for the modern way try: import Image except ImportError: raise ExtensionError( 'Could not import pillow, which is required ' 'to rescale images (e.g., for thumbnails): %s' % (exc,)) return Image def scale_image(in_fname, out_fname, max_width, max_height): """Scales an image with the same aspect ratio centered in an image box with the given max_width and max_height if in_fname == out_fname the image can only be scaled down """ # local import to avoid testing dependency on PIL: Image = _get_image() img = Image.open(in_fname) # XXX someday we should just try img.thumbnail((max_width, max_height)) ... width_in, height_in = img.size scale_w = max_width / float(width_in) scale_h = max_height / float(height_in) if height_in * scale_w <= max_height: scale = scale_w else: scale = scale_h if scale >= 1.0 and in_fname == out_fname: return width_sc = int(round(scale * width_in)) height_sc = int(round(scale * height_in)) # resize the image using resize; if using .thumbnail and the image is # already smaller than max_width, max_height, then this won't scale up # at all (maybe could be an option someday...) img = img.resize((width_sc, height_sc), Image.BICUBIC) # img.thumbnail((width_sc, height_sc), Image.BICUBIC) # width_sc, height_sc = img.size # necessary if using thumbnail # insert centered thumb = Image.new('RGBA', (max_width, max_height), (255, 255, 255, 255)) pos_insert = ((max_width - width_sc) // 2, (max_height - height_sc) // 2) thumb.paste(img, pos_insert) try: thumb.save(out_fname) except IOError: # try again, without the alpha channel (e.g., for JPEG) thumb.convert('RGB').save(out_fname) def optipng(fname, args=()): """Optimize a PNG in place. Parameters ---------- fname : str The filename. If it ends with '.png', ``optipng -o7 fname`` will be run. If it fails because the ``optipng`` executable is not found or optipng fails, the function returns. args : tuple Extra command-line arguments, such as ``['-o7']``. """ if fname.endswith('.png'): # -o7 because this is what CPython used # https://github.com/python/cpython/pull/8032 try: subprocess.check_call( ['optipng'] + list(args) + [fname], stdout=subprocess.PIPE, stderr=subprocess.PIPE) except (subprocess.CalledProcessError, IOError): # FileNotFoundError pass def _has_optipng(): try: subprocess.check_call(['optipng', '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) except IOError: # FileNotFoundError return False else: return True def replace_py_ipynb(fname): """Replace .py extension in filename by .ipynb""" fname_prefix, extension = os.path.splitext(fname) allowed_extension = '.py' if extension != allowed_extension: raise ValueError( "Unrecognized file extension, expected %s, got %s" % (allowed_extension, extension)) new_extension = '.ipynb' return '{}{}'.format(fname_prefix, new_extension) def get_md5sum(src_file, mode='b'): """Returns md5sum of file Parameters ---------- src_file : str Filename to get md5sum for. mode : 't' or 'b' File mode to open file with. 
When in text mode, universal line endings are used to ensure
        consistency in hashes between platforms.
    """
    errors = 'surrogateescape' if mode == 't' else None
    with open(src_file, 'r' + mode, errors=errors) as src_data:
        src_content = src_data.read()
    if mode == 't':
        src_content = src_content.encode(errors=errors)
    return hashlib.md5(src_content).hexdigest()


def _replace_md5(fname_new, fname_old=None, method='move', mode='b'):
    assert method in ('move', 'copy')
    if fname_old is None:
        assert fname_new.endswith('.new')
        fname_old = os.path.splitext(fname_new)[0]
    if os.path.isfile(fname_old) and (get_md5sum(fname_old, mode) ==
                                      get_md5sum(fname_new, mode)):
        if method == 'move':
            os.remove(fname_new)
    else:
        if method == 'move':
            move(fname_new, fname_old)
        else:
            copyfile(fname_new, fname_old)
    assert os.path.isfile(fname_old)


class Bunch(dict):
    """Dictionary-like object that exposes its keys as attributes."""

    def __init__(self, **kwargs):  # noqa: D102
        dict.__init__(self, kwargs)
        self.__dict__ = self


def _has_pypandoc():
    """Check if pypandoc package available."""
    try:
        import pypandoc  # noqa
        # Import error raised only when function called
        version = pypandoc.get_pandoc_version()
    except (ImportError, OSError):
        return None, None
    else:
        return True, version
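
# Minimal sketch of Bunch (added for illustration; not part of upstream
# sphinx-gallery): keyword arguments become both dict keys and attributes,
# and attribute writes update the underlying dict.
if __name__ == '__main__':
    b = Bunch(width=400, height=280)
    assert b.width == 400 and b['height'] == 280
    b.depth = 3
    assert b['depth'] == 3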
Eric89GXL/sphinx-gallery
sphinx_gallery/utils.py
Python
bsd-3-clause
5,635
#!/usr/bin/python import sys import math import string import multiprocessing import subprocess if len(sys.argv) < 2: print("supply the path to the doctest executable as the first argument!") sys.exit(1) # get the number of tests in the doctest executable num_tests = 0 program_with_args = [sys.argv[1], "--dt-count=1"] for i in range(2, len(sys.argv)): program_with_args.append(sys.argv[i]) result = subprocess.Popen(program_with_args, stdout = subprocess.PIPE).communicate()[0] result = result.splitlines(True) for line in result: if line.startswith("[doctest] unskipped test cases passing the current filters:"): num_tests = int(line.rsplit(' ', 1)[-1]) # calculate the ranges cores = multiprocessing.cpu_count() l = range(num_tests + 1) n = int(math.ceil(float(len( l )) / cores)) data = [l[i : i + n] for i in range(1, len( l ), n)] data = tuple([[x[0], x[-1]] for x in data]) # for 8 cores and 100 tests the ranges will look like this # ([1, 13], [14, 26], [27, 39], [40, 52], [53, 65], [66, 78], [79, 91], [92, 100]) # the worker callback that runs the executable for the given range of tests def worker((first, last)): program_with_args = [sys.argv[1], "--dt-first=" + str(first), "--dt-last=" + str(last)] subprocess.Popen(program_with_args) # run the tasks on a pool if __name__ == '__main__': p = multiprocessing.Pool(cores) p.map(worker, data)
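
# Illustration (hypothetical executable name): for the 8-core / 100-test
# split shown above, the pool is equivalent to launching one contiguous
# range per core by hand, e.g.
#   ./doctest_exe --dt-first=1  --dt-last=13
#   ./doctest_exe --dt-first=14 --dt-last=26
#   ...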
abeimler/cmdocker-tmpl
libs/doctest/examples/range_based_execution.py
Python
mit
1,405